Use of io.pravega.common.TimeoutTimer in project pravega by pravega.
The class StreamSegmentContainerTests, method waitForSegmentInStorage:
private CompletableFuture<Void> waitForSegmentInStorage(SegmentProperties metadataProps, TestContext context) {
    if (metadataProps.getLength() == 0) {
        // Empty segments may or may not exist in Storage, so don't bother complicating ourselves with this.
        return CompletableFuture.completedFuture(null);
    }

    // Check if the Storage Segment is caught up. If sealed, we want to make sure that both the Segment and its
    // Attribute Segment are sealed (or the latter has been deleted - for transactions). For all others, we want to
    // ensure that the length and truncation offsets have caught up.
    BiFunction<SegmentProperties, SegmentProperties, Boolean> meetsConditions = (segmentProps, attrProps) ->
            metadataProps.isSealed() == (segmentProps.isSealed() && (attrProps.isSealed() || attrProps.isDeleted()))
                    && segmentProps.getLength() >= metadataProps.getLength()
                    && context.storageFactory.truncationOffsets.getOrDefault(metadataProps.getName(), 0L) >= metadataProps.getStartOffset();

    String attributeSegmentName = NameUtils.getAttributeSegmentName(metadataProps.getName());
    AtomicBoolean canContinue = new AtomicBoolean(true);
    TimeoutTimer timer = new TimeoutTimer(TIMEOUT);
    return Futures.loop(canContinue::get, () -> {
        val segInfo = getStorageSegmentInfo(metadataProps.getName(), timer, context);
        val attrInfo = getStorageSegmentInfo(attributeSegmentName, timer, context);
        return CompletableFuture.allOf(segInfo, attrInfo).thenCompose(v -> {
            if (meetsConditions.apply(segInfo.join(), attrInfo.join())) {
                canContinue.set(false);
                return CompletableFuture.completedFuture(null);
            } else if (!timer.hasRemaining()) {
                return Futures.failedFuture(new TimeoutException());
            } else {
                return Futures.delayedFuture(Duration.ofMillis(10), executorService());
            }
        }).thenRun(Runnables.doNothing());
    }, executorService());
}
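The core idiom above is a Futures.loop whose body checks a condition, fails with a TimeoutException once the TimeoutTimer is exhausted, and otherwise sleeps briefly via Futures.delayedFuture before the next check. A minimal, generic sketch of that idiom follows; pollUntil, the checkCondition supplier, and the caller-supplied ScheduledExecutorService are hypothetical names, not part of the Pravega test code.

import io.pravega.common.TimeoutTimer;
import io.pravega.common.concurrent.Futures;
import java.time.Duration;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Supplier;

// Polls checkCondition until it reports true, failing with TimeoutException once the budget runs out.
private CompletableFuture<Void> pollUntil(Supplier<CompletableFuture<Boolean>> checkCondition,
                                          Duration timeout, ScheduledExecutorService executor) {
    TimeoutTimer timer = new TimeoutTimer(timeout);
    AtomicBoolean canContinue = new AtomicBoolean(true);
    return Futures.loop(canContinue::get, () -> checkCondition.get().thenCompose(done -> {
        if (done) {
            canContinue.set(false); // Condition met: stop looping.
            return CompletableFuture.completedFuture(null);
        } else if (!timer.hasRemaining()) {
            return Futures.failedFuture(new TimeoutException()); // Out of time.
        } else {
            return Futures.delayedFuture(Duration.ofMillis(10), executor); // Back off, then re-check.
        }
    }), executor);
}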
Use of io.pravega.common.TimeoutTimer in project pravega by pravega.
The class StreamSegmentContainerTests, method tryActivate:
/**
* Attempts to activate the targetSegment in the given Container. Since we do not have access to the internals of the
* Container, we need to trigger this somehow, hence the need for this complex code. We need to trigger a truncation,
* so we need an 'appendSegment' to which we continuously append so that the DurableDataLog is truncated. After truncation,
* the Metadata should have enough leeway in making room for new activation.
*
* @return A Future that will complete either with an exception (failure) or SegmentProperties for the targetSegment.
*/
private CompletableFuture<SegmentProperties> tryActivate(MetadataCleanupContainer localContainer, String targetSegment, String appendSegment) {
    CompletableFuture<SegmentProperties> successfulMap = new CompletableFuture<>();

    // Append continuously to an existing segment in order to trigger truncations (these are necessary for forced evictions).
    val appendFuture = localContainer.appendRandomly(appendSegment, false, () -> !successfulMap.isDone());
    Futures.exceptionListener(appendFuture, successfulMap::completeExceptionally);

    // Repeatedly try to get info on 'segment1' (activate it), until we succeed or time out.
    TimeoutTimer remaining = new TimeoutTimer(TIMEOUT);
    Futures.loop(
            () -> !successfulMap.isDone(),
            () -> Futures.delayedFuture(Duration.ofMillis(EVICTION_SEGMENT_EXPIRATION_MILLIS_SHORT), executorService())
                    .thenCompose(v -> localContainer.getStreamSegmentInfo(targetSegment, TIMEOUT))
                    .thenAccept(successfulMap::complete)
                    .exceptionally(ex -> {
                        if (!(Exceptions.unwrap(ex) instanceof TooManyActiveSegmentsException)) {
                            // Some other error.
                            successfulMap.completeExceptionally(ex);
                        } else if (!remaining.hasRemaining()) {
                            // Waited too long.
                            successfulMap.completeExceptionally(new TimeoutException("No successful activation could be done in the allotted time."));
                        }

                        // Try again.
                        return null;
                    }),
            executorService());
    return successfulMap;
}
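A generalized form of this retry-until-success pattern can be written with the same primitives. The sketch below is hypothetical: retryUntilDone, the attempt supplier, and the isRetryable predicate are invented names for illustration (the test above hard-codes TooManyActiveSegmentsException as the only retryable failure).

import io.pravega.common.Exceptions;
import io.pravega.common.TimeoutTimer;
import io.pravega.common.concurrent.Futures;
import java.time.Duration;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeoutException;
import java.util.function.Predicate;
import java.util.function.Supplier;

// Retries 'attempt' until it succeeds, a non-retryable error occurs, or the overall timeout elapses.
private <T> CompletableFuture<T> retryUntilDone(Supplier<CompletableFuture<T>> attempt,
                                                Predicate<Throwable> isRetryable,
                                                Duration retryDelay, Duration timeout,
                                                ScheduledExecutorService executor) {
    CompletableFuture<T> result = new CompletableFuture<>();
    TimeoutTimer timer = new TimeoutTimer(timeout);
    Futures.loop(() -> !result.isDone(),
            () -> Futures.delayedFuture(retryDelay, executor)
                    .thenCompose(v -> attempt.get())
                    .thenAccept(result::complete)
                    .exceptionally(ex -> {
                        if (!isRetryable.test(Exceptions.unwrap(ex))) {
                            result.completeExceptionally(ex); // Some other error: give up.
                        } else if (!timer.hasRemaining()) {
                            result.completeExceptionally(new TimeoutException()); // Waited too long.
                        }
                        return null; // Otherwise swallow the failure and let the loop try again.
                    }),
            executor);
    return result;
}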
Use of io.pravega.common.TimeoutTimer in project pravega by pravega.
The class SegmentStateStore, method get:
// endregion
// region AsyncMap implementation
@Override
public CompletableFuture<SegmentState> get(String segmentName, Duration timeout) {
    String stateSegment = StreamSegmentNameUtils.getStateSegmentName(segmentName);
    TimeoutTimer timer = new TimeoutTimer(timeout);
    return this.storage
            .getStreamSegmentInfo(stateSegment, timer.getRemaining())
            .thenComposeAsync(sp -> {
                if (sp.getLength() == 0) {
                    // Empty state files are treated the same as if they didn't exist.
                    return CompletableFuture.completedFuture(null);
                } else {
                    return readSegmentState(sp, timer.getRemaining());
                }
            }, this.executor)
            .exceptionally(this::handleSegmentNotExistsException);
}
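The point of the TimeoutTimer here is that one caller-supplied budget is shared across both asynchronous stages: each stage receives timer.getRemaining(), and the second call is evaluated only after the first stage completes. A minimal, hypothetical sketch of that idiom follows; withSharedBudget, firstStage, and secondStage are illustrative names.

import io.pravega.common.TimeoutTimer;
import java.time.Duration;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
import java.util.function.BiFunction;
import java.util.function.Function;

// Runs two dependent asynchronous stages under a single time budget.
private <A, B> CompletableFuture<B> withSharedBudget(Function<Duration, CompletableFuture<A>> firstStage,
                                                     BiFunction<A, Duration, CompletableFuture<B>> secondStage,
                                                     Duration timeout, Executor executor) {
    TimeoutTimer timer = new TimeoutTimer(timeout);
    // The first stage gets whatever is left of the budget right now; the second stage re-reads the
    // timer only after the first completes, so the two together never exceed the original timeout.
    return firstStage.apply(timer.getRemaining())
            .thenComposeAsync(a -> secondStage.apply(a, timer.getRemaining()), executor);
}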
Use of io.pravega.common.TimeoutTimer in project pravega by pravega.
The class StreamSegmentContainer, method mergeTransaction:
@Override
public CompletableFuture<Void> mergeTransaction(String transactionName, Duration timeout) {
    ensureRunning();
    logRequest("mergeTransaction", transactionName);
    this.metrics.mergeTxn();
    TimeoutTimer timer = new TimeoutTimer(timeout);
    return this.segmentMapper
            .getOrAssignStreamSegmentId(transactionName, timer.getRemaining(), transactionId -> {
                SegmentMetadata transactionMetadata = this.metadata.getStreamSegmentMetadata(transactionId);
                if (transactionMetadata == null) {
                    throw new CompletionException(new StreamSegmentNotExistsException(transactionName));
                }

                Operation op = new MergeTransactionOperation(transactionMetadata.getParentId(), transactionMetadata.getId());
                return this.durableLog.add(op, timer.getRemaining());
            })
            .thenComposeAsync(v -> this.stateStore.remove(transactionName, timer.getRemaining()), this.executor);
}
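Besides the shared timer, note how the mapper reports a missing transaction: the checked StreamSegmentNotExistsException is wrapped in a CompletionException so it can escape the lambda and fail the returned future. A small standalone sketch of that trick is shown below, using a hypothetical SegmentNotFoundException and LookupExample class in place of the real Pravega types.

import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

// Hypothetical checked exception standing in for StreamSegmentNotExistsException.
class SegmentNotFoundException extends Exception {
    SegmentNotFoundException(String name) {
        super(name);
    }
}

class LookupExample {
    // A checked exception cannot be thrown directly from the supplier lambda; wrapping it in a
    // CompletionException carries it out and completes the returned future exceptionally.
    CompletableFuture<Long> lookupIdOrFail(String name, Map<String, Long> ids) {
        return CompletableFuture.supplyAsync(() -> {
            Long id = ids.get(name);
            if (id == null) {
                throw new CompletionException(new SegmentNotFoundException(name));
            }
            return id;
        });
    }
}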
Use of io.pravega.common.TimeoutTimer in project pravega by pravega.
The class SegmentAggregator, method flushNormally:
/**
* Repeatedly flushes the contents of the Aggregator to the Storage as long as something immediate needs to be flushed,
* such as a Seal or Merge operation.
*
* @param timer Timer for the operation.
* @return A CompletableFuture that, when completed, will contain the result from the flush operation.
*/
private CompletableFuture<FlushResult> flushNormally(TimeoutTimer timer) {
    assert this.state.get() == AggregatorState.Writing : "flushNormally cannot be called if state == " + this.state;
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "flushNormally", this.operations.size());
    FlushResult result = new FlushResult();
    AtomicBoolean canContinue = new AtomicBoolean(true);
    return Futures
            .loop(canContinue::get,
                    () -> flushOnce(timer),
                    partialResult -> {
                        canContinue.set(partialResult.getFlushedBytes() + partialResult.getMergedBytes() > 0);
                        result.withFlushResult(partialResult);
                    },
                    this.executor)
            .thenApply(v -> {
                LoggerHelpers.traceLeave(log, this.traceObjectId, "flushNormally", traceId, result);
                return result;
            });
}
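This flush loop uses the Futures.loop overload that also takes a result consumer, which lets each iteration's FlushResult be accumulated and used to decide whether to keep going. A stripped-down, hypothetical sketch of that shape follows; workUntilIdle and doWork are illustrative names, and the real code accumulates a FlushResult rather than a byte count.

import io.pravega.common.concurrent.Futures;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Supplier;

// Repeats doWork until a pass makes no progress, then completes with the total bytes processed.
private CompletableFuture<Long> workUntilIdle(Supplier<CompletableFuture<Long>> doWork, ScheduledExecutorService executor) {
    AtomicLong totalBytes = new AtomicLong();
    AtomicBoolean canContinue = new AtomicBoolean(true);
    return Futures.loop(
            canContinue::get,   // Keep looping while the last pass made progress.
            doWork,             // One pass of work; completes with the bytes it handled.
            bytes -> {          // Consume each pass's result.
                totalBytes.addAndGet(bytes);
                canContinue.set(bytes > 0);
            },
            executor)
            .thenApply(v -> totalBytes.get());
}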