Use of io.pravega.common.TimeoutTimer in project pravega: class FixedKeyLengthTableSegmentLayout, method compactIfNeeded.
private CompletableFuture<Void> compactIfNeeded(CompactionCandidate candidate) {
    val compactor = new FixedKeyLengthTableCompactor(candidate.getSegment(), this.tableCompactorConfig, this.executor);
    // A single timer bounds the whole check-then-compact sequence.
    val timer = new TimeoutTimer(this.config.getRecoveryTimeout());
    return compactor.isCompactionRequired().thenComposeAsync(isRequired -> {
        if (isRequired) {
            return compact(candidate.getSegment(), compactor, timer);
        } else {
            log.debug("{}: No compaction required at this time.", this.traceObjectId);
            return CompletableFuture.completedFuture(null);
        }
    }, this.executor);
}
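For context, TimeoutTimer wraps a fixed time budget that starts counting down at construction; the constructor and getRemaining() calls seen in these snippets are the entire surface they rely on. A minimal sketch of that behavior, assuming the io.pravega:pravega-common artifact is on the classpath:

    import io.pravega.common.TimeoutTimer;
    import java.time.Duration;

    public class TimeoutTimerDemo {
        public static void main(String[] args) throws InterruptedException {
            // The budget starts ticking as soon as the timer is constructed.
            TimeoutTimer timer = new TimeoutTimer(Duration.ofSeconds(10));
            System.out.println("Remaining: " + timer.getRemaining());

            Thread.sleep(250);

            // After ~250ms, getRemaining() reports roughly 9.75s; each async
            // stage that reads it gets only what is left of the original budget.
            System.out.println("Remaining: " + timer.getRemaining());
        }
    }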
Use of io.pravega.common.TimeoutTimer in project pravega: class FixedKeyLengthTableSegmentLayout, method newIterator.
private <T> CompletableFuture<AsyncIterator<IteratorItem<T>>> newIterator(@NonNull DirectSegmentAccess segment, @NonNull GetIteratorItem<T> getItems, @NonNull IteratorArgs args) {
    Preconditions.checkArgument(args.getContinuationToken() == null, "ContinuationToken not supported for FixedKeyLengthTableSegments.");
    val segmentKeyLength = getSegmentKeyLength(segment.getInfo());
    val fromId = args.getFrom() == null ? AttributeId.Variable.minValue(segmentKeyLength) : AttributeId.from(args.getFrom().getCopy());
    val toId = args.getTo() == null ? AttributeId.Variable.maxValue(segmentKeyLength) : AttributeId.from(args.getTo().getCopy());
    // The timer is consulted for the initial attribute fetch and then handed to
    // the TableIterator, so subsequent fetches share the same deadline.
    val timer = new TimeoutTimer(args.getFetchTimeout());
    return segment.attributeIterator(fromId, toId, timer.getRemaining())
            .thenApply(ai -> new TableIterator<>(ai, segment, getItems, timer));
}
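The pattern worth noting here: newIterator spends part of args.getFetchTimeout() on the initial attributeIterator call, then hands the same TimeoutTimer to the TableIterator so every later fetch draws from the remaining budget instead of restarting the clock. A hedged sketch of that idea, where fetchIndex and fetchPage are hypothetical stand-ins for the two stages:

    import io.pravega.common.TimeoutTimer;
    import java.time.Duration;
    import java.util.List;
    import java.util.concurrent.CompletableFuture;

    public class SharedDeadlineSketch {
        // Hypothetical stand-ins for segment.attributeIterator(...) and the
        // TableIterator's subsequent reads; only the timer handling is the point.
        static CompletableFuture<List<Long>> fetchIndex(Duration timeout) {
            return CompletableFuture.completedFuture(List.of(1L, 2L, 3L));
        }

        static CompletableFuture<String> fetchPage(List<Long> index, Duration timeout) {
            return CompletableFuture.completedFuture("page:" + index);
        }

        static CompletableFuture<String> listAll(TimeoutTimer timer) {
            // Stage 1 consumes part of the budget...
            return fetchIndex(timer.getRemaining())
                    // ...and stage 2 receives only what is left, so the caller's
                    // overall deadline holds across both hops.
                    .thenCompose(index -> fetchPage(index, timer.getRemaining()));
        }

        public static void main(String[] args) {
            System.out.println(listAll(new TimeoutTimer(Duration.ofSeconds(5))).join());
        }
    }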
Use of io.pravega.common.TimeoutTimer in project pravega: class FixedKeyLengthTableSegmentLayout, method put.
@Override
CompletableFuture<List<Long>> put(@NonNull DirectSegmentAccess segment, @NonNull List<TableEntry> entries, long tableSegmentOffset, TimeoutTimer timer) {
    val segmentInfo = segment.getInfo();
    ensureSegmentType(segmentInfo.getName(), segmentInfo.getType());
    val segmentKeyLength = getSegmentKeyLength(segmentInfo);
    ensureValidKeyLength(segmentInfo.getName(), segmentKeyLength);
    val attributeUpdates = new AttributeUpdateCollection();
    int batchOffset = 0;
    val batchOffsets = new ArrayList<Integer>();
    boolean isConditional = false;
    for (val e : entries) {
        val key = e.getKey();
        Preconditions.checkArgument(key.getKey().getLength() == segmentKeyLength,
                "Entry Key Length for key `%s` incompatible with segment '%s' which requires key lengths of %s.",
                key, segmentInfo.getName(), segmentKeyLength);
        attributeUpdates.add(createIndexUpdate(key, batchOffset));
        isConditional |= key.hasVersion();
        batchOffsets.add(batchOffset);
        batchOffset += this.serializer.getUpdateLength(e);
    }
    logRequest("put", segmentInfo.getName(), isConditional, tableSegmentOffset, entries.size(), batchOffset);
    if (batchOffset > this.config.getMaxBatchSize()) {
        throw new UpdateBatchTooLargeException(batchOffset, this.config.getMaxBatchSize());
    }
    // Update total number of entries in Table (this includes updates to the same key).
    attributeUpdates.add(new AttributeUpdate(TableAttributes.TOTAL_ENTRY_COUNT, AttributeUpdateType.Accumulate, entries.size()));
    val serializedEntries = this.serializer.serializeUpdate(entries);
    val append = tableSegmentOffset == TableSegmentLayout.NO_OFFSET
            ? segment.append(serializedEntries, attributeUpdates, timer.getRemaining())
            : segment.append(serializedEntries, attributeUpdates, tableSegmentOffset, timer.getRemaining());
    return handleConditionalUpdateException(append, segmentInfo).thenApply(segmentOffset -> {
        this.compactionService.process(new CompactionCandidate(segment));
        return batchOffsets.stream().map(offset -> offset + segmentOffset).collect(Collectors.toList());
    });
}
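One detail that is easy to miss in put(): batchOffsets records each entry's position relative to the start of the serialized batch, and those positions only become absolute once the append reports where the batch actually landed in the segment. A small worked sketch of that arithmetic, with made-up entry lengths standing in for serializer.getUpdateLength(e):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.stream.Collectors;

    public class BatchOffsetSketch {
        public static void main(String[] args) {
            int[] updateLengths = {30, 45, 12}; // hypothetical serialized entry lengths

            // Pass 1: record each entry's offset relative to the batch start.
            List<Integer> batchOffsets = new ArrayList<>();
            int batchOffset = 0;
            for (int len : updateLengths) {
                batchOffsets.add(batchOffset);
                batchOffset += len;
            }

            // The append reports where the batch landed in the segment; shifting
            // by that position yields the absolute offset of each entry.
            long segmentOffset = 1_000L; // pretend value returned by the append
            List<Long> absolute = batchOffsets.stream()
                    .map(o -> o + segmentOffset)
                    .collect(Collectors.toList());
            System.out.println(absolute); // [1000, 1030, 1075]
        }
    }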
Use of io.pravega.common.TimeoutTimer in project pravega: class SegmentAggregator, method flush.
// endregion

// region Flushing and Merging

/**
 * Flushes the contents of the Aggregator to the Storage.
 *
 * @param force   If true, force-flushes everything accumulated in the {@link SegmentAggregator}, regardless of
 *                the value returned by {@link #mustFlush()}.
 * @param timeout Timeout for the operation.
 * @return A CompletableFuture that, when completed, will contain a summary of the flush operation. If any errors
 * occurred during the flush, the Future will be completed with the appropriate exception.
 */
@Override
public CompletableFuture<WriterFlushResult> flush(boolean force, Duration timeout) {
    ensureInitializedAndNotClosed();
    if (this.metadata.isDeletedInStorage()) {
        // Segment has been deleted; don't do anything else.
        return CompletableFuture.completedFuture(new WriterFlushResult());
    }
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "flush");
    TimeoutTimer timer = new TimeoutTimer(timeout);
    CompletableFuture<WriterFlushResult> result;
    try {
        switch (this.state.get()) {
            case Writing:
                result = flushNormally(force, timer);
                break;
            case ReconciliationNeeded:
                result = beginReconciliation(timer).thenComposeAsync(v -> reconcile(timer), this.executor);
                break;
            case Reconciling:
                result = reconcile(timer);
                break;
            // $CASES-OMITTED$
            default:
                result = Futures.failedFuture(new IllegalStateException(String.format("Unexpected state for SegmentAggregator (%s) for segment '%s'.", this.state, this.metadata.getName())));
                break;
        }
    } catch (Exception ex) {
        // Convert synchronous errors into async errors - it's easier to handle on the receiving end.
        result = Futures.failedFuture(ex);
    }
    return result.thenApply(r -> {
        LoggerHelpers.traceLeave(log, this.traceObjectId, "flush", traceId, r);
        return r;
    });
}
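The try/catch around the state dispatch is a deliberate pattern: any exception thrown synchronously is converted into a failed future, so callers of flush() handle errors through a single channel. A sketch of the same idea using the JDK's CompletableFuture.failedFuture (Java 9+) in place of Pravega's Futures.failedFuture helper; asAsync is a hypothetical name:

    import java.util.concurrent.Callable;
    import java.util.concurrent.CompletableFuture;

    public class SyncToAsyncErrors {
        // Runs an action that may throw synchronously and normalizes the failure
        // into the returned future (what Futures.failedFuture does in flush()).
        static <T> CompletableFuture<T> asAsync(Callable<CompletableFuture<T>> action) {
            try {
                return action.call();
            } catch (Exception ex) {
                return CompletableFuture.failedFuture(ex);
            }
        }

        public static void main(String[] args) {
            CompletableFuture<String> f = asAsync(() -> {
                throw new IllegalStateException("thrown before any future existed");
            });
            // The caller sees exactly one error channel: the future itself.
            f.whenComplete((r, ex) -> System.out.println("Failed with: " + ex));
        }
    }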
Use of io.pravega.common.TimeoutTimer in project pravega: class SegmentAggregator, method reconcile.
private CompletableFuture<WriterFlushResult> reconcile(TimeoutTimer timer) {
    ReconciliationState rc = this.reconciliationState.get();
    WriterFlushResult result = new WriterFlushResult();
    if (rc == null) {
        setState(AggregatorState.Writing);
        return CompletableFuture.completedFuture(result);
    } else if (this.hasDeletePending.get()) {
        // If we know we are going to delete this segment, then do not bother
        // doing any other kind of reconciliation work.
        setState(AggregatorState.Writing);
        return deleteSegment(timer);
    }
    SegmentProperties storageInfo = rc.getStorageInfo();
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "reconcile", rc);

    // Process each Operation in sequence, as long as its starting offset is less
    // than ReconciliationState.getStorageInfo().getLength().
    AtomicBoolean exceededStorageLength = new AtomicBoolean(false);
    return Futures.loop(
            () -> this.operations.size() > 0 && !exceededStorageLength.get(),
            () -> {
                StorageOperation op = this.operations.getFirst();
                return reconcileOperation(op, storageInfo, timer).thenApply(partialFlushResult -> {
                    if (op.getLastStreamSegmentOffset() >= storageInfo.getLength()) {
                        // This operation crosses the boundary of StorageLength. It has been reconciled,
                        // and as such it is the last operation that we need to inspect.
                        exceededStorageLength.set(true);
                    }
                    log.info("{}: Reconciled {} ({}).", this.traceObjectId, op, partialFlushResult);
                    return partialFlushResult;
                });
            },
            result::withFlushResult,
            this.executor)
            .thenApply(v -> {
                updateMetadata(storageInfo);
                this.reconciliationState.set(null);
                setState(AggregatorState.Writing);
                LoggerHelpers.traceLeave(log, this.traceObjectId, "reconcile", traceId, result);
                return result;
            });
}
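Futures.loop is Pravega's helper for running an asynchronous body until a condition turns false; reconcile() uses an AtomicBoolean so the body itself can stop the loop once an operation crosses the storage length. A plain-JDK rendition of that shape, where drain is a hypothetical name and a numeric threshold stands in for reconcileOperation:

    import java.util.ArrayDeque;
    import java.util.Deque;
    import java.util.List;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.Executor;
    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.atomic.AtomicBoolean;

    public class AsyncLoopSketch {
        // Processes queued items one at a time until the queue drains or a
        // processed item sets the stop flag (mirroring exceededStorageLength).
        static CompletableFuture<Void> drain(Deque<Integer> queue, AtomicBoolean stop, Executor executor) {
            if (queue.isEmpty() || stop.get()) {
                return CompletableFuture.completedFuture(null);
            }
            int item = queue.removeFirst();
            return CompletableFuture
                    .supplyAsync(() -> item >= 100, executor) // stand-in for reconcileOperation(...)
                    .thenComposeAsync(crossedBoundary -> {
                        if (crossedBoundary) {
                            stop.set(true); // this item is the last one we need to inspect
                        }
                        System.out.println("Processed " + item);
                        return drain(queue, stop, executor);
                    }, executor);
        }

        public static void main(String[] args) {
            Deque<Integer> queue = new ArrayDeque<>(List.of(10, 50, 120, 7));
            drain(queue, new AtomicBoolean(false), ForkJoinPool.commonPool()).join();
            // Prints "Processed 10", "Processed 50", "Processed 120", then stops;
            // the trailing 7 is never processed.
        }
    }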