Use of com.google.common.base.Preconditions in project pravega by pravega.
The class ReadOperation, method call().
@Override
public CompletableFuture<Integer> call() {
    // Validate preconditions.
    checkPreconditions();
    log.debug("{} read - started op={}, segment={}, offset={}, length={}.",
            chunkedSegmentStorage.getLogPrefix(), System.identityHashCode(this),
            handle.getSegmentName(), offset, length);
    val streamSegmentName = handle.getSegmentName();
    return ChunkedSegmentStorage.tryWith(
            chunkedSegmentStorage.getMetadataStore().beginTransaction(true, streamSegmentName),
            txn -> txn.get(streamSegmentName).thenComposeAsync(storageMetadata -> {
                segmentMetadata = (SegmentMetadata) storageMetadata;
                // Validate preconditions.
                checkState();
                if (length == 0) {
                    return CompletableFuture.completedFuture(0);
                }
                return findChunkForOffset(txn).thenComposeAsync(v -> {
                    // Now read.
                    return readData(txn);
                }, chunkedSegmentStorage.getExecutor()).exceptionally(ex -> {
                    log.debug("{} read - exception op={}, segment={}, offset={}, bytesRead={}.",
                            chunkedSegmentStorage.getLogPrefix(), System.identityHashCode(this),
                            handle.getSegmentName(), offset, totalBytesRead);
                    if (ex instanceof CompletionException) {
                        throw (CompletionException) ex;
                    }
                    throw new CompletionException(ex);
                }).thenApplyAsync(v -> {
                    logEnd();
                    return totalBytesRead.get();
                }, chunkedSegmentStorage.getExecutor());
            }, chunkedSegmentStorage.getExecutor()),
            chunkedSegmentStorage.getExecutor());
}
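The exceptionally stage above takes care not to double-wrap failures: asynchronous stages already deliver a CompletionException, so only a bare cause gets wrapped. A minimal standalone sketch of the same pattern (class and message names here are illustrative, not taken from Pravega):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

public class ExceptionallyDemo {
    public static void main(String[] args) {
        CompletableFuture<Integer> read = CompletableFuture.supplyAsync(() -> {
            throw new IllegalStateException("simulated read failure");
        });
        CompletableFuture<Integer> logged = read.exceptionally(ex -> {
            // Rethrow a CompletionException as-is; wrap anything else exactly once.
            if (ex instanceof CompletionException) {
                throw (CompletionException) ex;
            }
            throw new CompletionException(ex);
        });
        try {
            logged.join();
        } catch (CompletionException e) {
            // The original cause is preserved for the caller to inspect.
            System.out.println("cause: " + e.getCause());
        }
    }
}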
Use of com.google.common.base.Preconditions in project pravega by pravega.
The class TruncateOperation, method call().
@Override
public CompletableFuture<Void> call() {
    checkPreconditions();
    log.debug("{} truncate - started op={}, segment={}, offset={}.",
            chunkedSegmentStorage.getLogPrefix(), System.identityHashCode(this),
            handle.getSegmentName(), offset);
    val streamSegmentName = handle.getSegmentName();
    return ChunkedSegmentStorage.tryWith(
            chunkedSegmentStorage.getMetadataStore().beginTransaction(false, streamSegmentName),
            txn -> txn.get(streamSegmentName).thenComposeAsync(storageMetadata -> {
                segmentMetadata = (SegmentMetadata) storageMetadata;
                // Check preconditions.
                checkPreconditions(streamSegmentName, segmentMetadata);
                if (segmentMetadata.getStartOffset() >= offset) {
                    // Nothing to do.
                    logEnd();
                    return CompletableFuture.completedFuture(null);
                }
                val oldChunkCount = segmentMetadata.getChunkCount();
                val oldStartOffset = segmentMetadata.getStartOffset();
                return updateFirstChunk(txn)
                        .thenComposeAsync(v -> relocateFirstChunkIfRequired(txn), chunkedSegmentStorage.getExecutor())
                        .thenComposeAsync(v -> deleteChunks(txn).thenComposeAsync(vvv -> {
                            txn.update(segmentMetadata);
                            // Check invariants.
                            segmentMetadata.checkInvariants();
                            Preconditions.checkState(segmentMetadata.getLength() == oldLength,
                                    "truncate should not change segment length. oldLength=%s Segment=%s",
                                    oldLength, segmentMetadata);
                            Preconditions.checkState(
                                    oldChunkCount - chunksToDelete.size() + (isFirstChunkRelocated ? 1 : 0) == segmentMetadata.getChunkCount(),
                                    "Number of chunks do not match. old value (%s) - number of chunks deleted (%s) + number of chunks added (%s) must match current chunk count (%s)",
                                    oldChunkCount, chunksToDelete.size(), isFirstChunkRelocated ? 1 : 0, segmentMetadata.getChunkCount());
                            if (isFirstChunkRelocated) {
                                Preconditions.checkState(segmentMetadata.getFirstChunkStartOffset() == segmentMetadata.getStartOffset(),
                                        "After relocation of first chunk FirstChunkStartOffset (%s) must match StartOffset (%s)",
                                        segmentMetadata.getFirstChunkStartOffset(), segmentMetadata.getStartOffset());
                            }
                            if (null != currentMetadata && null != segmentMetadata.getFirstChunk()) {
                                Preconditions.checkState(segmentMetadata.getFirstChunk().equals(currentMetadata.getName()),
                                        "First chunk name must match current metadata. Expected = %s Actual = %s",
                                        segmentMetadata.getFirstChunk(), currentMetadata.getName());
                                Preconditions.checkState(
                                        segmentMetadata.getStartOffset() <= segmentMetadata.getFirstChunkStartOffset() + currentMetadata.getLength(),
                                        "segment start offset (%s) must be less than or equal to first chunk start offset (%s) + first chunk length (%s)",
                                        segmentMetadata.getStartOffset(), segmentMetadata.getFirstChunkStartOffset(), currentMetadata.getLength());
                                if (segmentMetadata.getChunkCount() == 1) {
                                    Preconditions.checkState(
                                            segmentMetadata.getLength() - segmentMetadata.getFirstChunkStartOffset() == currentMetadata.getLength(),
                                            "Length of first chunk (%s) must match segment length (%s) - first chunk start offset (%s) when there is only one chunk",
                                            currentMetadata.getLength(), segmentMetadata.getLength(), segmentMetadata.getFirstChunkStartOffset());
                                }
                            }
                            // To avoid the possibility of unintentional deadlock, skip this step for storage system segments.
                            if (!segmentMetadata.isStorageSystemSegment()) {
                                chunkedSegmentStorage.deleteBlockIndexEntriesForChunk(txn, streamSegmentName, oldStartOffset, segmentMetadata.getStartOffset());
                            }
                            // Collect garbage.
                            return chunkedSegmentStorage.getGarbageCollector()
                                    .addChunksToGarbage(txn.getVersion(), chunksToDelete)
                                    .thenComposeAsync(vv -> {
                                        // Finally, commit.
                                        return commit(txn)
                                                .handleAsync(this::handleException, chunkedSegmentStorage.getExecutor())
                                                .thenRunAsync(this::postCommit, chunkedSegmentStorage.getExecutor());
                                    }, chunkedSegmentStorage.getExecutor());
                        }, chunkedSegmentStorage.getExecutor()), chunkedSegmentStorage.getExecutor());
            }, chunkedSegmentStorage.getExecutor()),
            chunkedSegmentStorage.getExecutor());
}
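Each of the invariant checks above relies on Guava's checkState, which throws IllegalStateException with a lazily formatted message; note that Guava's templates support only %s placeholders, so every format argument must be supplied positionally (the snippet originally dropped one). A small self-contained sketch with made-up values:

import com.google.common.base.Preconditions;

public class CheckStateDemo {
    public static void main(String[] args) {
        long oldLength = 100;
        long newLength = 90;
        try {
            // The message is only formatted when the condition is false.
            Preconditions.checkState(newLength == oldLength,
                    "truncate should not change segment length. oldLength=%s newLength=%s",
                    oldLength, newLength);
        } catch (IllegalStateException e) {
            // Prints: truncate should not change segment length. oldLength=100 newLength=90
            System.out.println(e.getMessage());
        }
    }
}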
Use of com.google.common.base.Preconditions in project pravega by pravega.
The class ChunkedSegmentStorage, method delete().
@Override
public CompletableFuture<Void> delete(SegmentHandle handle, Duration timeout) {
    checkInitialized();
    if (null == handle) {
        return CompletableFuture.failedFuture(new IllegalArgumentException("handle must not be null"));
    }
    return executeSerialized(() -> {
        val traceId = LoggerHelpers.traceEnter(log, "delete", handle);
        log.debug("{} delete - started segment={}.", logPrefix, handle.getSegmentName());
        val timer = new Timer();
        val streamSegmentName = handle.getSegmentName();
        return tryWith(metadataStore.beginTransaction(false, streamSegmentName),
                txn -> txn.get(streamSegmentName).thenComposeAsync(storageMetadata -> {
                    val segmentMetadata = (SegmentMetadata) storageMetadata;
                    // Check preconditions.
                    checkSegmentExists(streamSegmentName, segmentMetadata);
                    checkOwnership(streamSegmentName, segmentMetadata);
                    segmentMetadata.setActive(false);
                    txn.update(segmentMetadata);
                    // Collect garbage.
                    return garbageCollector.addSegmentToGarbage(txn.getVersion(), streamSegmentName).thenComposeAsync(vv -> {
                        // Commit metadata.
                        return txn.commit().thenRunAsync(() -> {
                            // Update the read index.
                            readIndexCache.remove(streamSegmentName);
                            val elapsed = timer.getElapsed();
                            SLTS_DELETE_LATENCY.reportSuccessEvent(elapsed);
                            SLTS_DELETE_COUNT.inc();
                            log.debug("{} delete - finished segment={}, latency={}.", logPrefix, handle.getSegmentName(), elapsed.toMillis());
                            LoggerHelpers.traceLeave(log, "delete", traceId, handle);
                        }, executor);
                    }, executor);
                }, executor), executor).exceptionally(ex -> {
            log.warn("{} delete - exception segment={}, latency={}.", logPrefix, handle.getSegmentName(), timer.getElapsedMillis(), ex);
            handleException(streamSegmentName, ex);
            return null;
        });
    }, handle.getSegmentName());
}
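tryWith is the async analogue of try-with-resources: it keeps the metadata transaction open across the asynchronous pipeline and guarantees it is closed whether the future completes normally or exceptionally. A plausible minimal sketch of such a helper, assuming the resource is AutoCloseable; this is an assumption for illustration, not the actual Pravega implementation:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
import java.util.function.Function;

public final class AsyncTryWith {
    // Hypothetical helper: applies an async function to a resource and
    // guarantees close() runs once the returned future completes.
    public static <R extends AutoCloseable, T> CompletableFuture<T> tryWith(
            R resource, Function<R, CompletableFuture<T>> fn, Executor executor) {
        CompletableFuture<T> result;
        try {
            result = fn.apply(resource);
        } catch (Throwable t) {
            // A synchronous failure still needs the cleanup below.
            result = CompletableFuture.failedFuture(t);
        }
        return result.whenCompleteAsync((v, ex) -> {
            try {
                resource.close();
            } catch (Exception e) {
                // Suppress close failures so the pipeline's original outcome wins.
            }
        }, executor);
    }
}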
Use of com.google.common.base.Preconditions in project pravega by pravega.
The class WriteOperation, method call().
@Override
public CompletableFuture<Void> call() {
    // Validate preconditions.
    checkPreconditions();
    log.debug("{} write - started op={}, segment={}, offset={} length={}.",
            chunkedSegmentStorage.getLogPrefix(), System.identityHashCode(this),
            handle.getSegmentName(), offset, length);
    val streamSegmentName = handle.getSegmentName();
    return ChunkedSegmentStorage.tryWith(
            chunkedSegmentStorage.getMetadataStore().beginTransaction(false, handle.getSegmentName()),
            txn -> {
                didSegmentLayoutChange = false;
                // Retrieve metadata.
                return txn.get(streamSegmentName).thenComposeAsync(storageMetadata -> {
                    segmentMetadata = (SegmentMetadata) storageMetadata;
                    // Validate preconditions.
                    checkState();
                    isSystemSegment = chunkedSegmentStorage.isStorageSystemSegment(segmentMetadata);
                    // Check if this is the first write after ownership changed.
                    isFirstWriteAfterFailover = segmentMetadata.isOwnershipChanged();
                    lastChunkMetadata.set(null);
                    chunkHandle = null;
                    bytesRemaining.set(length);
                    currentOffset.set(offset);
                    return getLastChunk(txn)
                            .thenComposeAsync(v -> writeData(txn)
                                    .thenComposeAsync(vv -> commit(txn)
                                            .thenApplyAsync(vvvv -> postCommit(), chunkedSegmentStorage.getExecutor())
                                            .exceptionally(this::handleException), chunkedSegmentStorage.getExecutor())
                                    .thenRunAsync(this::logEnd, chunkedSegmentStorage.getExecutor()),
                                    chunkedSegmentStorage.getExecutor());
                }, chunkedSegmentStorage.getExecutor());
            }, chunkedSegmentStorage.getExecutor()).exceptionally(ex -> (Void) handleException(ex));
}
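The body of checkPreconditions() is not shown in this snippet. A hypothetical sketch of the kind of argument validation such a method might perform with Guava Preconditions; the names and checks below are assumptions for illustration, not the actual Pravega code:

import com.google.common.base.Preconditions;

public class WriteArgsDemo {
    // Hypothetical stand-in for a checkPreconditions()-style validator:
    // Guava's checkNotNull/checkArgument reject bad inputs up front.
    static void checkWriteArgs(String segmentName, long offset, int length, byte[] data) {
        Preconditions.checkNotNull(segmentName, "segmentName");
        Preconditions.checkArgument(offset >= 0, "offset must be non-negative, was %s", offset);
        Preconditions.checkArgument(length >= 0, "length must be non-negative, was %s", length);
        Preconditions.checkNotNull(data, "data");
        Preconditions.checkArgument(length <= data.length,
                "length (%s) exceeds data size (%s)", length, data.length);
    }

    public static void main(String[] args) {
        checkWriteArgs("segment-0", 0, 4, new byte[] {1, 2, 3, 4}); // passes
    }
}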
Use of com.google.common.base.Preconditions in project pravega by pravega.
The class PersistentStreamBase, method submitScale().
/**
 * This method attempts to start a new scale workflow. For this it first computes the epoch transition and stores it
 * in the metadata store.
 * This method can be called by manual scale or during the processing of an auto-scale event, which means there can be
 * concurrent calls to this method.
 *
 * @param segmentsToSeal segments that will be sealed at the end of this scale operation.
 * @param newRanges      key ranges of new segments to be created.
 * @param scaleTimestamp scaling timestamp.
 * @param existing       previously fetched versioned epoch transition record; if null, it is fetched from the store.
 * @param context        operation context.
 * @return list of newly created segments with the current epoch.
 */
@Override
public CompletableFuture<VersionedMetadata<EpochTransitionRecord>> submitScale(final List<Long> segmentsToSeal,
                                                                               final List<Map.Entry<Double, Double>> newRanges,
                                                                               final long scaleTimestamp,
                                                                               final VersionedMetadata<EpochTransitionRecord> existing,
                                                                               OperationContext context) {
    Preconditions.checkNotNull(context, "Operation context cannot be null");
    return verifyNotSealed(context).thenCompose(v -> {
        if (existing == null) {
            return getEpochTransition(context);
        } else {
            return CompletableFuture.completedFuture(existing);
        }
    }).thenCompose(record -> getActiveEpochRecord(true, context).thenCompose(currentEpoch -> getConfiguration(context).thenCompose(config -> {
        if (!record.getObject().equals(EpochTransitionRecord.EMPTY)) {
            // A scale operation is already in progress. Verify that it matches the supplied input (segments to seal
            // and new ranges are identical); else throw a scale conflict exception.
            if (!RecordHelper.verifyRecordMatchesInput(segmentsToSeal, newRanges, false, record.getObject())) {
                log.debug(context.getRequestId(), "scale conflict, another scale operation is ongoing");
                throw new EpochTransitionOperationExceptions.ConflictException();
            }
            return CompletableFuture.completedFuture(record);
        } else {
            // Check that the input is valid and satisfies preconditions.
            if (!RecordHelper.canScaleFor(segmentsToSeal, currentEpoch)) {
                return updateEpochTransitionNode(new VersionedMetadata<>(EpochTransitionRecord.EMPTY, record.getVersion()), context)
                        .thenApply(x -> {
                            log.warn(context.getRequestId(), "scale precondition failed {}", segmentsToSeal);
                            throw new EpochTransitionOperationExceptions.PreConditionFailureException();
                        });
            }
            if (!RecordHelper.validateInputRange(segmentsToSeal, newRanges, currentEpoch)) {
                log.error(context.getRequestId(), "scale input invalid {} {}", segmentsToSeal, newRanges);
                throw new EpochTransitionOperationExceptions.InputInvalidException();
            }
            int numberOfSegmentsPostScale = currentEpoch.getSegments().size() - segmentsToSeal.size() + newRanges.size();
            if (numberOfSegmentsPostScale < config.getScalingPolicy().getMinNumSegments()) {
                log.warn(context.getRequestId(), "Scale cannot be performed as Min Segment Count will not hold {} {}",
                        segmentsToSeal, newRanges);
                throw new EpochTransitionOperationExceptions.PreConditionFailureException();
            }
            EpochTransitionRecord epochTransition = RecordHelper.computeEpochTransition(currentEpoch, segmentsToSeal, newRanges, scaleTimestamp);
            return updateEpochTransitionNode(new VersionedMetadata<>(epochTransition, record.getVersion()), context)
                    .thenApply(version -> {
                        log.info(context.getRequestId(), "scale for stream {}/{} accepted. Segments to seal = {}",
                                scope, name, epochTransition.getSegmentsToSeal());
                        return new VersionedMetadata<>(epochTransition, version);
                    });
        }
    })));
}
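submitScale guards the operation context with Preconditions.checkNotNull, which throws NullPointerException with the supplied message and also returns its argument, so it composes with field assignment. A brief standalone illustration (the class and field names are made up):

import com.google.common.base.Preconditions;

public class CheckNotNullDemo {
    private final String scope;

    CheckNotNullDemo(String scope) {
        // checkNotNull returns its argument, so it can validate and assign in one step.
        this.scope = Preconditions.checkNotNull(scope, "scope cannot be null");
    }

    public static void main(String[] args) {
        new CheckNotNullDemo("my-scope");       // ok
        try {
            new CheckNotNullDemo(null);         // throws NullPointerException
        } catch (NullPointerException e) {
            System.out.println(e.getMessage()); // prints: scope cannot be null
        }
    }
}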