use of io.pravega.segmentstore.contracts.SegmentProperties in project pravega by pravega.
the class StreamSegmentContainerTests method waitForSegmentInStorage.
private CompletableFuture<Void> waitForSegmentInStorage(SegmentProperties metadataProps, TestContext context) {
    Function<SegmentProperties, Boolean> meetsConditions = storageProps ->
            storageProps.isSealed() == metadataProps.isSealed()
                    && storageProps.getLength() >= metadataProps.getLength()
                    && context.storageFactory.truncationOffsets.getOrDefault(metadataProps.getName(), 0L) >= metadataProps.getStartOffset();
    AtomicBoolean canContinue = new AtomicBoolean(true);
    TimeoutTimer timer = new TimeoutTimer(TIMEOUT);
    return Futures.loop(
            canContinue::get,
            () -> context.storage.getStreamSegmentInfo(metadataProps.getName(), TIMEOUT)
                    .thenCompose(storageProps -> {
                        if (meetsConditions.apply(storageProps)) {
                            canContinue.set(false);
                            return CompletableFuture.completedFuture(null);
                        } else if (!timer.hasRemaining()) {
                            return Futures.failedFuture(new TimeoutException());
                        } else {
                            return Futures.delayedFuture(Duration.ofMillis(10), executorService());
                        }
                    }).thenRun(Runnables.doNothing()),
            executorService());
}
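The helper above uses Futures.loop to poll Storage until it catches up with the in-memory view of the segment. A minimal sketch of how a test might call it is below; the segment name and the way metadataProps is obtained are assumptions for illustration, not part of the snippet above.
// Hypothetical usage sketch: fetch the metadata view of the segment, then block until Storage reflects it.
SegmentProperties metadataProps = context.container.getStreamSegmentInfo(segmentName, TIMEOUT).join();
waitForSegmentInStorage(metadataProps, context).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);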
use of io.pravega.segmentstore.contracts.SegmentProperties in project pravega by pravega.
the class StreamSegmentMapperTests method testGetStreamSegmentInfoWithConcurrency.
/**
* Tests GetStreamSegmentInfo when it is invoked in parallel with a Segment assignment.
*/
@Test
public void testGetStreamSegmentInfoWithConcurrency() throws Exception {
    // GetStreamSegmentInfo is driven by the same code for Transactions as well.
    final String segmentName = "Segment";
    final long segmentId = 1;
    final SegmentProperties storageInfo = StreamSegmentInformation.builder()
            .name(segmentName)
            .length(123)
            .sealed(true)
            .build();
    final long metadataLength = storageInfo.getLength() + 1;
    HashSet<String> storageSegments = new HashSet<>();
    storageSegments.add(segmentName);
    @Cleanup
    TestContext context = new TestContext();
    AtomicInteger storageGetCount = new AtomicInteger();
    setupStorageGetHandler(context, storageSegments, sn -> {
        storageGetCount.incrementAndGet();
        return storageInfo;
    });
    setSavedState(segmentName, segmentId, 0L, ATTRIBUTE_COUNT, context);
    val segmentState = context.stateStore.get(segmentName, TIMEOUT).join();
    Map<UUID, Long> expectedAttributes = segmentState == null ? null : segmentState.getAttributes();
    CompletableFuture<Void> addInvoked = new CompletableFuture<>();
    context.operationLog.addHandler = op -> {
        addInvoked.join();
        // Need to set SegmentId on operation.
        StreamSegmentMapOperation sop = (StreamSegmentMapOperation) op;
        UpdateableSegmentMetadata segmentMetadata = context.metadata.mapStreamSegmentId(segmentName, segmentId);
        segmentMetadata.setStorageLength(sop.getLength());
        segmentMetadata.setLength(metadataLength);
        segmentMetadata.updateAttributes(expectedAttributes);
        if (sop.isSealed()) {
            segmentMetadata.markSealed();
        }
        return CompletableFuture.completedFuture(null);
    };
    // Second call is designed to hit when the first call still tries to assign the id, hence we test normal queueing.
    context.mapper.getOrAssignStreamSegmentId(segmentName, TIMEOUT, id -> CompletableFuture.completedFuture(null));
    // Concurrently with the map, request a Segment Info.
    CompletableFuture<SegmentProperties> segmentInfoFuture = context.mapper.getStreamSegmentInfo(segmentName, TIMEOUT);
    Assert.assertFalse("getSegmentInfo returned a completed future.", segmentInfoFuture.isDone());
    // Release the OperationLog add and verify the Segment Info has been served with information from the Metadata.
    addInvoked.complete(null);
    SegmentProperties segmentInfo = segmentInfoFuture.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    val expectedInfo = context.metadata.getStreamSegmentMetadata(segmentId);
    assertEquals("Unexpected Segment Info returned.", expectedInfo, segmentInfo);
    SegmentMetadataComparer.assertSameAttributes("Unexpected attributes returned.", expectedInfo.getAttributes(), segmentInfo);
}
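The assertEquals call near the end appears to be a test-local helper that compares the returned SegmentProperties against the expected metadata field by field (JUnit's assertEquals would fall back to reference equality for these types). A minimal sketch of such a comparison, written as an assumption about what it checks rather than the test's actual helper:
// Sketch (assumption, not the test's actual helper): field-by-field SegmentProperties comparison.
private static void assertEquals(String message, SegmentProperties expected, SegmentProperties actual) {
    Assert.assertEquals(message + " getName() mismatch.", expected.getName(), actual.getName());
    Assert.assertEquals(message + " getLength() mismatch.", expected.getLength(), actual.getLength());
    Assert.assertEquals(message + " isSealed() mismatch.", expected.isSealed(), actual.isSealed());
    Assert.assertEquals(message + " isDeleted() mismatch.", expected.isDeleted(), actual.isDeleted());
}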
use of io.pravega.segmentstore.contracts.SegmentProperties in project pravega by pravega.
the class SegmentAggregator method reconcileAppendOperation.
/**
* Attempts to reconcile the given Append Operation. Since Append Operations can be partially flushed, reconciliation
* may be for the full operation or for a part of it.
*
* @param op The Operation (StreamSegmentAppendOperation or CachedStreamSegmentAppendOperation) to reconcile.
* @param storageInfo The current state of the Segment in Storage.
* @param timer Timer for the operation.
* @return A CompletableFuture containing a FlushResult with the number of bytes reconciled, or failed with a ReconciliationFailureException
* if the operation cannot be reconciled based on the in-memory metadata or the current state of the Segment in Storage.
*/
private CompletableFuture<FlushResult> reconcileAppendOperation(StorageOperation op, SegmentProperties storageInfo, TimeoutTimer timer) {
    Preconditions.checkArgument(op instanceof AggregatedAppendOperation, "Not given an append operation.");
    // Read data from Storage, and compare byte-by-byte.
    InputStream appendStream = this.dataSource.getAppendData(op.getStreamSegmentId(), op.getStreamSegmentOffset(), (int) op.getLength());
    if (appendStream == null) {
        return Futures.failedFuture(new ReconciliationFailureException(
                String.format("Unable to reconcile operation '%s' because no append data is associated with it.", op),
                this.metadata, storageInfo));
    }
    // Only read as much data as we need.
    long readLength = Math.min(op.getLastStreamSegmentOffset(), storageInfo.getLength()) - op.getStreamSegmentOffset();
    assert readLength > 0 : "Append Operation to be reconciled is beyond the Segment's StorageLength " + op;
    AtomicInteger bytesReadSoFar = new AtomicInteger();
    // Read all data from storage.
    byte[] storageData = new byte[(int) readLength];
    return Futures.loop(
            () -> bytesReadSoFar.get() < readLength,
            () -> this.storage.read(this.handle.get(), op.getStreamSegmentOffset() + bytesReadSoFar.get(),
                    storageData, bytesReadSoFar.get(), (int) readLength - bytesReadSoFar.get(), timer.getRemaining()),
            bytesRead -> {
                assert bytesRead > 0 : String.format("Unable to make any read progress when reconciling operation '%s' after reading %s bytes.", op, bytesReadSoFar);
                bytesReadSoFar.addAndGet(bytesRead);
            },
            this.executor)
            .thenApplyAsync(v -> {
                // Compare, byte-by-byte, the contents of the append.
                verifySame(appendStream, storageData, op, storageInfo);
                if (readLength >= op.getLength() && op.getLastStreamSegmentOffset() <= storageInfo.getLength()) {
                    // Operation has been completely validated; pop it off the list.
                    StorageOperation removedOp = this.operations.removeFirst();
                    assert op == removedOp : "Reconciled operation is not the same as removed operation";
                }
                return new FlushResult().withFlushedBytes(readLength);
            }, this.executor);
}
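verifySame is referenced above but not included in this snippet. A simplified sketch of what such a byte-by-byte check could look like, under the assumption that a mismatch simply surfaces as a RuntimeException (the real method's signature and failure handling are not shown here):
// Simplified sketch (assumption): compare the append's data stream against the bytes read back from Storage.
private void verifySameSketch(InputStream appendStream, byte[] storageData, StorageOperation op, SegmentProperties storageInfo) throws IOException {
    for (int i = 0; i < storageData.length; i++) {
        int appendByte = appendStream.read();
        if (appendByte != (storageData[i] & 0xFF)) {
            throw new IllegalStateException(String.format(
                    "Mismatch at segment offset %d while reconciling operation '%s' against segment '%s'.",
                    op.getStreamSegmentOffset() + i, op, storageInfo.getName()));
        }
    }
}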
use of io.pravega.segmentstore.contracts.SegmentProperties in project pravega by pravega.
the class StreamSegmentMapper method getStreamSegmentInfo.
// endregion
// region GetSegmentInfo
/**
* Gets information about a StreamSegment. If the Segment is active, it returns this information directly from the
* in-memory Metadata. If the Segment is not active, it fetches the information from Storage and returns it, without
* activating the segment in the Metadata or otherwise touching the DurableLog.
*
* @param streamSegmentName The case-sensitive StreamSegment Name.
* @param timeout Timeout for the Operation.
* @return A CompletableFuture that, when complete, will contain a SegmentProperties object with the desired
* information. If failed, it will contain the exception that caused the failure.
*/
CompletableFuture<SegmentProperties> getStreamSegmentInfo(String streamSegmentName, Duration timeout) {
    long streamSegmentId = this.containerMetadata.getStreamSegmentId(streamSegmentName, true);
    CompletableFuture<SegmentProperties> result;
    if (isValidStreamSegmentId(streamSegmentId)) {
        // Looks like the Segment is active and we have it in our Metadata. Return the result from there.
        SegmentMetadata sm = this.containerMetadata.getStreamSegmentMetadata(streamSegmentId);
        if (sm.isDeleted()) {
            result = Futures.failedFuture(new StreamSegmentNotExistsException(streamSegmentName));
        } else {
            result = CompletableFuture.completedFuture(sm.getSnapshot());
        }
    } else {
        // The Segment is not yet active.
        // First, check to see if we have a pending assignment. If so, piggyback on that.
        QueuedCallback<SegmentProperties> queuedCallback = checkConcurrentAssignment(streamSegmentName,
                id -> CompletableFuture.completedFuture(this.containerMetadata.getStreamSegmentMetadata(id).getSnapshot()));
        if (queuedCallback != null) {
            result = queuedCallback.result;
        } else {
            // Not in metadata and no concurrent assignments. Go to Storage and get what's needed.
            result = getSegmentInfoFromStorage(streamSegmentName, timeout);
        }
    }
    return result;
}
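A hypothetical caller would consume the returned SegmentProperties as shown below; the mapper reference and segment name are assumptions for illustration, not part of the snippet above.
// Hypothetical usage sketch: inspect the SegmentProperties once the lookup completes.
mapper.getStreamSegmentInfo("mySegment", Duration.ofSeconds(30))
      .thenAccept(props -> log.info("Segment '{}': length={}, startOffset={}, sealed={}",
              props.getName(), props.getLength(), props.getStartOffset(), props.isSealed()));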
use of io.pravega.segmentstore.contracts.SegmentProperties in project pravega by pravega.
the class StreamSegmentMapper method createNewTransactionStreamSegment.
/**
* Creates a new Transaction StreamSegment for an existing Parent StreamSegment and persists the given attributes (in Storage).
*
* @param parentStreamSegmentName The case-sensitive StreamSegment Name of the Parent StreamSegment.
* @param transactionId A unique identifier for the transaction to be created.
* @param attributes The initial attributes for the Transaction, if any.
* @param timeout Timeout for the operation.
* @return A CompletableFuture that, when completed normally, will contain the name of the newly created Transaction StreamSegment.
* If the operation failed, this will contain the exception that caused the failure.
* @throws IllegalArgumentException If the given parent StreamSegment cannot have a Transaction (because it is deleted, sealed, or nonexistent).
*/
public CompletableFuture<String> createNewTransactionStreamSegment(String parentStreamSegmentName, UUID transactionId, Collection<AttributeUpdate> attributes, Duration timeout) {
    long traceId = LoggerHelpers.traceEnterWithContext(log, traceObjectId, "createNewTransactionStreamSegment", parentStreamSegmentName);
    // We cannot create a Transaction StreamSegment for what looks like another Transaction.
    Exceptions.checkArgument(StreamSegmentNameUtils.getParentStreamSegmentName(parentStreamSegmentName) == null,
            "parentStreamSegmentName", "Cannot create a Transaction for a Transaction.");
    // Validate that the Parent StreamSegment exists.
    TimeoutTimer timer = new TimeoutTimer(timeout);
    CompletableFuture<Void> parentCheck = null;
    long mappedParentId = this.containerMetadata.getStreamSegmentId(parentStreamSegmentName, true);
    if (isValidStreamSegmentId(mappedParentId)) {
        SegmentProperties parentInfo = this.containerMetadata.getStreamSegmentMetadata(mappedParentId);
        if (parentInfo != null) {
            parentCheck = validateParentSegmentEligibility(parentInfo);
        }
    }
    if (parentCheck == null) {
        // The parent is not registered in the metadata. Get the required info from Storage and don't map it unnecessarily.
        parentCheck = this.storage.getStreamSegmentInfo(parentStreamSegmentName, timer.getRemaining())
                .thenCompose(this::validateParentSegmentEligibility);
    }
    String transactionName = StreamSegmentNameUtils.getTransactionNameFromId(parentStreamSegmentName, transactionId);
    return parentCheck
            .thenComposeAsync(parentId -> createSegmentInStorageWithRecovery(transactionName, attributes, timer), this.executor)
            .thenApply(v -> {
                LoggerHelpers.traceLeave(log, traceObjectId, "createNewTransactionStreamSegment", traceId, parentStreamSegmentName, transactionName);
                return transactionName;
            });
}
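A hypothetical caller might create a Transaction segment as sketched below; the parent segment name and the empty attribute list are assumptions for illustration, not part of the snippet above.
// Hypothetical usage sketch: create a Transaction segment for an existing parent with no initial attributes.
UUID transactionId = UUID.randomUUID();
mapper.createNewTransactionStreamSegment("parentSegment", transactionId, Collections.emptyList(), Duration.ofSeconds(30))
      .thenAccept(transactionName -> log.info("Created Transaction segment '{}'.", transactionName));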