Use of io.pravega.segmentstore.contracts.StreamSegmentNotExistsException in project pravega by pravega.
From the class StreamSegmentMapper, method getOrAssignStreamSegmentId.
// endregion
// region Segment Id Assignment
/**
* Attempts to get an existing StreamSegmentId for the given case-sensitive StreamSegment Name, and then invokes the
* given Function with the Id.
* * If the Segment is already mapped in the Metadata, the existing Id is used.
* * Otherwise if the Segment had previously been assigned an id (and saved in the State Store), that Id will be
* reused.
* * Otherwise, it atomically assigns a new Id and stores it in the Metadata and DurableLog.
* <p>
* If multiple requests for assignment arrive for the same StreamSegment in parallel (or while an assignment is in progress),
* they will be queued up in the order received and will be invoked in that same order once the assignment completes.
* <p>
* If the given streamSegmentName refers to a Transaction StreamSegment, this will attempt to validate that the Transaction is still
* valid by checking the Parent's existence in addition to the Transaction's existence.
*
* @param streamSegmentName The case-sensitive StreamSegment Name.
* @param timeout The timeout for the operation.
* @param thenCompose A Function that consumes a StreamSegmentId and returns a CompletableFuture that will indicate
* when the consumption of that StreamSegmentId is complete. This Function will be invoked
* synchronously if the StreamSegmentId is already mapped, or asynchronously (after assignment) otherwise.
* @param <T> Type of the return value.
* @return A CompletableFuture that, when completed normally, will contain the result of the given Function (thenCompose)
* applied to the assigned/retrieved StreamSegmentId. If failed, this will contain the exception that caused the failure.
*/
<T> CompletableFuture<T> getOrAssignStreamSegmentId(String streamSegmentName, Duration timeout, Function<Long, CompletableFuture<T>> thenCompose) {
    Preconditions.checkNotNull(thenCompose, "thenCompose");

    // Check to see if the metadata already knows about this Segment.
    long streamSegmentId = this.containerMetadata.getStreamSegmentId(streamSegmentName, true);
    if (isValidStreamSegmentId(streamSegmentId)) {
        // We already have a value, just return it (but make sure the Segment has not been deleted).
        if (this.containerMetadata.getStreamSegmentMetadata(streamSegmentId).isDeleted()) {
            return Futures.failedFuture(new StreamSegmentNotExistsException(streamSegmentName));
        } else {
            // Even though we have the value in the metadata, we need to be very careful not to invoke this callback
            // before any other existing callbacks are invoked. As such, verify if we have an existing PendingRequest
            // for this segment - if so, tag onto it so we invoke these callbacks in the correct order.
            QueuedCallback<T> queuedCallback = checkConcurrentAssignment(streamSegmentName, thenCompose);
            return queuedCallback == null ? thenCompose.apply(streamSegmentId) : queuedCallback.result;
        }
    }

    // See if anyone else is currently waiting to get this StreamSegment's id.
    QueuedCallback<T> queuedCallback;
    boolean needsAssignment = false;
    synchronized (this.assignmentLock) {
        PendingRequest pendingRequest = this.pendingRequests.getOrDefault(streamSegmentName, null);
        if (pendingRequest == null) {
            needsAssignment = true;
            pendingRequest = new PendingRequest();
            this.pendingRequests.put(streamSegmentName, pendingRequest);
        }

        queuedCallback = new QueuedCallback<>(thenCompose);
        pendingRequest.callbacks.add(queuedCallback);
    }

    // We are the first/only ones requesting this id; go ahead and assign an id.
    if (needsAssignment) {
        // Determine if given StreamSegmentName is actually a Transaction StreamSegmentName.
        String parentStreamSegmentName = StreamSegmentNameUtils.getParentStreamSegmentName(streamSegmentName);
        if (parentStreamSegmentName == null) {
            // Stand-alone StreamSegment.
            this.executor.execute(() -> assignStreamSegmentId(streamSegmentName, timeout));
        } else {
            this.executor.execute(() -> assignTransactionStreamSegmentId(streamSegmentName, parentStreamSegmentName, timeout));
        }
    }

    return queuedCallback.result;
}
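The queuing behavior described in the Javadoc above (the first caller for a name triggers the assignment, later callers queue behind it, and all queued callbacks complete in arrival order) can be sketched independently of Pravega. This is a minimal, self-contained illustration only: the class name IdAssignmentQueue, the single-threaded executor, and the hash-based fake id are all invented for the sketch, not taken from the Pravega codebase.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.function.Function;

// Toy model of the "pending request" pattern used by getOrAssignStreamSegmentId:
// the first caller for a name starts the assignment, later callers piggyback on it,
// and every queued callback is completed in the order it arrived.
class IdAssignmentQueue {
    private final Map<String, List<CompletableFuture<Long>>> pending = new HashMap<>();
    private final Map<String, Long> assigned = new ConcurrentHashMap<>();
    private final ExecutorService executor = Executors.newSingleThreadExecutor();

    <T> CompletableFuture<T> getOrAssign(String name, Function<Long, CompletableFuture<T>> thenCompose) {
        Long known = assigned.get(name);
        if (known != null) {
            // Already mapped: invoke the callback synchronously, as the Javadoc above describes.
            return thenCompose.apply(known);
        }

        CompletableFuture<Long> idFuture = new CompletableFuture<>();
        boolean needsAssignment = false;
        synchronized (pending) {
            List<CompletableFuture<Long>> queue = pending.get(name);
            if (queue == null) {
                queue = new ArrayList<>();
                pending.put(name, queue);
                needsAssignment = true; // We are the first caller for this name.
            }
            queue.add(idFuture);        // Preserve arrival order.
        }

        if (needsAssignment) {
            // Fake "assignment": derive an id from the name. The real code consults the State
            // Store and writes to the DurableLog here.
            executor.execute(() -> completeAssignment(name, Math.abs(name.hashCode()) + 1L));
        }
        return idFuture.thenCompose(thenCompose);
    }

    private void completeAssignment(String name, long id) {
        List<CompletableFuture<Long>> queue;
        synchronized (pending) {
            assigned.put(name, id);
            queue = pending.remove(name);
        }
        if (queue != null) {
            // Complete the queued callbacks in the order they were registered.
            queue.forEach(f -> f.complete(id));
        }
    }
}

In the real StreamSegmentMapper the assignment step additionally consults the State Store and records a mapping in the Metadata and DurableLog; the toy version above replaces all of that with a fake hash-based id so that only the queuing semantics remain visible.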
Use of io.pravega.segmentstore.contracts.StreamSegmentNotExistsException in project pravega by pravega.
From the class StreamSegmentMapper, method getStreamSegmentInfo.
// endregion
// region GetSegmentInfo
/**
* Gets information about a StreamSegment. If the Segment is active, it returns this information directly from the
* in-memory Metadata. If the Segment is not active, it fetches the information from Storage and returns it, without
* activating the segment in the Metadata or otherwise touching the DurableLog.
*
* @param streamSegmentName The case-sensitive StreamSegment Name.
* @param timeout Timeout for the Operation.
* @return A CompletableFuture that, when complete, will contain a SegmentProperties object with the desired
* information. If failed, it will contain the exception that caused the failure.
*/
CompletableFuture<SegmentProperties> getStreamSegmentInfo(String streamSegmentName, Duration timeout) {
    long streamSegmentId = this.containerMetadata.getStreamSegmentId(streamSegmentName, true);
    CompletableFuture<SegmentProperties> result;
    if (isValidStreamSegmentId(streamSegmentId)) {
        // Looks like the Segment is active and we have it in our Metadata. Return the result from there.
        SegmentMetadata sm = this.containerMetadata.getStreamSegmentMetadata(streamSegmentId);
        if (sm.isDeleted()) {
            result = Futures.failedFuture(new StreamSegmentNotExistsException(streamSegmentName));
        } else {
            result = CompletableFuture.completedFuture(sm.getSnapshot());
        }
    } else {
        // The Segment is not yet active.
        // First, check to see if we have a pending assignment. If so, piggyback on that.
        QueuedCallback<SegmentProperties> queuedCallback = checkConcurrentAssignment(streamSegmentName,
                id -> CompletableFuture.completedFuture(this.containerMetadata.getStreamSegmentMetadata(id).getSnapshot()));
        if (queuedCallback != null) {
            result = queuedCallback.result;
        } else {
            // Not in metadata and no concurrent assignments. Go to Storage and get what's needed.
            result = getSegmentInfoFromStorage(streamSegmentName, timeout);
        }
    }

    return result;
}
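As a hedged caller-side sketch (the mapper reference, the literal segment name, and the timeout value are invented for illustration), this is roughly how the returned future is typically consumed, including the StreamSegmentNotExistsException with which it completes when the segment is deleted or absent from Storage:

import java.time.Duration;
import java.util.concurrent.CompletionException;

// Illustration only: "mapper" is assumed to be a StreamSegmentMapper instance from the class above.
mapper.getStreamSegmentInfo("scope/stream/segment-0", Duration.ofSeconds(30))
      .thenAccept(info -> System.out.println("Segment length: " + info.getLength()))
      .exceptionally(ex -> {
          // Futures usually deliver the failure wrapped in a CompletionException; unwrap it first.
          Throwable cause = (ex instanceof CompletionException && ex.getCause() != null) ? ex.getCause() : ex;
          if (cause instanceof StreamSegmentNotExistsException) {
              // The segment was deleted or never created; report "not found" rather than retrying.
              System.out.println("Segment does not exist.");
          }
          return null;
      });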
Use of io.pravega.segmentstore.contracts.StreamSegmentNotExistsException in project pravega by pravega.
From the class ContainerMetadataUpdateTransaction, method preProcessOperation.
/**
* Pre-processes the given Operation. See OperationMetadataUpdater.preProcessOperation for more details on behavior.
*
* @param operation The operation to pre-process.
* @throws ContainerException If the given operation was rejected given the current state of the container metadata.
* @throws StreamSegmentException If the given operation was incompatible with the current state of the Segment.
* For example: StreamSegmentNotExistsException, StreamSegmentSealedException or
* StreamSegmentMergedException.
*/
void preProcessOperation(Operation operation) throws ContainerException, StreamSegmentException {
    checkNotSealed();
    SegmentMetadataUpdateTransaction segmentMetadata = null;
    if (operation instanceof SegmentOperation) {
        segmentMetadata = getSegmentUpdateTransaction(((SegmentOperation) operation).getStreamSegmentId());
        if (segmentMetadata.isDeleted()) {
            throw new StreamSegmentNotExistsException(segmentMetadata.getName());
        }
    }

    if (operation instanceof StreamSegmentAppendOperation) {
        segmentMetadata.preProcessOperation((StreamSegmentAppendOperation) operation);
    } else if (operation instanceof StreamSegmentSealOperation) {
        segmentMetadata.preProcessOperation((StreamSegmentSealOperation) operation);
    } else if (operation instanceof MergeTransactionOperation) {
        MergeTransactionOperation mbe = (MergeTransactionOperation) operation;
        SegmentMetadataUpdateTransaction transactionMetadata = getSegmentUpdateTransaction(mbe.getTransactionSegmentId());
        transactionMetadata.preProcessAsTransactionSegment(mbe);
        segmentMetadata.preProcessAsParentSegment(mbe, transactionMetadata);
    } else if (operation instanceof StreamSegmentMapOperation) {
        preProcessMetadataOperation((StreamSegmentMapOperation) operation);
    } else if (operation instanceof UpdateAttributesOperation) {
        segmentMetadata.preProcessOperation((UpdateAttributesOperation) operation);
    } else if (operation instanceof MetadataCheckpointOperation) {
        // MetadataCheckpointOperations do not require preProcess and accept; they can be handled in a single stage.
        processMetadataOperation((MetadataCheckpointOperation) operation);
    } else if (operation instanceof StorageMetadataCheckpointOperation) {
        // StorageMetadataCheckpointOperations do not require preProcess and accept; they can be handled in a single stage.
        processMetadataOperation((StorageMetadataCheckpointOperation) operation);
    } else if (operation instanceof StreamSegmentTruncateOperation) {
        segmentMetadata.preProcessOperation((StreamSegmentTruncateOperation) operation);
    }
}
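A hedged sketch of how a caller might consume this method: the helper name tryPreProcess is invented, Pravega imports are omitted (the types are the ones appearing in the excerpt above), and same-package access to the package-private preProcessOperation is assumed. It only illustrates translating the checked exceptions, including StreamSegmentNotExistsException thrown for a deleted segment, into a failed CompletableFuture, the shape in which such rejections are typically reported back to the original writer.

// Illustration only; not part of the Pravega codebase.
CompletableFuture<Void> tryPreProcess(ContainerMetadataUpdateTransaction txn, Operation operation) {
    try {
        txn.preProcessOperation(operation);
        return CompletableFuture.completedFuture(null);
    } catch (StreamSegmentNotExistsException ex) {
        // The target segment is deleted or missing, so this operation can never succeed; fail fast.
        return Futures.failedFuture(ex);
    } catch (ContainerException | StreamSegmentException ex) {
        // Other rejections: sealed or merged segments, bad offsets, metadata limits, and so on.
        return Futures.failedFuture(ex);
    }
}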
Use of io.pravega.segmentstore.contracts.StreamSegmentNotExistsException in project pravega by pravega.
From the class ContainerReadIndexTests, method testCacheEviction.
/**
* Tests the ability to evict entries from the ReadIndex under various conditions:
* * If an entry is aged out
* * If an entry is pushed out because of cache space pressure.
* <p>
* This also verifies that certain entries, such as RedirectReadIndexEntries and entries after the Storage Offset are
* not removed.
* <p>
* The way this test goes is as follows (it's pretty subtle, because there aren't many ways to hook into the ReadIndex and see what it's doing):
* 1. It creates a bunch of segments, and populates them in storage (each) up to offset N/2-1 (this is called pre-storage)
* 2. It populates the ReadIndex for each of those segments from offset N/2 to offset N-1 (this is called post-storage)
* 3. It loads all the data from Storage into the ReadIndex, in entries of size equal to those already loaded in step #2.
* 3a. At this point, all the entries added in step #2 have Generations 0..A/4-1, and step #3 have generations A/4..A-1
* 4. Append more data at the end. This forces the generation to increase to 1.25A.
* 4a. Nothing should be evicted from the cache now, since the earliest items are all post-storage.
* 5. We 'touch' (read) the first 1/3 of pre-storage entries (offsets 0..N/4).
* 5a. At this point, those entries (offsets 0..N/6) will have the newest generations (1.25A..1.5A)
* 6. We append more data (equivalent to the data we touched)
* 6a. Nothing should be evicted, since those generations that were just eligible for removal were touched and bumped up.
* 7. We forcefully increase the current generation by 1 (without touching the ReadIndex)
* 7a. At this point, we expect all the pre-storage items, except the touched ones, to be evicted. This is generations 0.25A-0.75A.
* 8. Update the metadata and indicate that all the post-storage entries are now pre-storage and bump the generation by 0.75A.
* 8a. At this point, we expect all former post-storage items and pre-storage items to be evicted (in this order).
* <p>
* The final order of eviction (in terms of offsets, for each segment), is:
* * 0.25N-0.75N, 0.75N..N, N..1.25N, 0..0.25N, 1.25N..1.5N (remember that we added quite a bunch of items after the initial run).
*/
@Test
@SuppressWarnings("checkstyle:CyclomaticComplexity")
public void testCacheEviction() throws Exception {
    // Create a CachePolicy with a set number of generations and a known max size.
    // Each generation contains exactly one entry, so the number of generations is also the number of entries.
    final int appendSize = 100;

    // This also doubles as number of generations (each generation, we add one append for each segment).
    final int entriesPerSegment = 100;
    final int cacheMaxSize = SEGMENT_COUNT * entriesPerSegment * appendSize;

    // 25% of the entries are beyond the StorageOffset.
    final int postStorageEntryCount = entriesPerSegment / 4;

    // 75% of the entries are before the StorageOffset.
    final int preStorageEntryCount = entriesPerSegment - postStorageEntryCount;
    CachePolicy cachePolicy = new CachePolicy(cacheMaxSize, Duration.ofMillis(1000 * 2 * entriesPerSegment), Duration.ofMillis(1000));

    // To properly test this, we want predictable storage reads.
    ReadIndexConfig config = ConfigHelpers.withInfiniteCachePolicy(ReadIndexConfig.builder().with(ReadIndexConfig.STORAGE_READ_ALIGNMENT, appendSize)).build();
    ArrayList<CacheKey> removedKeys = new ArrayList<>();
    @Cleanup TestContext context = new TestContext(config, cachePolicy);

    // Record every cache removal.
    context.cacheFactory.cache.removeCallback = removedKeys::add;

    // Create the segments (metadata + storage).
    ArrayList<Long> segmentIds = createSegments(context);
    createSegmentsInStorage(context);

    // Populate the Storage with appropriate data.
    byte[] preStorageData = new byte[preStorageEntryCount * appendSize];
    for (long segmentId : segmentIds) {
        UpdateableSegmentMetadata sm = context.metadata.getStreamSegmentMetadata(segmentId);
        val handle = context.storage.openWrite(sm.getName()).join();
        context.storage.write(handle, 0, new ByteArrayInputStream(preStorageData), preStorageData.length, TIMEOUT).join();
        sm.setStorageLength(preStorageData.length);
        sm.setLength(preStorageData.length);
    }

    // Callback that appends one entry at the end of the given segment id.
    Consumer<Long> appendOneEntry = segmentId -> {
        UpdateableSegmentMetadata sm = context.metadata.getStreamSegmentMetadata(segmentId);
        byte[] data = new byte[appendSize];
        long offset = sm.getLength();
        sm.setLength(offset + data.length);
        try {
            context.readIndex.append(segmentId, offset, data);
        } catch (StreamSegmentNotExistsException ex) {
            throw new CompletionException(ex);
        }
    };

    // Populate the ReadIndex with the Append entries (post-StorageOffset).
    for (int i = 0; i < postStorageEntryCount; i++) {
        segmentIds.forEach(appendOneEntry);

        // Each time we make a round of appends (one per segment), we increment the generation in the CacheManager.
        context.cacheManager.applyCachePolicy();
    }

    // Read all the data from Storage, making sure we carefully associate them with the proper generation.
    for (int i = 0; i < preStorageEntryCount; i++) {
        long offset = i * appendSize;
        for (long segmentId : segmentIds) {
            @Cleanup ReadResult result = context.readIndex.read(segmentId, offset, appendSize, TIMEOUT);
            ReadResultEntry resultEntry = result.next();
            Assert.assertEquals("Unexpected type of ReadResultEntry when trying to load up data into the ReadIndex Cache.", ReadResultEntryType.Storage, resultEntry.getType());
            resultEntry.requestContent(TIMEOUT);
            ReadResultEntryContents contents = resultEntry.getContent().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
            Assert.assertFalse("Not expecting more data to be available for reading.", result.hasNext());
            Assert.assertEquals("Unexpected ReadResultEntry length when trying to load up data into the ReadIndex Cache.", appendSize, contents.getLength());
        }

        context.cacheManager.applyCachePolicy();
    }

    Assert.assertEquals("Not expecting any removed Cache entries at this point (cache is not full).", 0, removedKeys.size());

    // Append more data (equivalent to all post-storage entries), and verify that NO entries are being evicted (we cannot evict post-storage entries).
    for (int i = 0; i < postStorageEntryCount; i++) {
        segmentIds.forEach(appendOneEntry);
        context.cacheManager.applyCachePolicy();
    }

    Assert.assertEquals("Not expecting any removed Cache entries at this point (only eligible entries were post-storage).", 0, removedKeys.size());

    // 'Touch' the first few entries read from storage. This should move them to the back of the queue (they won't be the first ones to be evicted).
    int touchCount = preStorageEntryCount / 3;
    for (int i = 0; i < touchCount; i++) {
        long offset = i * appendSize;
        for (long segmentId : segmentIds) {
            @Cleanup ReadResult result = context.readIndex.read(segmentId, offset, appendSize, TIMEOUT);
            ReadResultEntry resultEntry = result.next();
            Assert.assertEquals("Unexpected type of ReadResultEntry when trying to load up data into the ReadIndex Cache.", ReadResultEntryType.Cache, resultEntry.getType());
        }
    }

    // Append more data (equivalent to the amount of data we 'touched'), and verify that the entries we just touched are not being removed.
    for (int i = 0; i < touchCount; i++) {
        segmentIds.forEach(appendOneEntry);
        context.cacheManager.applyCachePolicy();
    }

    Assert.assertEquals("Not expecting any removed Cache entries at this point (we touched old entries and they now have the newest generation).", 0, removedKeys.size());

    // Increment the generations so that we are caught up to just before the generation where the "touched" items now live.
    context.cacheManager.applyCachePolicy();

    // We expect all but the 'touchCount' pre-Storage entries to be removed.
    int expectedRemovalCount = (preStorageEntryCount - touchCount) * SEGMENT_COUNT;
    Assert.assertEquals("Unexpected number of removed entries after having forced out all pre-storage entries.", expectedRemovalCount, removedKeys.size());

    // Now update the metadata and indicate that all the post-storage data has been moved to storage.
    segmentIds.forEach(segmentId -> {
        UpdateableSegmentMetadata sm = context.metadata.getStreamSegmentMetadata(segmentId);
        sm.setStorageLength(sm.getLength());
    });

    // We add one artificial entry, which we'll be touching forever and ever; this forces the CacheManager to
    // update its current generation every time. We will be ignoring this entry for our test.
    SegmentMetadata readSegment = context.metadata.getStreamSegmentMetadata(segmentIds.get(0));
    appendOneEntry.accept(readSegment.getId());

    // Now evict everything (whether by size or by aging out).
    for (int i = 0; i < cachePolicy.getMaxGenerations(); i++) {
        @Cleanup ReadResult result = context.readIndex.read(readSegment.getId(), readSegment.getLength() - appendSize, appendSize, TIMEOUT);
        result.next();
        context.cacheManager.applyCachePolicy();
    }

    int expectedRemovalCountPerSegment = entriesPerSegment + touchCount + postStorageEntryCount;
    int expectedTotalRemovalCount = SEGMENT_COUNT * expectedRemovalCountPerSegment;
    Assert.assertEquals("Unexpected number of removed entries after having forced out all the entries.", expectedTotalRemovalCount, removedKeys.size());

    // Finally, verify that the evicted items are in the correct order (for each segment). See this test's description for details.
    for (long segmentId : segmentIds) {
        List<CacheKey> segmentRemovedKeys = removedKeys.stream().filter(key -> key.getStreamSegmentId() == segmentId).collect(Collectors.toList());
        Assert.assertEquals("Unexpected number of removed entries for segment " + segmentId, expectedRemovalCountPerSegment, segmentRemovedKeys.size());

        // The correct order of eviction (N=entriesPerSegment) is: 0.25N-0.75N, 0.75N..N, N..1.25N, 0..0.25N, 1.25N..1.5N.
        // This is equivalent to the following tests:
        // 0.25N-1.25N
        checkOffsets(segmentRemovedKeys, segmentId, 0, entriesPerSegment, entriesPerSegment * appendSize / 4, appendSize);

        // 0..0.25N
        checkOffsets(segmentRemovedKeys, segmentId, entriesPerSegment, entriesPerSegment / 4, 0, appendSize);

        // 1.25N..1.5N
        checkOffsets(segmentRemovedKeys, segmentId, entriesPerSegment + entriesPerSegment / 4, entriesPerSegment / 4, (int) (entriesPerSegment * appendSize * 1.25), appendSize);
    }
}
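The generation mechanics this test exercises can be reduced to a small stand-alone model. Everything below is invented for illustration (class name GenerationCacheModel, the per-offset bookkeeping, the fixed MAX_GENERATIONS constant) and deliberately ignores cache-size pressure: it only captures the two rules the assertions above rely on, namely that entries are evicted once their generation falls out of the allowed window, and that entries past the Storage offset are never evicted regardless of age.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Toy model (not Pravega's CacheManager) of generation-based eviction for a single segment.
class GenerationCacheModel {
    static final int MAX_GENERATIONS = 100;
    private int currentGeneration = 0;
    private final Map<Long, Integer> entryGeneration = new LinkedHashMap<>(); // entry offset -> generation
    private long storageOffset = 0;                                           // data below this is in Storage

    // Reads and appends both stamp the entry with the current generation ("touching" it).
    void touch(long offset) {
        entryGeneration.put(offset, currentGeneration);
    }

    void setStorageOffset(long offset) {
        this.storageOffset = offset;
    }

    // Advances the generation and returns the offsets evicted by this pass.
    List<Long> applyCachePolicy() {
        currentGeneration++;
        int oldestPermitted = currentGeneration - MAX_GENERATIONS;
        List<Long> evicted = new ArrayList<>();
        entryGeneration.entrySet().removeIf(e -> {
            boolean tooOld = e.getValue() < oldestPermitted;
            boolean alreadyInStorage = e.getKey() < storageOffset;
            if (tooOld && alreadyInStorage) {
                evicted.add(e.getKey());
                return true;
            }
            // Entries at or beyond the Storage offset are never evicted, no matter their age.
            return false;
        });
        return evicted;
    }
}

The real CacheManager also evicts under cache-size pressure and tracks generations per CacheKey across all segments, which is why the test above interleaves appends, storage reads and applyCachePolicy() calls to control which generation every entry lands in.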
Use of io.pravega.segmentstore.contracts.StreamSegmentNotExistsException in project pravega by pravega.
From the class ContainerReadIndexTests, method testReadDirect.
/**
* Tests the readDirect() method on the ReadIndex.
*/
@Test
public void testReadDirect() throws Exception {
    final int randomAppendLength = 1024;
    @Cleanup TestContext context = new TestContext();
    ArrayList<Long> segmentIds = new ArrayList<>();
    final long segmentId = createSegment(0, context);
    final UpdateableSegmentMetadata segmentMetadata = context.metadata.getStreamSegmentMetadata(segmentId);
    segmentIds.add(segmentId);
    HashMap<Long, ArrayList<Long>> transactionsBySegment = createTransactions(segmentIds, 1, context);
    final long mergedTxId = transactionsBySegment.get(segmentId).get(0);

    // Add data to all segments.
    HashMap<Long, ByteArrayOutputStream> segmentContents = new HashMap<>();
    transactionsBySegment.values().forEach(segmentIds::addAll);
    appendData(segmentIds, segmentContents, context);

    // Mark everything so far (minus a few bytes) as being written to storage.
    segmentMetadata.setStorageLength(segmentMetadata.getLength() - 100);

    // Now partially merge a second transaction.
    final long mergedTxOffset = beginMergeTransaction(mergedTxId, segmentMetadata, segmentContents, context);

    // Add one more append after all of this.
    final long endOfMergedDataOffset = segmentMetadata.getLength();
    byte[] appendData = new byte[randomAppendLength];
    new Random(0).nextBytes(appendData);
    appendSingleWrite(segmentId, appendData, context);
    recordAppend(segmentId, appendData, segmentContents);

    // Verify we are not allowed to read from the range which has already been committed to Storage (invalid arguments).
    for (AtomicLong offset = new AtomicLong(0); offset.get() < segmentMetadata.getStorageLength(); offset.incrementAndGet()) {
        AssertExtensions.assertThrows(String.format("readDirect allowed reading from an illegal offset (%s).", offset), () -> context.readIndex.readDirect(segmentId, offset.get(), 1), ex -> ex instanceof IllegalArgumentException);
    }

    // Verify that any reads overlapping a merged transaction return null (that is, we cannot retrieve the requested data).
    for (long offset = mergedTxOffset - 1; offset < endOfMergedDataOffset; offset++) {
        InputStream resultStream = context.readIndex.readDirect(segmentId, offset, 2);
        Assert.assertNull("readDirect() returned data overlapping a partially merged transaction", resultStream);
    }

    // Verify that we can read from any other offset.
    final byte[] expectedData = segmentContents.get(segmentId).toByteArray();
    BiConsumer<Long, Long> verifyReadResult = (startOffset, endOffset) -> {
        int readLength = (int) (endOffset - startOffset);
        while (readLength > 0) {
            InputStream actualDataStream;
            try {
                actualDataStream = context.readIndex.readDirect(segmentId, startOffset, readLength);
            } catch (StreamSegmentNotExistsException ex) {
                throw new CompletionException(ex);
            }

            Assert.assertNotNull(String.format("Unexpected result when data is readily available for Offset = %s, Length = %s.", startOffset, readLength), actualDataStream);
            byte[] actualData = new byte[readLength];
            try {
                int bytesCopied = StreamHelpers.readAll(actualDataStream, actualData, 0, readLength);
                Assert.assertEquals(String.format("Unexpected number of bytes read for Offset = %s, Length = %s (pre-partial-merge).", startOffset, readLength), readLength, bytesCopied);
            } catch (IOException ex) {
                // Technically not possible.
                throw new UncheckedIOException(ex);
            }

            AssertExtensions.assertArrayEquals("Unexpected data read from the segment at offset " + startOffset, expectedData, startOffset.intValue(), actualData, 0, actualData.length);

            // Setup the read for the next test (where we read 1 less byte than now).
            readLength--;
            if (readLength % 2 == 0) {
                // For every 2 bytes of decreased read length, increase the start offset by 1. This allows for a greater
                // number of combinations to be tested.
                startOffset++;
            }
        }
    };

    // Verify that we can read the cached data just after the StorageLength but before the merged transaction.
    verifyReadResult.accept(segmentMetadata.getStorageLength(), mergedTxOffset);

    // Verify that we can read the cached data just after the merged transaction but before the end of the segment.
    verifyReadResult.accept(endOfMergedDataOffset, segmentMetadata.getLength());
}
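The test drains each readDirect() result with StreamHelpers.readAll because InputStream.read() may return fewer bytes than requested in a single call. A plain-Java equivalent of that loop is sketched below; the class and method names (StreamReadUtil.readAll) are invented for illustration and are not the Pravega helper itself.

import java.io.IOException;
import java.io.InputStream;

// Minimal read-all loop: keep reading until 'length' bytes were copied or the stream ends.
final class StreamReadUtil {
    static int readAll(InputStream source, byte[] target, int offset, int length) throws IOException {
        int totalRead = 0;
        while (totalRead < length) {
            int read = source.read(target, offset + totalRead, length - totalRead);
            if (read < 0) {
                break; // End of stream reached before 'length' bytes were available.
            }
            totalRead += read;
        }
        return totalRead; // Callers (like the test above) can assert this equals 'length'.
    }
}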