Use of io.pravega.segmentstore.server.UpdateableSegmentMetadata in project pravega by pravega.
From the class ContainerReadIndexTests, method testConcurrentReadTransactionStorageMerge.
/**
* Tests the following scenario, where the Read Index has a read from a portion in a parent segment where a transaction
* was just merged (fully in storage), but the read request might result in either an ObjectClosedException or
* StreamSegmentNotExistsException:
* * A Parent Segment has a Transaction with some data in it, and at least 1 byte of data not in cache.
* * The Transaction is begin-merged in the parent (Tier 1 only).
* * A Read Request is issued to the Parent for the range of data from the Transaction, which includes the 1 byte not in cache.
* * The Transaction is fully merged (Tier 2).
* * The Read Request is invoked and its content requested. This should correctly retrieve the data from the Parent
* Segment in Storage, and not attempt to access the now-defunct Transaction segment.
*/
@Test
public void testConcurrentReadTransactionStorageMerge() throws Exception {
    CachePolicy cachePolicy = new CachePolicy(1, Duration.ZERO, Duration.ofMillis(1));
    @Cleanup TestContext context = new TestContext(DEFAULT_CONFIG, cachePolicy);

    // Create parent segment and one transaction
    long parentId = createSegment(0, context);
    UpdateableSegmentMetadata parentMetadata = context.metadata.getStreamSegmentMetadata(parentId);
    long transactionId = createTransaction(parentMetadata, 1, context);
    UpdateableSegmentMetadata transactionMetadata = context.metadata.getStreamSegmentMetadata(transactionId);
    createSegmentsInStorage(context);

    // Write something to the transaction, and make sure it also makes its way to Storage.
    byte[] writeData = getAppendData(transactionMetadata.getName(), transactionId, 0, 0);
    appendSingleWrite(transactionId, writeData, context);
    val transactionWriteHandle = context.storage.openWrite(transactionMetadata.getName()).join();
    context.storage.write(transactionWriteHandle, 0, new ByteArrayInputStream(writeData), writeData.length, TIMEOUT).join();
    transactionMetadata.setStorageLength(transactionMetadata.getLength());

    // Seal & Begin-merge the transaction (do not seal in storage).
    transactionMetadata.markSealed();
    parentMetadata.setLength(transactionMetadata.getLength());
    context.readIndex.beginMerge(parentId, 0, transactionId);
    transactionMetadata.markMerged();

    // Clear the cache.
    context.cacheManager.applyCachePolicy();

    // Issue read from the parent and fetch the first entry (there should only be one).
    ReadResult rr = context.readIndex.read(parentId, 0, writeData.length, TIMEOUT);
    Assert.assertTrue("Parent Segment read indicates no data available.", rr.hasNext());
    ReadResultEntry entry = rr.next();
    Assert.assertEquals("Unexpected offset for read result entry.", 0, entry.getStreamSegmentOffset());
    Assert.assertEquals("Served read result entry is not from storage.", ReadResultEntryType.Storage, entry.getType());

    // Merge the transaction in storage & complete-merge it.
    transactionMetadata.markSealed();
    transactionMetadata.markSealedInStorage();
    transactionMetadata.markDeleted();
    context.storage.seal(transactionWriteHandle, TIMEOUT).join();
    val parentWriteHandle = context.storage.openWrite(parentMetadata.getName()).join();
    context.storage.concat(parentWriteHandle, 0, transactionWriteHandle.getSegmentName(), TIMEOUT).join();
    parentMetadata.setStorageLength(parentMetadata.getLength());
    context.readIndex.completeMerge(parentId, transactionId);

    // Attempt to extract data from the read.
    entry.requestContent(TIMEOUT);
    ReadResultEntryContents contents = entry.getContent().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    byte[] readData = new byte[contents.getLength()];
    StreamHelpers.readAll(contents.getData(), readData, 0, readData.length);
    Assert.assertArrayEquals("Unexpected data read from parent segment.", writeData, readData);
}
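
For orientation, a minimal sketch (not taken from the Pravega sources; the class and method names here are made up for illustration) of just the UpdateableSegmentMetadata calls that drive the two merge phases above. A real caller would also update the ReadIndex and Storage, as the test does.

import io.pravega.segmentstore.server.UpdateableSegmentMetadata;

// Illustration only: condenses the metadata-side steps of the two-phase merge exercised above.
final class MergeLifecycleSketch {

    // Tier 1 (begin-merge): seal the transaction, extend the parent's in-memory length,
    // and mark the transaction as merged.
    static void beginMerge(UpdateableSegmentMetadata parent, UpdateableSegmentMetadata txn) {
        txn.markSealed();
        parent.setLength(parent.getLength() + txn.getLength());
        txn.markMerged();
    }

    // Tier 2 (complete-merge): once Storage has concatenated the segments, retire the
    // transaction's metadata and bring the parent's storage length up to date.
    static void completeMerge(UpdateableSegmentMetadata parent, UpdateableSegmentMetadata txn) {
        txn.markSealedInStorage();
        txn.markDeleted();
        parent.setStorageLength(parent.getLength());
    }
}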
Use of io.pravega.segmentstore.server.UpdateableSegmentMetadata in project pravega by pravega.
From the class StreamSegmentMapperTests, method testGetOrAssignStreamSegmentIdWithConcurrency.
/**
* Tests the ability of the StreamSegmentMapper to generate/return the Id of an existing StreamSegment, with concurrent requests.
* Also tests the ability to execute such callbacks in the order in which they were received.
*/
@Test
public void testGetOrAssignStreamSegmentIdWithConcurrency() throws Exception {
    // We set up a delay in the OperationLog process. We only do this for a stand-alone StreamSegment because the process
    // is driven by the same code for Transactions as well.
    final String segmentName = "Segment";
    final long segmentId = 12345;
    final String firstResult = "first";
    final String secondResult = "second";
    final String thirdResult = "third";
    HashSet<String> storageSegments = new HashSet<>();
    storageSegments.add(segmentName);
    @Cleanup TestContext context = new TestContext();
    setupStorageGetHandler(context, storageSegments, sn -> StreamSegmentInformation.builder().name(sn).build());

    CompletableFuture<Void> initialAddFuture = new CompletableFuture<>();
    CompletableFuture<Void> addInvoked = new CompletableFuture<>();
    AtomicBoolean operationLogInvoked = new AtomicBoolean(false);
    context.operationLog.addHandler = op -> {
        if (!(op instanceof StreamSegmentMapOperation)) {
            return Futures.failedFuture(new IllegalArgumentException("unexpected operation"));
        }
        if (operationLogInvoked.getAndSet(true)) {
            return Futures.failedFuture(new IllegalStateException("multiple calls to OperationLog.add"));
        }
        // Need to set SegmentId on operation.
        ((StreamSegmentMapOperation) op).setStreamSegmentId(segmentId);
        UpdateableSegmentMetadata segmentMetadata = context.metadata.mapStreamSegmentId(segmentName, segmentId);
        segmentMetadata.setStorageLength(0);
        segmentMetadata.setLength(0);
        addInvoked.complete(null);
        return initialAddFuture;
    };
    List<Integer> invocationOrder = Collections.synchronizedList(new ArrayList<>());

    // Second call is designed to hit when the first call still tries to assign the id, hence we test normal queueing.
    CompletableFuture<String> firstCall = context.mapper.getOrAssignStreamSegmentId(segmentName, TIMEOUT, id -> {
        Assert.assertEquals("Unexpected SegmentId (first).", segmentId, (long) id);
        invocationOrder.add(1);
        return CompletableFuture.completedFuture(firstResult);
    });
    CompletableFuture<String> secondCall = context.mapper.getOrAssignStreamSegmentId(segmentName, TIMEOUT, id -> {
        Assert.assertEquals("Unexpected SegmentId (second).", segmentId, (long) id);
        invocationOrder.add(2);
        return CompletableFuture.completedFuture(secondResult);
    });

    // Wait for the metadata to be updated properly.
    addInvoked.join();
    Assert.assertFalse("getOrAssignStreamSegmentId (first call) returned before OperationLog finished.", firstCall.isDone());
    Assert.assertFalse("getOrAssignStreamSegmentId (second call) returned before OperationLog finished.", secondCall.isDone());

    // Third call is designed to hit after the metadata has been updated, but prior to the other callbacks being invoked.
    // It verifies that even in that case it still executes in order.
    CompletableFuture<String> thirdCall = context.mapper.getOrAssignStreamSegmentId(segmentName, TIMEOUT, id -> {
        Assert.assertEquals("Unexpected SegmentId (third).", segmentId, (long) id);
        invocationOrder.add(3);
        return CompletableFuture.completedFuture(thirdResult);
    });

    initialAddFuture.complete(null);
    Assert.assertEquals("Unexpected result from firstCall.", firstResult, firstCall.join());
    Assert.assertEquals("Unexpected result from secondCall.", secondResult, secondCall.join());
    Assert.assertEquals("Unexpected result from thirdCall.", thirdResult, thirdCall.join());
    val expectedOrder = Arrays.asList(1, 2, 3);
    AssertExtensions.assertListEquals("Unexpected invocation order.", expectedOrder, invocationOrder, Integer::equals);
}
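
The essential UpdateableSegmentMetadata work above happens inside the stubbed OperationLog handler: it assigns the id on the map operation and registers the segment in the container metadata. Below is a reduced sketch of just that registration step; the helper class and method names are hypothetical, and UpdateableContainerMetadata is assumed to live alongside UpdateableSegmentMetadata in io.pravega.segmentstore.server.

import io.pravega.segmentstore.server.UpdateableContainerMetadata;
import io.pravega.segmentstore.server.UpdateableSegmentMetadata;

// Illustration only: the metadata registration performed inside the test's addHandler stub.
final class MapSegmentSketch {

    static UpdateableSegmentMetadata mapNewSegment(UpdateableContainerMetadata metadata, String name, long id) {
        // Register the segment under the chosen id and initialize both lengths to zero,
        // mirroring what the stubbed OperationLog.add handler does above.
        UpdateableSegmentMetadata sm = metadata.mapStreamSegmentId(name, id);
        sm.setStorageLength(0);
        sm.setLength(0);
        return sm;
    }
}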
Use of io.pravega.segmentstore.server.UpdateableSegmentMetadata in project pravega by pravega.
From the class ContainerMetadataUpdateTransactionTests, method testPreProcessAndAcceptWithInvalidSegmentId.
/**
* Tests the behavior of preProcessOperation and acceptOperation when encountering an invalid StreamSegmentId, or
* when encountering a StreamSegment Id for a deleted StreamSegment.
*/
@Test
public void testPreProcessAndAcceptWithInvalidSegmentId() throws Exception {
    UpdateableContainerMetadata metadata = createBlankMetadata();
    val txn = createUpdateTransaction(metadata);
    ArrayList<StorageOperation> testOperations = new ArrayList<>();
    testOperations.add(createAppendNoOffset());
    testOperations.add(createSeal());
    testOperations.add(createMerge());
    for (StorageOperation op : testOperations) {
        AssertExtensions.assertThrows(
                "Unexpected behavior from preProcessOperation when processing an operation for a non-existent Segment: " + op,
                () -> txn.preProcessOperation(op),
                ex -> ex instanceof MetadataUpdateException);
        AssertExtensions.assertThrows(
                "Unexpected behavior from acceptOperation when processing an operation for a non-existent Segment: " + op,
                () -> txn.acceptOperation(op),
                ex -> ex instanceof MetadataUpdateException);
    }

    // If the StreamSegment was previously marked as deleted.
    UpdateableSegmentMetadata segmentMetadata = metadata.mapStreamSegmentId("foo", SEGMENT_ID);
    segmentMetadata.markDeleted();
    for (StorageOperation op : testOperations) {
        AssertExtensions.assertThrows(
                "Unexpected behavior from preProcessOperation when processing an operation for deleted Segment: " + op,
                () -> txn.preProcessOperation(op),
                ex -> ex instanceof StreamSegmentNotExistsException);
    }
}
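
To summarize the two rejection paths asserted above, here is a condensed sketch; the class and method names are hypothetical, the import path for UpdateableContainerMetadata is assumed, and the expected exceptions are noted in comments rather than asserted.

import io.pravega.segmentstore.server.UpdateableContainerMetadata;
import io.pravega.segmentstore.server.UpdateableSegmentMetadata;

// Illustration only: the two metadata states exercised above and the outcome expected for each.
final class InvalidSegmentSketch {

    static void describeRejectionPaths(UpdateableContainerMetadata metadata, long segmentId) {
        // State 1: segmentId was never mapped into the container metadata.
        // Expected: preProcessOperation and acceptOperation fail with MetadataUpdateException.

        // State 2: the segment is mapped, but then marked as deleted.
        // Expected: preProcessOperation fails with StreamSegmentNotExistsException instead.
        UpdateableSegmentMetadata sm = metadata.mapStreamSegmentId("foo", segmentId);
        sm.markDeleted();
    }
}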
Use of io.pravega.segmentstore.server.UpdateableSegmentMetadata in project pravega by pravega.
From the class ContainerMetadataUpdateTransactionTests, method testAcceptStreamSegmentSeal.
/**
* Tests the accept method with StreamSegmentSeal operations.
*/
@Test
public void testAcceptStreamSegmentSeal() throws Exception {
    UpdateableContainerMetadata metadata = createMetadata();

    // Set some attributes.
    val segmentAttributes = createAttributes();
    segmentAttributes.put(Attributes.CREATION_TIME, 1L);
    UpdateableSegmentMetadata segmentMetadata = metadata.getStreamSegmentMetadata(SEGMENT_ID);
    segmentMetadata.updateAttributes(segmentAttributes);
    val txn = createUpdateTransaction(metadata);
    StreamSegmentSealOperation sealOp = createSeal();

    // When no pre-process has happened.
    AssertExtensions.assertThrows(
            "Unexpected behavior from acceptOperation() when no pre-processing was made.",
            () -> txn.acceptOperation(sealOp),
            ex -> ex instanceof MetadataUpdateException);
    Assert.assertFalse("acceptOperation updated the transaction even if it threw an exception.", txn.getStreamSegmentMetadata(SEGMENT_ID).isSealed());
    Assert.assertFalse("acceptOperation updated the metadata.", metadata.getStreamSegmentMetadata(SEGMENT_ID).isSealed());

    // When all is good.
    txn.preProcessOperation(sealOp);
    txn.acceptOperation(sealOp);
    Assert.assertTrue("acceptOperation did not update the transaction.", txn.getStreamSegmentMetadata(SEGMENT_ID).isSealed());
    Assert.assertFalse("acceptOperation updated the metadata.", segmentMetadata.isSealed());
    txn.commit(metadata);

    // Check attributes. All dynamic attributes should be removed.
    segmentAttributes.keySet().removeIf(Attributes::isDynamic);
    SegmentMetadataComparer.assertSameAttributes("Unexpected set of attributes after commit.", segmentAttributes, segmentMetadata);
}
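
The final assertion relies on dynamic attributes being dropped once the seal is committed; the test models this by filtering its own expected map before comparing. Below is a minimal sketch of that filtering step, assuming the attribute map is keyed by UUID as in this version of Pravega and that Attributes lives in io.pravega.segmentstore.contracts.

import io.pravega.segmentstore.contracts.Attributes;

import java.util.Map;
import java.util.UUID;

// Illustration only: how the expected-attributes map is trimmed before comparison.
final class DynamicAttributesSketch {

    static void dropDynamicAttributes(Map<UUID, Long> expectedAttributes) {
        // Attributes flagged as dynamic are not expected to survive the committed seal,
        // so they are removed from the expected set before asserting equality.
        expectedAttributes.keySet().removeIf(Attributes::isDynamic);
    }
}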
Use of io.pravega.segmentstore.server.UpdateableSegmentMetadata in project pravega by pravega.
From the class DurableLogTests, method testTailReadsTimeout.
/**
* Tests the ability to timeout tail reads. This does not actually test the functionality of tail reads - it just
* tests that they will time out appropriately.
*/
@Test
public void testTailReadsTimeout() {
    final long segmentId = 1;
    final String segmentName = Long.toString(segmentId);

    // Set up a DurableLog and start it.
    @Cleanup ContainerSetup setup = new ContainerSetup(executorService());
    @Cleanup DurableLog durableLog = setup.createDurableLog();
    durableLog.startAsync().awaitRunning();

    // Create a segment, which will be used for testing later.
    UpdateableSegmentMetadata segmentMetadata = setup.metadata.mapStreamSegmentId(segmentName, segmentId);
    segmentMetadata.setLength(0);
    Duration shortTimeout = Duration.ofMillis(30);

    // Set up a read operation, and make sure it is blocked (since there is no data).
    CompletableFuture<Iterator<Operation>> readFuture = durableLog.read(1, 1, shortTimeout);
    Assert.assertFalse("read() returned a completed future when there is no data available.", Futures.isSuccessful(readFuture));

    CompletableFuture<Void> controlFuture = Futures.delayedFuture(Duration.ofMillis(2000), setup.executorService);
    AssertExtensions.assertThrows(
            "Future from read() operation did not fail with a TimeoutException after the timeout expired.",
            () -> CompletableFuture.anyOf(controlFuture, readFuture),
            ex -> ex instanceof TimeoutException);
}
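
The last assertion uses a control future that completes long after the 30 ms read timeout: racing the two with CompletableFuture.anyOf means the combined future fails with the read's TimeoutException if, and only if, the timeout fires before the control does. Below is a self-contained sketch of that pattern in plain JDK terms (the class and method names are hypothetical, and a scheduled executor stands in for Futures.delayedFuture from the test).

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Illustration only: the control-future pattern used above to verify that a timeout fires.
final class TimeoutRaceSketch {

    static CompletableFuture<Object> raceAgainstControl(CompletableFuture<?> readFuture,
                                                        ScheduledExecutorService executor) {
        // A control future that completes successfully well after the read's short timeout.
        CompletableFuture<Void> control = new CompletableFuture<>();
        executor.schedule(() -> {
            control.complete(null);
        }, 2000, TimeUnit.MILLISECONDS);

        // anyOf mirrors whichever future finishes first: if the read times out as expected, the
        // combined future completes exceptionally with its TimeoutException before the control
        // future can complete normally.
        return CompletableFuture.anyOf(control, readFuture);
    }
}

In the test, AssertExtensions.assertThrows is handed the combined future and a predicate that checks the failure cause is a TimeoutException.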