Use of io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation in project pravega by pravega.
Class SegmentAggregatorTests, method generateMergeTransactionAndUpdateMetadata.
private StorageOperation generateMergeTransactionAndUpdateMetadata(long targetId, long sourceId, TestContext context) {
    UpdateableSegmentMetadata sourceMetadata = context.containerMetadata.getStreamSegmentMetadata(sourceId);
    UpdateableSegmentMetadata targetMetadata = context.containerMetadata.getStreamSegmentMetadata(targetId);
    MergeSegmentOperation op = new MergeSegmentOperation(targetMetadata.getId(), sourceMetadata.getId());
    op.setLength(sourceMetadata.getLength());
    op.setStreamSegmentOffset(targetMetadata.getLength());
    targetMetadata.setLength(targetMetadata.getLength() + sourceMetadata.getLength());
    sourceMetadata.markMerged();
    op.setSequenceNumber(context.containerMetadata.nextOperationSequenceNumber());
    return op;
}
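For orientation, here is a small illustrative sketch of the bookkeeping the helper above performs. The ids and lengths (1, 2, 100, 500) are made up for the example and do not come from the test:

// Illustrative sketch: merging a 100-byte source segment into a target that currently holds 500 bytes.
long targetId = 1; // hypothetical target segment id
long sourceId = 2; // hypothetical source segment id
MergeSegmentOperation op = new MergeSegmentOperation(targetId, sourceId);
op.setLength(100); // the merge covers the source segment's entire length
op.setStreamSegmentOffset(500); // the merged data starts at the target's current length
// Once the operation is accepted, the target's length becomes 500 + 100 = 600 and the source
// segment is marked as merged, which is exactly what the helper does through the metadata.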
Use of io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation in project pravega by pravega.
Class SegmentAggregatorTests, method testRecoveryEmptyMergeOperation.
/**
 * Tests a scenario where a MergeSegmentOperation needs to be recovered, but the source segment has already been
 * merged (or deleted) in Storage.
 */
@Test
public void testRecoveryEmptyMergeOperation() throws Exception {
    @Cleanup TestContext context = new TestContext(DEFAULT_CONFIG);

    // Create a parent segment and one transaction segment.
    context.segmentAggregator.initialize(TIMEOUT).join();

    // Part 1: When the source segment is missing from Storage, but metadata does not reflect that.
    SegmentAggregator ta0 = context.transactionAggregators[0];
    context.storage.create(ta0.getMetadata().getName(), TIMEOUT).join();
    context.storage.openWrite(ta0.getMetadata().getName()).thenCompose(txnHandle -> context.storage.seal(txnHandle, TIMEOUT)).join();
    val txn0Metadata = context.containerMetadata.getStreamSegmentMetadata(ta0.getMetadata().getId());
    txn0Metadata.markSealed();
    txn0Metadata.markSealedInStorage();
    ta0.initialize(TIMEOUT).join();
    context.storage.delete(context.storage.openWrite(txn0Metadata.getName()).join(), TIMEOUT).join();

    // This is the operation that should be reconciled.
    context.segmentAggregator.add(generateMergeTransactionAndUpdateMetadata(ta0.getMetadata().getId(), context));

    // Verify the operation was ack-ed.
    AtomicBoolean mergeAcked = new AtomicBoolean();
    context.dataSource.setCompleteMergeCallback((target, source) -> mergeAcked.set(true));
    context.segmentAggregator.flush(TIMEOUT).join();
    Assert.assertTrue("Merge was not ack-ed for deleted source segment.", mergeAcked.get());

    // Part 2: When the source segment's metadata indicates it was deleted.
    SegmentAggregator ta1 = context.transactionAggregators[1];
    context.storage.create(ta1.getMetadata().getName(), TIMEOUT).join();
    context.storage.openWrite(ta1.getMetadata().getName()).thenCompose(txnHandle -> context.storage.seal(txnHandle, TIMEOUT)).join();
    val txn1Metadata = context.containerMetadata.getStreamSegmentMetadata(ta1.getMetadata().getId());
    txn1Metadata.markDeleted();

    // This is the operation that should be reconciled.
    context.segmentAggregator.add(generateMergeTransactionAndUpdateMetadata(ta1.getMetadata().getId(), context));

    // Verify the operation was ack-ed.
    mergeAcked.set(false);
    context.dataSource.setCompleteMergeCallback((target, source) -> mergeAcked.set(true));
    context.segmentAggregator.flush(TIMEOUT).join();

    // Finally, verify that all operations were ack-ed back.
    Assert.assertTrue("Merge was not ack-ed for deleted source segment.", mergeAcked.get());
}
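One could strengthen such a test with a post-flush check. The sketch below is not part of the original test and assumes the Storage.exists() and SegmentMetadata isMerged()/isDeleted() accessors are available on the objects used above:

// Hypothetical post-flush sanity checks (illustrative only): the source segment should be gone from
// Storage, and its metadata should reflect that the merge has effectively completed.
Assert.assertFalse("Source segment still exists in Storage.",
        context.storage.exists(txn0Metadata.getName(), TIMEOUT).join());
Assert.assertTrue("Source segment metadata does not reflect the merge.",
        txn0Metadata.isMerged() || txn0Metadata.isDeleted());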
Use of io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation in project pravega by pravega.
Class SegmentAggregatorTests, method testAddWithBadInput.
/**
* Tests the add() method with invalid arguments.
*/
@Test
public void testAddWithBadInput() throws Exception {
    final long badTransactionId = 12345;
    final long badParentId = 56789;
    final String badParentName = "Foo_Parent";
    final String badTransactionName = "Foo_Transaction";
    @Cleanup TestContext context = new TestContext(DEFAULT_CONFIG);

    // We only need one Transaction for this test.
    SegmentAggregator transactionAggregator = context.transactionAggregators[0];
    SegmentMetadata transactionMetadata = transactionAggregator.getMetadata();
    context.segmentAggregator.initialize(TIMEOUT).join();
    transactionAggregator.initialize(TIMEOUT).join();

    // Create 2 more segments that can be used to verify MergeSegmentOperation.
    context.containerMetadata.mapStreamSegmentId(badParentName, badParentId);
    UpdateableSegmentMetadata badTransactionMetadata = context.containerMetadata.mapStreamSegmentId(badTransactionName, badTransactionId);
    badTransactionMetadata.setLength(0);
    badTransactionMetadata.setStorageLength(0);

    // 1. MergeSegmentOperation
    // Verify that MergeSegmentOperation cannot be added to the Segment to be merged.
    AssertExtensions.assertThrows("add() allowed a MergeSegmentOperation on the Transaction segment.", () -> transactionAggregator.add(generateSimpleMergeTransaction(transactionMetadata.getId(), context)), ex -> ex instanceof IllegalArgumentException);
    // 2. StreamSegmentSealOperation.
    // 2a. Verify we cannot add a StreamSegmentSealOperation if the segment is not sealed yet.
    AssertExtensions.assertThrows("add() allowed a StreamSegmentSealOperation for a non-sealed segment.", () -> {
        @Cleanup SegmentAggregator badTransactionAggregator = new SegmentAggregator(badTransactionMetadata, context.dataSource, context.storage, DEFAULT_CONFIG, context.timer, executorService());
        badTransactionAggregator.initialize(TIMEOUT).join();
        badTransactionAggregator.add(generateSimpleSeal(badTransactionId, context));
    }, ex -> ex instanceof DataCorruptionException);

    // 2b. Verify that nothing is allowed after Seal (after adding one append to and sealing the Transaction Segment).
    StorageOperation transactionAppend1 = generateAppendAndUpdateMetadata(0, transactionMetadata.getId(), context);
    transactionAggregator.add(transactionAppend1);
    transactionAggregator.add(generateSealAndUpdateMetadata(transactionMetadata.getId(), context));
    AssertExtensions.assertThrows("add() allowed operation after seal.", () -> transactionAggregator.add(generateSimpleAppend(transactionMetadata.getId(), context)), ex -> ex instanceof DataCorruptionException);

    // 3. CachedStreamSegmentAppendOperation.
    final StorageOperation parentAppend1 = generateAppendAndUpdateMetadata(0, SEGMENT_ID, context);

    // 3a. Verify we cannot add StreamSegmentAppendOperations.
    AssertExtensions.assertThrows("add() allowed a StreamSegmentAppendOperation.", () -> {
        // The offset is correct, but raw (non-cached) append operations are not accepted by the aggregator.
        StreamSegmentAppendOperation badAppend = new StreamSegmentAppendOperation(parentAppend1.getStreamSegmentId(), parentAppend1.getStreamSegmentOffset(), new ByteArraySegment(new byte[(int) parentAppend1.getLength()]), null);
        context.segmentAggregator.add(badAppend);
    }, ex -> ex instanceof IllegalArgumentException);
    // Add this one append to the parent (nothing unusual here); we'll use this for the next tests.
    context.segmentAggregator.add(parentAppend1);

    // 3b. Verify we cannot add anything beyond the DurableLogOffset (offset or offset+length).
    val appendData = new ByteArraySegment("foo".getBytes());
    AssertExtensions.assertThrows("add() allowed an operation beyond the DurableLogOffset (offset).", () -> {
        // We have the correct offset, but we did not increase the Length.
        StreamSegmentAppendOperation badAppend = new StreamSegmentAppendOperation(context.segmentAggregator.getMetadata().getId(), appendData, null);
        badAppend.setStreamSegmentOffset(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength());
        context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(badAppend));
    }, ex -> ex instanceof DataCorruptionException);

    ((UpdateableSegmentMetadata) context.segmentAggregator.getMetadata()).setLength(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength() + 1);
    AssertExtensions.assertThrows("add() allowed an operation beyond the DurableLogOffset (offset+length).", () -> {
        // We have the correct offset, but the append extends past the (newly increased) Length.
        StreamSegmentAppendOperation badAppend = new StreamSegmentAppendOperation(context.segmentAggregator.getMetadata().getId(), appendData, null);
        badAppend.setStreamSegmentOffset(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength());
        context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(badAppend));
    }, ex -> ex instanceof DataCorruptionException);
    // 3c. Verify contiguity (offsets - we cannot have gaps in the data).
    AssertExtensions.assertThrows("add() allowed an operation with wrong offset (too small).", () -> {
        StreamSegmentAppendOperation badOffsetAppend = new StreamSegmentAppendOperation(context.segmentAggregator.getMetadata().getId(), appendData, null);
        badOffsetAppend.setStreamSegmentOffset(0);
        context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(badOffsetAppend));
    }, ex -> ex instanceof DataCorruptionException);

    AssertExtensions.assertThrows("add() allowed an operation with wrong offset (too large).", () -> {
        StreamSegmentAppendOperation badOffsetAppend = new StreamSegmentAppendOperation(context.segmentAggregator.getMetadata().getId(), appendData, null);
        badOffsetAppend.setStreamSegmentOffset(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength() + 1);
        context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(badOffsetAppend));
    }, ex -> ex instanceof DataCorruptionException);

    AssertExtensions.assertThrows("add() allowed an operation with wrong offset (too large, but no pending operations).", () -> {
        @Cleanup SegmentAggregator badTransactionAggregator = new SegmentAggregator(badTransactionMetadata, context.dataSource, context.storage, DEFAULT_CONFIG, context.timer, executorService());
        badTransactionMetadata.setLength(100);
        badTransactionAggregator.initialize(TIMEOUT).join();
        StreamSegmentAppendOperation badOffsetAppend = new StreamSegmentAppendOperation(context.segmentAggregator.getMetadata().getId(), appendData, null);
        badOffsetAppend.setStreamSegmentOffset(1);
        context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(badOffsetAppend));
    }, ex -> ex instanceof DataCorruptionException);
    // 4. Verify Segment Id match.
    AssertExtensions.assertThrows("add() allowed an Append operation with wrong Segment Id.", () -> {
        StreamSegmentAppendOperation badIdAppend = new StreamSegmentAppendOperation(Integer.MAX_VALUE, appendData, null);
        badIdAppend.setStreamSegmentOffset(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength());
        context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(badIdAppend));
    }, ex -> ex instanceof IllegalArgumentException);

    AssertExtensions.assertThrows("add() allowed a StreamSegmentSealOperation with wrong SegmentId.", () -> {
        StreamSegmentSealOperation badIdSeal = new StreamSegmentSealOperation(Integer.MAX_VALUE);
        badIdSeal.setStreamSegmentOffset(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength());
        context.segmentAggregator.add(badIdSeal);
    }, ex -> ex instanceof IllegalArgumentException);

    AssertExtensions.assertThrows("add() allowed a MergeSegmentOperation with wrong SegmentId.", () -> {
        MergeSegmentOperation badIdMerge = new MergeSegmentOperation(Integer.MAX_VALUE, transactionMetadata.getId());
        badIdMerge.setStreamSegmentOffset(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength());
        badIdMerge.setLength(1);
        context.segmentAggregator.add(badIdMerge);
    }, ex -> ex instanceof IllegalArgumentException);

    // 5. Truncations.
    AssertExtensions.assertThrows("add() allowed a StreamSegmentTruncateOperation with a truncation offset beyond the one in the metadata.", () -> {
        StreamSegmentTruncateOperation op = new StreamSegmentTruncateOperation(SEGMENT_ID, 10);
        op.setSequenceNumber(context.containerMetadata.nextOperationSequenceNumber());
        context.segmentAggregator.add(op);
    }, ex -> ex instanceof DataCorruptionException);
}
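For contrast with the rejected inputs above, the sketch below shows what a well-formed append looks like from add()'s perspective: the operation must be contiguous with the data already added (no gaps or overlaps) and must not extend past the Length recorded in the metadata. This is an illustrative sketch that reuses the test's own types and helpers; it assumes a segment whose metadata Length still matches the data already handed to the aggregator, so it is not meant to be appended verbatim to the test above:

// Hypothetical well-formed append (illustrative only).
val goodData = new ByteArraySegment("bar".getBytes());
UpdateableSegmentMetadata segmentMetadata = (UpdateableSegmentMetadata) context.segmentAggregator.getMetadata();
long offset = segmentMetadata.getLength();
StreamSegmentAppendOperation goodAppend = new StreamSegmentAppendOperation(segmentMetadata.getId(), goodData, null);
goodAppend.setStreamSegmentOffset(offset); // contiguous with the data already added
goodAppend.setSequenceNumber(context.containerMetadata.nextOperationSequenceNumber());
segmentMetadata.setLength(offset + goodData.getLength()); // the metadata now covers the new bytes
context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(goodAppend)); // accepted: no gap, within Length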
Use of io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation in project pravega by pravega.
Class ContainerMetadataUpdateTransaction, method acceptOperation.
/**
* Accepts the given Operation. The Operation's effects are reflected in the pending transaction.
* This method has no effect on Metadata Operations.
* See OperationMetadataUpdater.acceptOperation for more details on behavior.
*
* @param operation The operation to accept.
* @throws MetadataUpdateException If the given operation was rejected based on the current state of the metadata.
* @throws NullPointerException If the operation is null.
*/
void acceptOperation(Operation operation) throws MetadataUpdateException {
    checkNotSealed();
    if (operation instanceof SegmentOperation) {
        val segmentMetadata = getSegmentUpdateTransaction(((SegmentOperation) operation).getStreamSegmentId());
        segmentMetadata.setLastUsed(operation.getSequenceNumber());
        if (operation instanceof StreamSegmentAppendOperation) {
            segmentMetadata.acceptOperation((StreamSegmentAppendOperation) operation);
        } else if (operation instanceof StreamSegmentSealOperation) {
            segmentMetadata.acceptOperation((StreamSegmentSealOperation) operation);
        } else if (operation instanceof MergeSegmentOperation) {
            MergeSegmentOperation mto = (MergeSegmentOperation) operation;
            SegmentMetadataUpdateTransaction sourceMetadata = getSegmentUpdateTransaction(mto.getSourceSegmentId());
            sourceMetadata.acceptAsSourceSegment(mto);
            sourceMetadata.setLastUsed(operation.getSequenceNumber());
            segmentMetadata.acceptAsTargetSegment(mto, sourceMetadata);
        } else if (operation instanceof UpdateAttributesOperation) {
            segmentMetadata.acceptOperation((UpdateAttributesOperation) operation);
        } else if (operation instanceof StreamSegmentTruncateOperation) {
            segmentMetadata.acceptOperation((StreamSegmentTruncateOperation) operation);
        } else if (operation instanceof DeleteSegmentOperation) {
            segmentMetadata.acceptOperation((DeleteSegmentOperation) operation);
        }
    }

    if (operation instanceof CheckpointOperationBase) {
        if (operation instanceof MetadataCheckpointOperation) {
            // A MetadataCheckpointOperation represents a valid truncation point. Record it as such.
            this.newTruncationPoints.add(operation.getSequenceNumber());
        }

        // Checkpoint operation has been serialized and we no longer need its contents. Clear it and release any
        // memory it used.
        ((CheckpointOperationBase) operation).clearContents();
    } else if (operation instanceof StreamSegmentMapOperation) {
        acceptMetadataOperation((StreamSegmentMapOperation) operation);
    }
}
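In the surrounding updater, acceptOperation is the second half of a two-step flow. The sketch below shows how a caller might drive it for a merge; it assumes the update transaction also exposes a preProcessOperation counterpart (an assumption based on the pre-process/accept pattern, since that method is not shown in this excerpt):

// Hypothetical driver sketch (not copied from the Pravega sources).
void applyMerge(ContainerMetadataUpdateTransaction txn, MergeSegmentOperation op, long sequenceNumber) throws Exception {
    txn.preProcessOperation(op); // assumed: validates the merge and fills in offsets/lengths from the metadata
    op.setSequenceNumber(sequenceNumber); // assigned once the operation is durably ordered
    txn.acceptOperation(op); // shown above: target length grows, source is marked merged, lastUsed is updated
}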
Use of io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation in project pravega by pravega.
Class DurableLogTests, method testRecoveryWithIncrementalCheckpoints.
/**
* Tests the DurableLog recovery process when there are multiple {@link MetadataCheckpointOperation}s added, with each
* such checkpoint including information about evicted segments or segments which had their storage state modified.
*/
@Test
public void testRecoveryWithIncrementalCheckpoints() throws Exception {
    final int streamSegmentCount = 50;

    // Setup a DurableLog and start it.
    @Cleanup TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()));
    @Cleanup Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);

    // First DurableLog. We use this for generating data.
    val metadata1 = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup CacheStorage cacheStorage = new DirectMemoryCache(Integer.MAX_VALUE);
    @Cleanup CacheManager cacheManager = new CacheManager(CachePolicy.INFINITE, cacheStorage, executorService());
    List<Long> deletedIds;
    Set<Long> evictIds;
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata1, storage, cacheManager, executorService());
         DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata1, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();

        // Create some segments.
        val segmentIds = new ArrayList<>(createStreamSegmentsWithOperations(streamSegmentCount, durableLog));
        deletedIds = segmentIds.subList(0, 5);
        val mergedFromIds = segmentIds.subList(5, 10);
        // Must be the same length as mergedFromIds.
        val mergedToIds = segmentIds.subList(10, 15);
        evictIds = new HashSet<>(segmentIds.subList(15, 20));
        val changeStorageStateIds = segmentIds.subList(20, segmentIds.size() - 5);

        // Append something to each segment.
        for (val segmentId : segmentIds) {
            if (!evictIds.contains(segmentId)) {
                durableLog.add(new StreamSegmentAppendOperation(segmentId, generateAppendData((int) (long) segmentId), null), OperationPriority.Normal, TIMEOUT).join();
            }
        }
        // Checkpoint 1.
        durableLog.checkpoint(TIMEOUT).join();

        // Delete some segments.
        for (val segmentId : deletedIds) {
            durableLog.add(new DeleteSegmentOperation(segmentId), OperationPriority.Normal, TIMEOUT).join();
        }

        // Checkpoint 2.
        durableLog.checkpoint(TIMEOUT).join();

        // Merge some segments.
        for (int i = 0; i < mergedFromIds.size(); i++) {
            durableLog.add(new StreamSegmentSealOperation(mergedFromIds.get(i)), OperationPriority.Normal, TIMEOUT).join();
            durableLog.add(new MergeSegmentOperation(mergedToIds.get(i), mergedFromIds.get(i)), OperationPriority.Normal, TIMEOUT).join();
        }

        // Checkpoint 3.
        durableLog.checkpoint(TIMEOUT).join();

        // Evict some segments.
        val evictableContainerMetadata = (EvictableMetadata) metadata1;
        metadata1.removeTruncationMarkers(metadata1.getOperationSequenceNumber());
        val toEvict = evictableContainerMetadata.getEvictionCandidates(Integer.MAX_VALUE, segmentIds.size()).stream().filter(m -> evictIds.contains(m.getId())).collect(Collectors.toList());
        val evicted = evictableContainerMetadata.cleanup(toEvict, Integer.MAX_VALUE);
        AssertExtensions.assertContainsSameElements("", evictIds, evicted.stream().map(SegmentMetadata::getId).collect(Collectors.toList()));

        // Checkpoint 4.
        durableLog.checkpoint(TIMEOUT).join();

        // Update storage state for some segments.
        for (val segmentId : changeStorageStateIds) {
            val sm = metadata1.getStreamSegmentMetadata(segmentId);
            if (segmentId % 3 == 0) {
                sm.setStorageLength(sm.getLength());
            }
            if (segmentId % 4 == 0) {
                sm.markSealed();
                sm.markSealedInStorage();
            }
            if (segmentId % 5 == 0) {
                sm.markDeleted();
                sm.markDeletedInStorage();
            }
        }

        // Checkpoint 5.
        durableLog.checkpoint(TIMEOUT).join();

        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
    // Second DurableLog. We use this for recovery.
    val metadata2 = new MetadataBuilder(CONTAINER_ID).build();
    try (ContainerReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata2, storage, cacheManager, executorService());
         DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata2, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();

        // Validate metadata matches.
        val expectedSegmentIds = metadata1.getAllStreamSegmentIds();
        val actualSegmentIds = metadata2.getAllStreamSegmentIds();
        AssertExtensions.assertContainsSameElements("Unexpected set of recovered segments. Only Active segments expected to have been recovered.", expectedSegmentIds, actualSegmentIds);
        val expectedSegments = expectedSegmentIds.stream().sorted().map(metadata1::getStreamSegmentMetadata).collect(Collectors.toList());
        val actualSegments = actualSegmentIds.stream().sorted().map(metadata2::getStreamSegmentMetadata).collect(Collectors.toList());
        for (int i = 0; i < expectedSegments.size(); i++) {
            val e = expectedSegments.get(i);
            val a = actualSegments.get(i);
            SegmentMetadataComparer.assertEquals("Recovered segment metadata mismatch", e, a);
        }

        // Validate the read index is as it should be. Here, we can only check that the read indices for evicted or
        // deleted segments are no longer loaded; we do more thorough checks in the ContainerReadIndexTests suite.
        Streams.concat(evictIds.stream(), deletedIds.stream()).forEach(segmentId -> Assert.assertNull("Not expecting a read index for an evicted or deleted segment.", readIndex.getIndex(segmentId)));

        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
}
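The merge loop above follows an ordering rule worth calling out: each source segment is sealed before the MergeSegmentOperation that consumes it is added, since only sealed segments can be merged. Condensed to a single pair of segments, using the same objects and constants as the test:

// Seal the source first, then merge it into the target; both operations go through the DurableLog
// and become part of the recoverable operation history that the checkpoints capture.
long sourceId = mergedFromIds.get(0);
long targetId = mergedToIds.get(0);
durableLog.add(new StreamSegmentSealOperation(sourceId), OperationPriority.Normal, TIMEOUT).join();
durableLog.add(new MergeSegmentOperation(targetId, sourceId), OperationPriority.Normal, TIMEOUT).join();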