Use of io.pravega.segmentstore.server.logs.operations.DeleteSegmentOperation in project pravega by pravega.
From class SegmentAggregatorTests, method testDelete.
/**
 * Tests the ability to process a {@link DeleteSegmentOperation} on Segments in various states:
 * - Empty (not yet created).
 * - Empty (created, but no data).
 * - Not empty, not sealed.
 * - Sealed (empty or not).
 */
@Test
public void testDelete() throws Exception {
    @Cleanup
    TestContext context = new TestContext(DEFAULT_CONFIG);
    val notCreated = context.transactionAggregators[0];
    val empty = context.transactionAggregators[1];
    val notSealed = context.transactionAggregators[2];
    val sealed = context.transactionAggregators[3];
    val withMergers = context.transactionAggregators[4];
    val withMergerSource = context.transactionAggregators[5];
    val emptyWithAttributes = context.transactionAggregators[6];
    // Note: withMergerSource is deliberately excluded here; its deletion is verified via its merge target (withMergers).
    val allAggregators = new SegmentAggregator[]{ notCreated, empty, notSealed, sealed, withMergers, emptyWithAttributes };

    // Create the segments that are supposed to exist in Storage.
    Stream.of(empty, notSealed, sealed).forEach(a -> context.storage.create(a.getMetadata().getName(), TIMEOUT).join());

    // Write 1 byte to the non-empty segment and add 1 attribute.
    context.storage.openWrite(notSealed.getMetadata().getName())
            .thenCompose(handle -> context.storage.write(handle, 0, new ByteArrayInputStream(new byte[]{ 1 }), 1, TIMEOUT))
            .join();
    ((UpdateableSegmentMetadata) notSealed.getMetadata()).setLength(1L);
    context.dataSource.persistAttributes(notSealed.getMetadata().getId(), Collections.singletonMap(AttributeId.randomUUID(), 1L), TIMEOUT).join();

    // Seal the sealed segment.
    ((UpdateableSegmentMetadata) sealed.getMetadata()).markSealed();
    context.storage.openWrite(sealed.getMetadata().getName())
            .thenCompose(handle -> context.storage.seal(handle, TIMEOUT))
            .join();
    context.dataSource.persistAttributes(sealed.getMetadata().getId(), Collections.singletonMap(AttributeId.randomUUID(), 1L), TIMEOUT).join();

    // Create a source segment; we'll verify it was also deleted when its merge target was.
    context.storage.create(withMergerSource.getMetadata().getName(), TIMEOUT).join();
    context.dataSource.persistAttributes(withMergerSource.getMetadata().getId(), Collections.singletonMap(AttributeId.randomUUID(), 2L), TIMEOUT).join();

    // This segment has an attribute index, but the segment itself has not been created in Storage yet
    // (since no data has been written to it).
    context.dataSource.persistAttributes(emptyWithAttributes.getMetadata().getId(), Collections.singletonMap(AttributeId.randomUUID(), 3L), TIMEOUT).join();

    for (val a : allAggregators) {
        // Initialize the Aggregator and add the DeleteSegmentOperation.
        a.initialize(TIMEOUT).join();
        if (a == withMergers) {
            // Add a merged segment to this one, but not before adding an arbitrary operation.
            withMergers.add(generateAppendAndUpdateMetadata(1, withMergers.getMetadata().getId(), context));
            a.add(generateMergeTransactionAndUpdateMetadata(withMergers.getMetadata().getId(), withMergerSource.getMetadata().getId(), context));
        }
        a.add(generateDeleteAndUpdateMetadata(a.getMetadata().getId(), context));
        AssertExtensions.assertGreaterThan("Unexpected LUSN before flush.", 0, a.getLowestUncommittedSequenceNumber());
        Assert.assertTrue("Unexpected value from mustFlush() when a DeleteSegmentOperation is queued up.", a.mustFlush());

        // Flush everything.
        a.flush(TIMEOUT).join();
        Assert.assertFalse("Unexpected value from mustFlush() after Deletion.", a.mustFlush());
        AssertExtensions.assertLessThan("Unexpected LUSN after flush.", 0, a.getLowestUncommittedSequenceNumber());
        Assert.assertTrue("Unexpected value from isDeleted() after Deletion.", a.getMetadata().isDeleted());
        Assert.assertTrue("Unexpected value from isDeletedInStorage() after Deletion.", a.getMetadata().isDeletedInStorage());

        // Verify that no segment exists in Storage after the flush.
        boolean existsInStorage = context.storage.exists(a.getMetadata().getName(), TIMEOUT).join();
        Assert.assertFalse("Segment still exists in Storage after Deletion.", existsInStorage);
    }

    Assert.assertFalse("Pending merger source segment not deleted.", context.storage.exists(withMergerSource.getMetadata().getName(), TIMEOUT).join());
    Assert.assertTrue("Attributes not deleted for non-merged segment.", context.dataSource.getPersistedAttributes(notSealed.getMetadata().getId()).isEmpty());
    Assert.assertTrue("Attributes not deleted for merger source segment.", context.dataSource.getPersistedAttributes(withMergerSource.getMetadata().getId()).isEmpty());
    Assert.assertTrue("Attributes not deleted for empty segment with attributes.", context.dataSource.getPersistedAttributes(emptyWithAttributes.getMetadata().getId()).isEmpty());
}
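For context on what the test helpers queue up: a DeleteSegmentOperation is lightweight, carrying essentially just the id of the segment to delete; the SegmentAggregator performs the actual Storage and attribute-index deletion on the next flush(). A minimal sketch of that flow follows. The names segmentId, nextSequenceNumber, and aggregator are assumptions for illustration; in the test above, helper methods like generateDeleteAndUpdateMetadata also update the container metadata to match.

// Minimal sketch, assuming a segmentId, a sequence number source, and an initialized
// SegmentAggregator. In practice the DurableLog assigns sequence numbers, and the
// container metadata is updated to reflect the delete before the aggregator sees it.
DeleteSegmentOperation deleteOp = new DeleteSegmentOperation(segmentId);
deleteOp.setSequenceNumber(nextSequenceNumber);
aggregator.add(deleteOp);          // queues the delete; mustFlush() now returns true
aggregator.flush(TIMEOUT).join();  // removes the segment and its attribute index from Storage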
Use of io.pravega.segmentstore.server.logs.operations.DeleteSegmentOperation in project pravega by pravega.
From class SegmentAggregatorTests, method testReconcileDelete.
/**
 * Tests the ability to reconcile a {@link DeleteSegmentOperation} on Segments in various states:
 * - Empty (not yet created).
 * - Empty (created, but no data).
 * - Not empty, not sealed.
 * - Sealed (empty or not).
 *
 * Reconciling a {@link DeleteSegmentOperation} is different from any other operation. Even if there are other
 * operations to reconcile, the mere presence of a Delete bypasses all of them and simply deletes the segment.
 */
@Test
public void testReconcileDelete() throws Exception {
    final int appendLength = DEFAULT_CONFIG.getFlushThresholdBytes();
    @Cleanup
    TestContext context = new TestContext(DEFAULT_CONFIG);
    val notExistsWithAppend = context.transactionAggregators[0];
    val existsWithAppend = context.transactionAggregators[1];
    val existsWithSeal = context.transactionAggregators[2];
    val allAggregators = new SegmentAggregator[]{ notExistsWithAppend, existsWithAppend, existsWithSeal };
    for (val a : allAggregators) {
        // Create the segment and add 1 byte to it. This prevents initialize() from treating it as empty.
        context.storage.create(a.getMetadata().getName(), TIMEOUT)
                .thenCompose(v -> context.storage.openWrite(a.getMetadata().getName()))
                .thenCompose(handle -> {
                    ((UpdateableSegmentMetadata) a.getMetadata()).setLength(1L);
                    ((UpdateableSegmentMetadata) a.getMetadata()).setStorageLength(1L);
                    return context.storage.write(handle, 0, new ByteArrayInputStream(new byte[]{ 1 }), 1, TIMEOUT);
                })
                .thenCompose(v -> a.initialize(TIMEOUT))
                .join();

        // Add enough data to trigger a flush.
        a.add(generateAppendAndUpdateMetadata(a.getMetadata().getId(), new byte[appendLength], context));
        if (a == existsWithSeal) {
            // Add a Seal operation for the segment that should be sealed.
            a.add(generateSealAndUpdateMetadata(existsWithSeal.getMetadata().getId(), context));
        }

        // Delete the Segment from Storage.
        Futures.exceptionallyExpecting(
                context.storage.openWrite(a.getMetadata().getName()).thenCompose(handle -> context.storage.delete(handle, TIMEOUT)),
                ex -> ex instanceof StreamSegmentNotExistsException, null).join();
        Assert.assertTrue("Unexpected value from mustFlush() before first flush().", a.mustFlush());

        // The first attempt should fail.
        AssertExtensions.assertSuppliedFutureThrows("First invocation of flush() should fail.",
                () -> a.flush(TIMEOUT),
                ex -> ex instanceof StreamSegmentNotExistsException);
        Assert.assertTrue("Unexpected value from mustFlush() after failed flush().", a.mustFlush());

        // Add the DeleteSegmentOperation - this should cause reconciliation to succeed.
        a.add(generateDeleteAndUpdateMetadata(a.getMetadata().getId(), context));
        a.flush(TIMEOUT).join();
        Assert.assertFalse("Unexpected value from mustFlush() after Deletion.", a.mustFlush());
        AssertExtensions.assertLessThan("Unexpected LUSN after flush.", 0, a.getLowestUncommittedSequenceNumber());
        Assert.assertTrue("Unexpected value from isDeleted() after Deletion.", a.getMetadata().isDeleted());
        Assert.assertTrue("Unexpected value from isDeletedInStorage() after Deletion.", a.getMetadata().isDeletedInStorage());

        // Verify that no segment exists in Storage after the flush.
        boolean existsInStorage = context.storage.exists(a.getMetadata().getName(), TIMEOUT).join();
        Assert.assertFalse("Segment still exists in Storage after Deletion.", existsInStorage);
    }
}
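Why the second flush() succeeds: once a DeleteSegmentOperation is queued, reconciliation does not need to replay the failed appends or seals; deleting the segment trivially resolves any mismatch with Storage. A sketch of that shortcut follows; the method and helper names are hypothetical and do not reflect Pravega's actual internals, only the behavior the test asserts.

// Hypothetical sketch of the reconciliation shortcut described above.
CompletableFuture<Void> reconcile() {
    if (hasPendingDelete()) {
        // A queued delete supersedes every other pending operation: deleting the
        // segment reconciles any mismatch without inspecting Storage contents.
        return deleteSegmentInStorage().thenRun(this::markDeletedInMetadata);
    }
    return reconcileAppendsAndSeals(); // the usual, more expensive path
}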
Use of io.pravega.segmentstore.server.logs.operations.DeleteSegmentOperation in project pravega by pravega.
From class ContainerMetadataUpdateTransaction, method acceptOperation.
/**
 * Accepts the given Operation. The Operation's effects are reflected in the pending transaction.
 * This method has no effect on Metadata Operations.
 * See OperationMetadataUpdater.acceptOperation for more details on behavior.
 *
 * @param operation The operation to accept.
 * @throws MetadataUpdateException If the given operation was rejected given the current state of the metadata.
 * @throws NullPointerException    If the operation is null.
 */
void acceptOperation(Operation operation) throws MetadataUpdateException {
    checkNotSealed();
    if (operation instanceof SegmentOperation) {
        val segmentMetadata = getSegmentUpdateTransaction(((SegmentOperation) operation).getStreamSegmentId());
        segmentMetadata.setLastUsed(operation.getSequenceNumber());
        if (operation instanceof StreamSegmentAppendOperation) {
            segmentMetadata.acceptOperation((StreamSegmentAppendOperation) operation);
        } else if (operation instanceof StreamSegmentSealOperation) {
            segmentMetadata.acceptOperation((StreamSegmentSealOperation) operation);
        } else if (operation instanceof MergeSegmentOperation) {
            MergeSegmentOperation mto = (MergeSegmentOperation) operation;
            SegmentMetadataUpdateTransaction sourceMetadata = getSegmentUpdateTransaction(mto.getSourceSegmentId());
            sourceMetadata.acceptAsSourceSegment(mto);
            sourceMetadata.setLastUsed(operation.getSequenceNumber());
            segmentMetadata.acceptAsTargetSegment(mto, sourceMetadata);
        } else if (operation instanceof UpdateAttributesOperation) {
            segmentMetadata.acceptOperation((UpdateAttributesOperation) operation);
        } else if (operation instanceof StreamSegmentTruncateOperation) {
            segmentMetadata.acceptOperation((StreamSegmentTruncateOperation) operation);
        } else if (operation instanceof DeleteSegmentOperation) {
            segmentMetadata.acceptOperation((DeleteSegmentOperation) operation);
        }
    }
    if (operation instanceof CheckpointOperationBase) {
        if (operation instanceof MetadataCheckpointOperation) {
            // A MetadataCheckpointOperation represents a valid truncation point. Record it as such.
            this.newTruncationPoints.add(operation.getSequenceNumber());
        }
        // The Checkpoint operation has been serialized and we no longer need its contents.
        // Clear it and release any memory it used.
        ((CheckpointOperationBase) operation).clearContents();
    } else if (operation instanceof StreamSegmentMapOperation) {
        acceptMetadataOperation((StreamSegmentMapOperation) operation);
    }
}
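For reference, acceptOperation is the second half of a two-phase protocol: the caller first pre-processes (validates) the operation against the transaction, assigns it a sequence number, and only then accepts it. A hedged usage sketch, where txn (a ContainerMetadataUpdateTransaction) and nextSequenceNumber are assumptions introduced for illustration:

// Usage sketch: the validate-then-accept sequence driven by the OperationMetadataUpdater.
DeleteSegmentOperation op = new DeleteSegmentOperation(segmentId);
txn.preProcessOperation(op);               // validate against the current (pending) metadata state
op.setSequenceNumber(nextSequenceNumber);  // normally assigned by the log's operation processor
txn.acceptOperation(op);                   // reflect the delete into the pending transaction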
Use of io.pravega.segmentstore.server.logs.operations.DeleteSegmentOperation in project pravega by pravega.
From class DurableLogTests, method testRecoveryWithIncrementalCheckpoints.
/**
 * Tests the DurableLog recovery process when multiple {@link MetadataCheckpointOperation}s are added, each
 * such checkpoint including information about evicted segments or segments whose storage state was modified.
 */
@Test
public void testRecoveryWithIncrementalCheckpoints() throws Exception {
    final int streamSegmentCount = 50;

    // Set up a DurableLog and start it.
    @Cleanup
    TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()));
    @Cleanup
    Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);

    // First DurableLog. We use this for generating data.
    val metadata1 = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup
    CacheStorage cacheStorage = new DirectMemoryCache(Integer.MAX_VALUE);
    @Cleanup
    CacheManager cacheManager = new CacheManager(CachePolicy.INFINITE, cacheStorage, executorService());
    List<Long> deletedIds;
    Set<Long> evictIds;
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata1, storage, cacheManager, executorService());
         DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata1, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();

        // Create some segments.
        val segmentIds = new ArrayList<>(createStreamSegmentsWithOperations(streamSegmentCount, durableLog));
        deletedIds = segmentIds.subList(0, 5);
        val mergedFromIds = segmentIds.subList(5, 10);
        // Must be the same length as mergedFromIds.
        val mergedToIds = segmentIds.subList(10, 15);
        evictIds = new HashSet<>(segmentIds.subList(15, 20));
        val changeStorageStateIds = segmentIds.subList(20, segmentIds.size() - 5);

        // Append something to each segment.
        for (val segmentId : segmentIds) {
            if (!evictIds.contains(segmentId)) {
                durableLog.add(new StreamSegmentAppendOperation(segmentId, generateAppendData((int) (long) segmentId), null), OperationPriority.Normal, TIMEOUT).join();
            }
        }

        // Checkpoint 1.
        durableLog.checkpoint(TIMEOUT).join();

        // Delete some segments.
        for (val segmentId : deletedIds) {
            durableLog.add(new DeleteSegmentOperation(segmentId), OperationPriority.Normal, TIMEOUT).join();
        }

        // Checkpoint 2.
        durableLog.checkpoint(TIMEOUT).join();

        // Merge some segments.
        for (int i = 0; i < mergedFromIds.size(); i++) {
            durableLog.add(new StreamSegmentSealOperation(mergedFromIds.get(i)), OperationPriority.Normal, TIMEOUT).join();
            durableLog.add(new MergeSegmentOperation(mergedToIds.get(i), mergedFromIds.get(i)), OperationPriority.Normal, TIMEOUT).join();
        }

        // Checkpoint 3.
        durableLog.checkpoint(TIMEOUT).join();

        // Evict some segments.
        val evictableContainerMetadata = (EvictableMetadata) metadata1;
        metadata1.removeTruncationMarkers(metadata1.getOperationSequenceNumber());
        val toEvict = evictableContainerMetadata.getEvictionCandidates(Integer.MAX_VALUE, segmentIds.size()).stream()
                .filter(m -> evictIds.contains(m.getId()))
                .collect(Collectors.toList());
        val evicted = evictableContainerMetadata.cleanup(toEvict, Integer.MAX_VALUE);
        AssertExtensions.assertContainsSameElements("", evictIds, evicted.stream().map(SegmentMetadata::getId).collect(Collectors.toList()));

        // Checkpoint 4.
        durableLog.checkpoint(TIMEOUT).join();

        // Update the storage state for some segments.
        for (val segmentId : changeStorageStateIds) {
            val sm = metadata1.getStreamSegmentMetadata(segmentId);
            if (segmentId % 3 == 0) {
                sm.setStorageLength(sm.getLength());
            }
            if (segmentId % 4 == 0) {
                sm.markSealed();
                sm.markSealedInStorage();
            }
            if (segmentId % 5 == 0) {
                sm.markDeleted();
                sm.markDeletedInStorage();
            }
        }

        // Checkpoint 5.
        durableLog.checkpoint(TIMEOUT).join();

        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }

    // Second DurableLog. We use this for recovery.
    val metadata2 = new MetadataBuilder(CONTAINER_ID).build();
    try (ContainerReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata2, storage, cacheManager, executorService());
         DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata2, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();

        // Validate that the metadata matches.
        val expectedSegmentIds = metadata1.getAllStreamSegmentIds();
        val actualSegmentIds = metadata2.getAllStreamSegmentIds();
        AssertExtensions.assertContainsSameElements("Unexpected set of recovered segments. Only Active segments are expected to have been recovered.", expectedSegmentIds, actualSegmentIds);
        val expectedSegments = expectedSegmentIds.stream().sorted().map(metadata1::getStreamSegmentMetadata).collect(Collectors.toList());
        val actualSegments = actualSegmentIds.stream().sorted().map(metadata2::getStreamSegmentMetadata).collect(Collectors.toList());
        for (int i = 0; i < expectedSegments.size(); i++) {
            val e = expectedSegments.get(i);
            val a = actualSegments.get(i);
            SegmentMetadataComparer.assertEquals("Recovered segment metadata mismatch", e, a);
        }

        // Validate that the read index is as it should be. Here, we can only check that the read indices for
        // evicted or deleted segments are no longer loaded; more thorough checks live in the ContainerReadIndexTests suite.
        Streams.concat(evictIds.stream(), deletedIds.stream())
                .forEach(segmentId -> Assert.assertNull("Not expecting a read index for an evicted or deleted segment.", readIndex.getIndex(segmentId)));

        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
}
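The pattern being exercised, distilled: a MetadataCheckpointOperation snapshots the container metadata, so a recovering DurableLog can restore segment state (including Deleted flags) from the checkpoint rather than replaying every prior operation individually. Using the same APIs as the test above:

// Distilled delete-then-checkpoint flow from the test above.
durableLog.add(new DeleteSegmentOperation(segmentId), OperationPriority.Normal, TIMEOUT).join();
durableLog.checkpoint(TIMEOUT).join(); // the snapshot records the segment as Deleted
// A second DurableLog recovering from the same DurableDataLog rebuilds its metadata
// from this checkpoint; the segment is restored with isDeleted() == true.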
Use of io.pravega.segmentstore.server.logs.operations.DeleteSegmentOperation in project pravega by pravega.
From class DataRecoveryTest, method testLogEditOperationObject.
@Test
public void testLogEditOperationObject() throws IOException {
    // Set up the command object.
    STATE.set(new AdminCommandState());
    Properties pravegaProperties = new Properties();
    pravegaProperties.setProperty("pravegaservice.container.count", "1");
    pravegaProperties.setProperty("pravegaservice.clusterName", "pravega0");
    STATE.get().getConfigBuilder().include(pravegaProperties);

    // Delete Edit Operations should not take the newOperation field into account when checking equality.
    Assert.assertEquals(new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.DELETE_OPERATION, 1, 2, null),
            new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.DELETE_OPERATION, 1, 2, new DeleteSegmentOperation(1)));
    Assert.assertEquals(new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.DELETE_OPERATION, 1, 2, null).hashCode(),
            new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.DELETE_OPERATION, 1, 2, new DeleteSegmentOperation(1)).hashCode());

    // Other cases for equality of operations.
    Assert.assertEquals(new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 2, null),
            new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 2, null));
    Assert.assertEquals(new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 2, new DeleteSegmentOperation(1)),
            new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 2, new DeleteSegmentOperation(1)));

    // Equality of payload operations is checked by type and sequence number, the attributes common to the Operation class.
    DurableDataLogRepairCommand.LogEditOperation deleteOp = new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 2, new DeleteSegmentOperation(2));
    Assert.assertEquals(deleteOp, new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 2, new DeleteSegmentOperation(1)));
    deleteOp.getNewOperation().resetSequenceNumber(123);
    Assert.assertNotEquals(deleteOp, new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 2, new DeleteSegmentOperation(1)));

    // Test the cases of the same object reference and of comparison against null.
    DurableDataLogRepairCommand.LogEditOperation sameOp = new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 2, new DeleteSegmentOperation(1));
    Assert.assertEquals(sameOp, sameOp);
    Assert.assertNotEquals(sameOp, null);
    Assert.assertNotEquals(new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 2, null),
            new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 2, new DeleteSegmentOperation(1)));
    Assert.assertNotEquals(new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.REPLACE_OPERATION, 1, 2, null),
            new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 2, new DeleteSegmentOperation(1)));
    Assert.assertNotEquals(new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 2, null),
            new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 2, 2, new DeleteSegmentOperation(1)));
    Assert.assertNotEquals(new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 2, new DeleteSegmentOperation(1)),
            new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 1, new DeleteSegmentOperation(1)));
}
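The assertions above pin down an equality contract rather than an implementation: DELETE_OPERATION edits ignore the newOperation payload entirely, while other edit types compare payloads only by type and sequence number. A sketch of an equals() consistent with those assertions; the field names (type, initialOperationId, finalOperationId, newOperation) are assumptions, and this is not the actual Pravega code.

// Sketch only: an equals() satisfying the contract the test asserts.
@Override
public boolean equals(Object o) {
    if (this == o) {
        return true;
    }
    if (!(o instanceof LogEditOperation)) {
        return false;
    }
    LogEditOperation other = (LogEditOperation) o;
    if (this.type != other.type
            || this.initialOperationId != other.initialOperationId
            || this.finalOperationId != other.finalOperationId) {
        return false;
    }
    if (this.type == LogEditType.DELETE_OPERATION) {
        return true; // deletions ignore the newOperation payload entirely
    }
    // Payloads compare by type and sequence number only (the common Operation attributes).
    return this.newOperation == other.newOperation
            || (this.newOperation != null && other.newOperation != null
                && this.newOperation.getClass() == other.newOperation.getClass()
                && this.newOperation.getSequenceNumber() == other.newOperation.getSequenceNumber());
}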