
Example 6 with DeleteSegmentOperation

Use of io.pravega.segmentstore.server.logs.operations.DeleteSegmentOperation in project pravega by pravega.

From class SegmentAggregatorTests, method testDelete.

/**
 * Tests the ability to process a {@link DeleteSegmentOperation} on Segments in various states:
 * - Empty (not yet created).
 * - Empty (created, but no data).
 * - Not empty, not sealed.
 * - Sealed (empty or not).
 */
@Test
public void testDelete() throws Exception {
    @Cleanup TestContext context = new TestContext(DEFAULT_CONFIG);
    val notCreated = context.transactionAggregators[0];
    val empty = context.transactionAggregators[1];
    val notSealed = context.transactionAggregators[2];
    val sealed = context.transactionAggregators[3];
    val withMergers = context.transactionAggregators[4];
    val withMergerSource = context.transactionAggregators[5];
    val emptyWithAttributes = context.transactionAggregators[6];
    val allAggregators = new SegmentAggregator[] { notCreated, empty, notSealed, sealed, withMergers, emptyWithAttributes };
    // Create the segments that are supposed to exist in Storage.
    Stream.of(empty, notSealed, sealed).forEach(a -> context.storage.create(a.getMetadata().getName(), TIMEOUT).join());
    // Write 1 byte to the non-empty segment and add 1 attribute.
    context.storage.openWrite(notSealed.getMetadata().getName()).thenCompose(handle -> context.storage.write(handle, 0, new ByteArrayInputStream(new byte[] { 1 }), 1, TIMEOUT)).join();
    ((UpdateableSegmentMetadata) notSealed.getMetadata()).setLength(1L);
    context.dataSource.persistAttributes(notSealed.getMetadata().getId(), Collections.singletonMap(AttributeId.randomUUID(), 1L), TIMEOUT).join();
    // Seal the sealed segment.
    ((UpdateableSegmentMetadata) sealed.getMetadata()).markSealed();
    context.storage.openWrite(sealed.getMetadata().getName()).thenCompose(handle -> context.storage.seal(handle, TIMEOUT)).join();
    context.dataSource.persistAttributes(sealed.getMetadata().getId(), Collections.singletonMap(AttributeId.randomUUID(), 1L), TIMEOUT).join();
    // Create a source segment; we'll verify this was also deleted when its target was.
    context.storage.create(withMergerSource.getMetadata().getName(), TIMEOUT).join();
    context.dataSource.persistAttributes(withMergerSource.getMetadata().getId(), Collections.singletonMap(AttributeId.randomUUID(), 2L), TIMEOUT).join();
    // This segment has an attribute index, but no segment has been created yet (since no data has been written to it).
    context.dataSource.persistAttributes(emptyWithAttributes.getMetadata().getId(), Collections.singletonMap(AttributeId.randomUUID(), 3L), TIMEOUT).join();
    for (val a : allAggregators) {
        // Initialize the Aggregator and add the DeleteSegmentOperation.
        a.initialize(TIMEOUT).join();
        if (a == withMergers) {
            // Add a merged segment to this one, but not before adding an arbitrary operation.
            withMergers.add(generateAppendAndUpdateMetadata(1, withMergers.getMetadata().getId(), context));
            a.add(generateMergeTransactionAndUpdateMetadata(withMergers.getMetadata().getId(), withMergerSource.getMetadata().getId(), context));
        }
        a.add(generateDeleteAndUpdateMetadata(a.getMetadata().getId(), context));
        AssertExtensions.assertGreaterThan("Unexpected LUSN before flush.", 0, a.getLowestUncommittedSequenceNumber());
        Assert.assertTrue("Unexpected value from mustFlush() when DeletedSegmentOperation queued up.", a.mustFlush());
        // Flush everything.
        a.flush(TIMEOUT).join();
        Assert.assertFalse("Unexpected value from mustFlush() after Deletion.", a.mustFlush());
        AssertExtensions.assertLessThan("Unexpected LUSN after flush.", 0, a.getLowestUncommittedSequenceNumber());
        Assert.assertTrue("Unexpected value from isDeleted() after Deletion.", a.getMetadata().isDeleted());
        Assert.assertTrue("Unexpected value from isDeletedInStorage() after Deletion.", a.getMetadata().isDeletedInStorage());
        // Verify that no segment exists in Storage after the flush.
        boolean existsInStorage = context.storage.exists(a.getMetadata().getName(), TIMEOUT).join();
        Assert.assertFalse("Segment still exists in Storage after Deletion.", existsInStorage);
    }
    Assert.assertFalse("Pending merger source segment not deleted.", context.storage.exists(withMergerSource.getMetadata().getName(), TIMEOUT).join());
    Assert.assertTrue("Attributes not deleted for non-merged segment.", context.dataSource.getPersistedAttributes(notSealed.getMetadata().getId()).isEmpty());
    Assert.assertTrue("Attributes not deleted for merger source segment.", context.dataSource.getPersistedAttributes(withMergerSource.getMetadata().getId()).isEmpty());
    Assert.assertTrue("Attributes not deleted for empty segment with attributes.", context.dataSource.getPersistedAttributes(emptyWithAttributes.getMetadata().getId()).isEmpty());
}
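
For readers unfamiliar with the operation itself, the following stand-alone sketch (not part of the test above) shows the minimal surface of DeleteSegmentOperation that these examples rely on: the single-argument constructor, the segment id accessor inherited from SegmentOperation, and the sequence number. The id and sequence number values are arbitrary, and the sketch assumes the segment store server module is on the classpath.

import io.pravega.segmentstore.server.SegmentOperation;
import io.pravega.segmentstore.server.logs.operations.DeleteSegmentOperation;

public class DeleteSegmentOperationSketch {
    public static void main(String[] args) {
        // Construct a delete for a hypothetical segment id. Sequence numbers are normally
        // assigned when the operation is added to the log; resetSequenceNumber is used here
        // only for illustration (Example 10 below calls it the same way on a fresh operation).
        DeleteSegmentOperation delete = new DeleteSegmentOperation(123L);
        delete.resetSequenceNumber(7L);
        // DeleteSegmentOperation is a SegmentOperation, so it exposes the target segment id.
        SegmentOperation asSegmentOperation = delete;
        System.out.println(asSegmentOperation.getStreamSegmentId()); // 123
        System.out.println(delete.getSequenceNumber());              // 7
    }
}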

Example 7 with DeleteSegmentOperation

Use of io.pravega.segmentstore.server.logs.operations.DeleteSegmentOperation in project pravega by pravega.

From class SegmentAggregatorTests, method testReconcileDelete.

/**
 * Tests the ability to reconcile a {@link DeleteSegmentOperation} on Segments in various states:
 * - Empty (not yet created).
 * - Empty (created, but no data).
 * - Not empty, not sealed.
 * - Sealed (empty or not).
 *
 * Reconciling a {@link DeleteSegmentOperation} is different from any other operation. Even if there are other
 * operations to reconcile, the simple presence of a Delete will bypass any other one and simply delete the segment.
 */
@Test
public void testReconcileDelete() throws Exception {
    final int appendLength = DEFAULT_CONFIG.getFlushThresholdBytes();
    @Cleanup TestContext context = new TestContext(DEFAULT_CONFIG);
    val notExistsWithAppend = context.transactionAggregators[0];
    val existsWithAppend = context.transactionAggregators[1];
    val existsWithSeal = context.transactionAggregators[2];
    val allAggregators = new SegmentAggregator[] { notExistsWithAppend, existsWithAppend, existsWithSeal };
    for (val a : allAggregators) {
        // Create the segment, and add 1 byte to it. This will cause initialize() to not treat it as empty.
        context.storage.create(a.getMetadata().getName(), TIMEOUT).thenCompose(v -> context.storage.openWrite(a.getMetadata().getName())).thenCompose(handle -> {
            ((UpdateableSegmentMetadata) a.getMetadata()).setLength(1L);
            ((UpdateableSegmentMetadata) a.getMetadata()).setStorageLength(1L);
            return context.storage.write(handle, 0, new ByteArrayInputStream(new byte[] { 1 }), 1, TIMEOUT);
        }).thenCompose(v -> a.initialize(TIMEOUT)).join();
        // Add enough data to trigger a flush.
        a.add(generateAppendAndUpdateMetadata(a.getMetadata().getId(), new byte[appendLength], context));
        if (a == existsWithSeal) {
            // Add a Seal for that segment that should be sealed.
            a.add(generateSealAndUpdateMetadata(existsWithSeal.getMetadata().getId(), context));
        }
        // Delete the Segment from Storage.
        Futures.exceptionallyExpecting(context.storage.openWrite(a.getMetadata().getName()).thenCompose(handle -> context.storage.delete(handle, TIMEOUT)), ex -> ex instanceof StreamSegmentNotExistsException, null).join();
        Assert.assertTrue("Unexpected value from mustFlush() before first flush().", a.mustFlush());
        // First attempt should fail.
        AssertExtensions.assertSuppliedFutureThrows("First invocation of flush() should fail.", () -> a.flush(TIMEOUT), ex -> ex instanceof StreamSegmentNotExistsException);
        Assert.assertTrue("Unexpected value from mustFlush() after failed flush().", a.mustFlush());
        // Add the DeleteSegmentOperation - this should cause reconciliation to succeed.
        a.add(generateDeleteAndUpdateMetadata(a.getMetadata().getId(), context));
        a.flush(TIMEOUT).join();
        Assert.assertFalse("Unexpected value from mustFlush() after Deletion.", a.mustFlush());
        AssertExtensions.assertLessThan("Unexpected LUSN after flush.", 0, a.getLowestUncommittedSequenceNumber());
        Assert.assertTrue("Unexpected value from isDeleted() after Deletion.", a.getMetadata().isDeleted());
        Assert.assertTrue("Unexpected value from isDeletedInStorage() after Deletion.", a.getMetadata().isDeletedInStorage());
        // Verify that no segment exists in Storage after the flush.
        boolean existsInStorage = context.storage.exists(a.getMetadata().getName(), TIMEOUT).join();
        Assert.assertFalse("Segment still exists in Storage after Deletion.", existsInStorage);
    }
}
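
To make the reconciliation rule from the javadoc concrete, here is a small stand-alone sketch. The deleteWins helper is hypothetical and is not how SegmentAggregator is implemented; it only illustrates the stated rule that a queued DeleteSegmentOperation makes every other pending operation irrelevant. The segment id is arbitrary.

import io.pravega.segmentstore.server.logs.operations.DeleteSegmentOperation;
import io.pravega.segmentstore.server.logs.operations.Operation;
import io.pravega.segmentstore.server.logs.operations.StreamSegmentSealOperation;
import java.util.Arrays;
import java.util.List;

public class ReconcileDeleteSketch {

    // Hypothetical helper: if any pending operation is a Delete, reconciliation can skip
    // the rest and simply delete the segment from Storage.
    static boolean deleteWins(List<Operation> pendingOperations) {
        return pendingOperations.stream().anyMatch(op -> op instanceof DeleteSegmentOperation);
    }

    public static void main(String[] args) {
        long segmentId = 1L; // arbitrary id for illustration
        List<Operation> pending = Arrays.asList(
                new StreamSegmentSealOperation(segmentId),
                new DeleteSegmentOperation(segmentId));
        System.out.println(deleteWins(pending)); // true: the Delete short-circuits the Seal
    }
}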

Example 8 with DeleteSegmentOperation

Use of io.pravega.segmentstore.server.logs.operations.DeleteSegmentOperation in project pravega by pravega.

From class ContainerMetadataUpdateTransaction, method acceptOperation.

/**
 * Accepts the given Operation. The Operation's effects are reflected in the pending transaction.
 * This method has no effect on Metadata Operations.
 * See OperationMetadataUpdater.acceptOperation for more details on behavior.
 *
 * @param operation The operation to accept.
 * @throws MetadataUpdateException If the given operation was rejected given the current state of the metadata.
 * @throws NullPointerException    If the operation is null.
 */
void acceptOperation(Operation operation) throws MetadataUpdateException {
    checkNotSealed();
    if (operation instanceof SegmentOperation) {
        val segmentMetadata = getSegmentUpdateTransaction(((SegmentOperation) operation).getStreamSegmentId());
        segmentMetadata.setLastUsed(operation.getSequenceNumber());
        if (operation instanceof StreamSegmentAppendOperation) {
            segmentMetadata.acceptOperation((StreamSegmentAppendOperation) operation);
        } else if (operation instanceof StreamSegmentSealOperation) {
            segmentMetadata.acceptOperation((StreamSegmentSealOperation) operation);
        } else if (operation instanceof MergeSegmentOperation) {
            MergeSegmentOperation mto = (MergeSegmentOperation) operation;
            SegmentMetadataUpdateTransaction sourceMetadata = getSegmentUpdateTransaction(mto.getSourceSegmentId());
            sourceMetadata.acceptAsSourceSegment(mto);
            sourceMetadata.setLastUsed(operation.getSequenceNumber());
            segmentMetadata.acceptAsTargetSegment(mto, sourceMetadata);
        } else if (operation instanceof UpdateAttributesOperation) {
            segmentMetadata.acceptOperation((UpdateAttributesOperation) operation);
        } else if (operation instanceof StreamSegmentTruncateOperation) {
            segmentMetadata.acceptOperation((StreamSegmentTruncateOperation) operation);
        } else if (operation instanceof DeleteSegmentOperation) {
            segmentMetadata.acceptOperation((DeleteSegmentOperation) operation);
        }
    }
    if (operation instanceof CheckpointOperationBase) {
        if (operation instanceof MetadataCheckpointOperation) {
            // A MetadataCheckpointOperation represents a valid truncation point. Record it as such.
            this.newTruncationPoints.add(operation.getSequenceNumber());
        }
        // Checkpoint operation has been serialized and we no longer need its contents. Clear it and release any
        // memory it used.
        ((CheckpointOperationBase) operation).clearContents();
    } else if (operation instanceof StreamSegmentMapOperation) {
        acceptMetadataOperation((StreamSegmentMapOperation) operation);
    }
}
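
A reduced version of the dispatch above may help show how a DeleteSegmentOperation reaches its branch. The describe helper below is hypothetical and covers only two branches; the real method delegates to SegmentMetadataUpdateTransaction rather than returning a String.

import io.pravega.segmentstore.server.SegmentOperation;
import io.pravega.segmentstore.server.logs.operations.DeleteSegmentOperation;
import io.pravega.segmentstore.server.logs.operations.Operation;
import io.pravega.segmentstore.server.logs.operations.StreamSegmentSealOperation;

public class AcceptDispatchSketch {

    // Control-flow sketch of the instanceof dispatch in acceptOperation, trimmed to the
    // branches relevant to this page.
    static String describe(Operation operation) {
        if (!(operation instanceof SegmentOperation)) {
            return "not a segment operation";
        }
        long segmentId = ((SegmentOperation) operation).getStreamSegmentId();
        if (operation instanceof DeleteSegmentOperation) {
            return "delete segment " + segmentId;
        } else if (operation instanceof StreamSegmentSealOperation) {
            return "seal segment " + segmentId;
        }
        return "other operation on segment " + segmentId;
    }

    public static void main(String[] args) {
        System.out.println(describe(new DeleteSegmentOperation(7L)));     // delete segment 7
        System.out.println(describe(new StreamSegmentSealOperation(7L))); // seal segment 7
    }
}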

Example 9 with DeleteSegmentOperation

Use of io.pravega.segmentstore.server.logs.operations.DeleteSegmentOperation in project pravega by pravega.

From class DurableLogTests, method testRecoveryWithIncrementalCheckpoints.

/**
 * Tests the DurableLog recovery process when there are multiple {@link MetadataCheckpointOperation}s added, with each
 * such checkpoint including information about evicted segments or segments which had their storage state modified.
 */
@Test
public void testRecoveryWithIncrementalCheckpoints() throws Exception {
    final int streamSegmentCount = 50;
    // Setup a DurableLog and start it.
    @Cleanup TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()));
    @Cleanup Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    // First DurableLog. We use this for generating data.
    val metadata1 = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup CacheStorage cacheStorage = new DirectMemoryCache(Integer.MAX_VALUE);
    @Cleanup CacheManager cacheManager = new CacheManager(CachePolicy.INFINITE, cacheStorage, executorService());
    List<Long> deletedIds;
    Set<Long> evictIds;
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata1, storage, cacheManager, executorService());
        DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata1, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();
        // Create some segments.
        val segmentIds = new ArrayList<>(createStreamSegmentsWithOperations(streamSegmentCount, durableLog));
        deletedIds = segmentIds.subList(0, 5);
        val mergedFromIds = segmentIds.subList(5, 10);
        // Must be the same length as mergedFromIds
        val mergedToIds = segmentIds.subList(10, 15);
        evictIds = new HashSet<>(segmentIds.subList(15, 20));
        val changeStorageStateIds = segmentIds.subList(20, segmentIds.size() - 5);
        // Append something to each segment.
        for (val segmentId : segmentIds) {
            if (!evictIds.contains(segmentId)) {
                durableLog.add(new StreamSegmentAppendOperation(segmentId, generateAppendData((int) (long) segmentId), null), OperationPriority.Normal, TIMEOUT).join();
            }
        }
        // Checkpoint 1.
        durableLog.checkpoint(TIMEOUT).join();
        // Delete some segments.
        for (val segmentId : deletedIds) {
            durableLog.add(new DeleteSegmentOperation(segmentId), OperationPriority.Normal, TIMEOUT).join();
        }
        // Checkpoint 2.
        durableLog.checkpoint(TIMEOUT).join();
        // Merge some segments.
        for (int i = 0; i < mergedFromIds.size(); i++) {
            durableLog.add(new StreamSegmentSealOperation(mergedFromIds.get(i)), OperationPriority.Normal, TIMEOUT).join();
            durableLog.add(new MergeSegmentOperation(mergedToIds.get(i), mergedFromIds.get(i)), OperationPriority.Normal, TIMEOUT).join();
        }
        // Checkpoint 3.
        durableLog.checkpoint(TIMEOUT).join();
        // Evict some segments.
        val evictableContainerMetadata = (EvictableMetadata) metadata1;
        metadata1.removeTruncationMarkers(metadata1.getOperationSequenceNumber());
        val toEvict = evictableContainerMetadata.getEvictionCandidates(Integer.MAX_VALUE, segmentIds.size()).stream().filter(m -> evictIds.contains(m.getId())).collect(Collectors.toList());
        val evicted = evictableContainerMetadata.cleanup(toEvict, Integer.MAX_VALUE);
        AssertExtensions.assertContainsSameElements("", evictIds, evicted.stream().map(SegmentMetadata::getId).collect(Collectors.toList()));
        // Checkpoint 4.
        durableLog.checkpoint(TIMEOUT).join();
        // Update storage state for some segments.
        for (val segmentId : changeStorageStateIds) {
            val sm = metadata1.getStreamSegmentMetadata(segmentId);
            if (segmentId % 3 == 0) {
                sm.setStorageLength(sm.getLength());
            }
            if (segmentId % 4 == 0) {
                sm.markSealed();
                sm.markSealedInStorage();
            }
            if (segmentId % 5 == 0) {
                sm.markDeleted();
                sm.markDeletedInStorage();
            }
        }
        // Checkpoint 5.
        durableLog.checkpoint(TIMEOUT).join();
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
    // Second DurableLog. We use this for recovery.
    val metadata2 = new MetadataBuilder(CONTAINER_ID).build();
    try (ContainerReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata2, storage, cacheManager, executorService());
        DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata2, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();
        // Validate metadata matches.
        val expectedSegmentIds = metadata1.getAllStreamSegmentIds();
        val actualSegmentIds = metadata2.getAllStreamSegmentIds();
        AssertExtensions.assertContainsSameElements("Unexpected set of recovered segments. Only Active segments expected to have been recovered.", expectedSegmentIds, actualSegmentIds);
        val expectedSegments = expectedSegmentIds.stream().sorted().map(metadata1::getStreamSegmentMetadata).collect(Collectors.toList());
        val actualSegments = actualSegmentIds.stream().sorted().map(metadata2::getStreamSegmentMetadata).collect(Collectors.toList());
        for (int i = 0; i < expectedSegments.size(); i++) {
            val e = expectedSegments.get(i);
            val a = actualSegments.get(i);
            SegmentMetadataComparer.assertEquals("Recovered segment metadata mismatch", e, a);
        }
        // Validate read index is as it should. Here, we can only check if the read indices for evicted segments are
        // no longer loaded; we do more thorough checks in the ContainerReadIndexTests suite.
        Streams.concat(evictIds.stream(), deletedIds.stream()).forEach(segmentId -> Assert.assertNull("Not expecting a read index for an evicted or deleted segment.", readIndex.getIndex(segmentId)));
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
}
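
The subList arithmetic at the top of the test is easy to misread, so the following stand-alone sketch reproduces just that partitioning with plain longs standing in for the 50 real segment ids returned by createStreamSegmentsWithOperations. It is illustration only and uses no Pravega types.

import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.LongStream;

public class SegmentPartitionSketch {
    public static void main(String[] args) {
        List<Long> segmentIds = LongStream.range(0, 50).boxed().collect(Collectors.toList());
        List<Long> deletedIds = segmentIds.subList(0, 5);                                  // 5 segments get a DeleteSegmentOperation
        List<Long> mergedFromIds = segmentIds.subList(5, 10);                              // 5 merge sources
        List<Long> mergedToIds = segmentIds.subList(10, 15);                               // 5 merge targets (same length as mergedFromIds)
        Set<Long> evictIds = new HashSet<>(segmentIds.subList(15, 20));                    // 5 evicted (they receive no appends)
        List<Long> changeStorageStateIds = segmentIds.subList(20, segmentIds.size() - 5);  // 25 storage-state changes; the last 5 ids only get appends
        System.out.println(deletedIds.size() + " deleted, " + mergedFromIds.size() + " merged into "
                + mergedToIds.size() + ", " + evictIds.size() + " evicted, "
                + changeStorageStateIds.size() + " storage-state changes");
    }
}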

Example 10 with DeleteSegmentOperation

Use of io.pravega.segmentstore.server.logs.operations.DeleteSegmentOperation in project pravega by pravega.

From class DataRecoveryTest, method testLogEditOperationObject.

@Test
public void testLogEditOperationObject() throws IOException {
    // Setup command object.
    STATE.set(new AdminCommandState());
    Properties pravegaProperties = new Properties();
    pravegaProperties.setProperty("pravegaservice.container.count", "1");
    pravegaProperties.setProperty("pravegaservice.clusterName", "pravega0");
    STATE.get().getConfigBuilder().include(pravegaProperties);
    // Delete Edit Operations should not take the newOperation field into account when checking equality.
    Assert.assertEquals(new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.DELETE_OPERATION, 1, 2, null), new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.DELETE_OPERATION, 1, 2, new DeleteSegmentOperation(1)));
    Assert.assertEquals(new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.DELETE_OPERATION, 1, 2, null).hashCode(), new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.DELETE_OPERATION, 1, 2, new DeleteSegmentOperation(1)).hashCode());
    // Other cases for equality of operations.
    Assert.assertEquals(new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 2, null), new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 2, null));
    Assert.assertEquals(new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 2, new DeleteSegmentOperation(1)), new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 2, new DeleteSegmentOperation(1)));
    // Equality of payload operations is checked by type and sequence number, which are the attributes common to the Operation class.
    DurableDataLogRepairCommand.LogEditOperation deleteOp = new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 2, new DeleteSegmentOperation(2));
    Assert.assertEquals(deleteOp, new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 2, new DeleteSegmentOperation(1)));
    deleteOp.getNewOperation().resetSequenceNumber(123);
    Assert.assertNotEquals(deleteOp, new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 2, new DeleteSegmentOperation(1)));
    // Test the cases for the same object reference and for null comparison.
    DurableDataLogRepairCommand.LogEditOperation sameOp = new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 2, new DeleteSegmentOperation(1));
    Assert.assertEquals(sameOp, sameOp);
    Assert.assertNotEquals(sameOp, null);
    Assert.assertNotEquals(new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 2, null), new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 2, new DeleteSegmentOperation(1)));
    Assert.assertNotEquals(new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.REPLACE_OPERATION, 1, 2, null), new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 2, new DeleteSegmentOperation(1)));
    Assert.assertNotEquals(new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 2, null), new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 2, 2, new DeleteSegmentOperation(1)));
    Assert.assertNotEquals(new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 2, new DeleteSegmentOperation(1)), new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 1, new DeleteSegmentOperation(1)));
}
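
The equality contract these assertions pin down can be summarized in a simplified stand-in class. The Edit class below is hypothetical and is not the real DurableDataLogRepairCommand.LogEditOperation; it only mirrors the behavior asserted above: DELETE_OPERATION edits ignore the payload entirely, and otherwise payloads compare equal when their concrete type and sequence number match. The hashCode deliberately excludes the payload, which is consistent with the hashCode assertion for delete edits.

import io.pravega.segmentstore.server.logs.operations.DeleteSegmentOperation;
import io.pravega.segmentstore.server.logs.operations.Operation;
import java.util.Objects;

public class LogEditEqualsSketch {
    enum EditType { ADD_OPERATION, DELETE_OPERATION, REPLACE_OPERATION }

    // Hypothetical stand-in for a log edit; field names are illustrative only.
    static final class Edit {
        final EditType type;
        final long initialId;
        final long finalId;
        final Operation payload; // may be null

        Edit(EditType type, long initialId, long finalId, Operation payload) {
            this.type = type;
            this.initialId = initialId;
            this.finalId = finalId;
            this.payload = payload;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) {
                return true;
            }
            if (!(o instanceof Edit)) {
                return false;
            }
            Edit other = (Edit) o;
            if (type != other.type || initialId != other.initialId || finalId != other.finalId) {
                return false;
            }
            if (type == EditType.DELETE_OPERATION) {
                return true; // payload ignored for delete edits
            }
            if (payload == null || other.payload == null) {
                return payload == other.payload;
            }
            // Payloads compare by concrete type and sequence number only.
            return payload.getClass() == other.payload.getClass()
                    && payload.getSequenceNumber() == other.payload.getSequenceNumber();
        }

        @Override
        public int hashCode() {
            return Objects.hash(type, initialId, finalId); // payload excluded
        }
    }

    public static void main(String[] args) {
        Edit a = new Edit(EditType.DELETE_OPERATION, 1, 2, null);
        Edit b = new Edit(EditType.DELETE_OPERATION, 1, 2, new DeleteSegmentOperation(1));
        System.out.println(a.equals(b) && a.hashCode() == b.hashCode()); // true, as in the first two assertions
    }
}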

Aggregations

DeleteSegmentOperation (io.pravega.segmentstore.server.logs.operations.DeleteSegmentOperation): 12 usages
MergeSegmentOperation (io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation): 8 usages
Test (org.junit.Test): 8 usages
SegmentProperties (io.pravega.segmentstore.contracts.SegmentProperties): 7 usages
StreamSegmentAppendOperation (io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation): 7 usages
StreamSegmentSealOperation (io.pravega.segmentstore.server.logs.operations.StreamSegmentSealOperation): 7 usages
StreamSegmentTruncateOperation (io.pravega.segmentstore.server.logs.operations.StreamSegmentTruncateOperation): 7 usages
UpdateAttributesOperation (io.pravega.segmentstore.server.logs.operations.UpdateAttributesOperation): 7 usages
lombok.val (lombok.val): 7 usages
StreamSegmentMapOperation (io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation): 6 usages
StreamSegmentNotExistsException (io.pravega.segmentstore.contracts.StreamSegmentNotExistsException): 5 usages
UpdateableSegmentMetadata (io.pravega.segmentstore.server.UpdateableSegmentMetadata): 5 usages
MetadataCheckpointOperation (io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation): 5 usages
Operation (io.pravega.segmentstore.server.logs.operations.Operation): 5 usages
StorageMetadataCheckpointOperation (io.pravega.segmentstore.server.logs.operations.StorageMetadataCheckpointOperation): 5 usages
ArrayList (java.util.ArrayList): 5 usages
HashMap (java.util.HashMap): 4 usages
AdminCommandState (io.pravega.cli.admin.AdminCommandState): 3 usages
CommandArgs (io.pravega.cli.admin.CommandArgs): 3 usages
Exceptions (io.pravega.common.Exceptions): 3 usages