Search in sources:

Example 16 with MergeSegmentOperation

use of io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation in project pravega by pravega.

the class SegmentAggregatorTests method generateMergeTransactionAndUpdateMetadata.

private StorageOperation generateMergeTransactionAndUpdateMetadata(long targetId, long sourceId, TestContext context) {
    UpdateableSegmentMetadata sourceMetadata = context.containerMetadata.getStreamSegmentMetadata(sourceId);
    UpdateableSegmentMetadata targetMetadata = context.containerMetadata.getStreamSegmentMetadata(targetId);
    MergeSegmentOperation op = new MergeSegmentOperation(targetMetadata.getId(), sourceMetadata.getId());
    op.setLength(sourceMetadata.getLength());
    op.setStreamSegmentOffset(targetMetadata.getLength());
    targetMetadata.setLength(targetMetadata.getLength() + sourceMetadata.getLength());
    sourceMetadata.markMerged();
    op.setSequenceNumber(context.containerMetadata.nextOperationSequenceNumber());
    return op;
}
Also used : UpdateableSegmentMetadata(io.pravega.segmentstore.server.UpdateableSegmentMetadata) MergeSegmentOperation(io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation)
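
For quick reference, here is a minimal sketch that isolates just the operation wiring from the helper above. It uses only the constructor and setters that appear in the example; the surrounding class, the parameter values, and the idea of passing the lengths in explicitly are placeholders, and the metadata updates performed by the test helper are deliberately omitted.

import io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation;

public final class MergeOperationSketch {

    // Builds a merge of `sourceId` into `targetId`, applied at the current end of the target.
    // The caller supplies both lengths; the helper above reads them from segment metadata instead.
    static MergeSegmentOperation buildMerge(long targetId, long sourceId,
                                            long targetLength, long sourceLength,
                                            long sequenceNumber) {
        MergeSegmentOperation op = new MergeSegmentOperation(targetId, sourceId);
        op.setLength(sourceLength);               // the merge spans the whole source segment
        op.setStreamSegmentOffset(targetLength);  // and lands at the current end of the target
        op.setSequenceNumber(sequenceNumber);     // hypothetical: the test takes this from the container metadata
        return op;
    }
}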

Example 17 with MergeSegmentOperation

use of io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation in project pravega by pravega.

the class SegmentAggregatorTests method testRecoveryEmptyMergeOperation.

/**
 * Tests a scenario where a MergeSegmentOperation needs to be recovered even though the merge has already been completed in Storage.
 */
@Test
public void testRecoveryEmptyMergeOperation() throws Exception {
    @Cleanup TestContext context = new TestContext(DEFAULT_CONFIG);
    // Create a parent segment and one transaction segment.
    context.segmentAggregator.initialize(TIMEOUT).join();
    // Part 1: When the source segment is missing from Storage, but metadata does not reflect that.
    SegmentAggregator ta0 = context.transactionAggregators[0];
    context.storage.create(ta0.getMetadata().getName(), TIMEOUT).join();
    context.storage.openWrite(ta0.getMetadata().getName()).thenCompose(txnHandle -> context.storage.seal(txnHandle, TIMEOUT)).join();
    val txn0Metadata = context.containerMetadata.getStreamSegmentMetadata(ta0.getMetadata().getId());
    txn0Metadata.markSealed();
    txn0Metadata.markSealedInStorage();
    ta0.initialize(TIMEOUT).join();
    context.storage.delete(context.storage.openWrite(txn0Metadata.getName()).join(), TIMEOUT).join();
    // This is the operation that should be reconciled.
    context.segmentAggregator.add(generateMergeTransactionAndUpdateMetadata(ta0.getMetadata().getId(), context));
    // Verify the operation was ack-ed.
    AtomicBoolean mergeAcked = new AtomicBoolean();
    context.dataSource.setCompleteMergeCallback((target, source) -> mergeAcked.set(true));
    context.segmentAggregator.flush(TIMEOUT).join();
    Assert.assertTrue("Merge was not ack-ed for deleted source segment.", mergeAcked.get());
    // Part 2: When the source segment's metadata indicates it was deleted.
    SegmentAggregator ta1 = context.transactionAggregators[1];
    context.storage.create(ta1.getMetadata().getName(), TIMEOUT).join();
    context.storage.openWrite(ta1.getMetadata().getName()).thenCompose(txnHandle -> context.storage.seal(txnHandle, TIMEOUT)).join();
    val txn1Metadata = context.containerMetadata.getStreamSegmentMetadata(ta1.getMetadata().getId());
    txn1Metadata.markDeleted();
    // This is the operation that should be reconciled.
    context.segmentAggregator.add(generateMergeTransactionAndUpdateMetadata(ta1.getMetadata().getId(), context));
    // Verify the operation was ack-ed.
    mergeAcked.set(false);
    context.dataSource.setCompleteMergeCallback((target, source) -> mergeAcked.set(true));
    context.segmentAggregator.flush(TIMEOUT).join();
    // Finally, verify that all operations were ack-ed back.
    Assert.assertTrue("Merge was not ack-ed for deleted source segment.", mergeAcked.get());
}
Also used : Arrays(java.util.Arrays) StreamSegmentNotExistsException(io.pravega.segmentstore.contracts.StreamSegmentNotExistsException) AssertExtensions(io.pravega.test.common.AssertExtensions) MergeSegmentOperation(io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation) RequiredArgsConstructor(lombok.RequiredArgsConstructor) Cleanup(lombok.Cleanup) Random(java.util.Random) UpdateableSegmentMetadata(io.pravega.segmentstore.server.UpdateableSegmentMetadata) SegmentProperties(io.pravega.segmentstore.contracts.SegmentProperties) AttributeUpdate(io.pravega.segmentstore.contracts.AttributeUpdate) SegmentHandle(io.pravega.segmentstore.storage.SegmentHandle) ByteArrayInputStream(java.io.ByteArrayInputStream) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) BufferView(io.pravega.common.util.BufferView) Duration(java.time.Duration) Map(java.util.Map) Operation(io.pravega.segmentstore.server.logs.operations.Operation) WriterFlushResult(io.pravega.segmentstore.server.WriterFlushResult) StreamSegmentTruncateOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentTruncateOperation) Attributes(io.pravega.segmentstore.contracts.Attributes) InMemoryStorage(io.pravega.segmentstore.storage.mocks.InMemoryStorage) Collectors(java.util.stream.Collectors) ErrorInjector(io.pravega.test.common.ErrorInjector) ByteBufferOutputStream(io.pravega.common.io.ByteBufferOutputStream) Stream(java.util.stream.Stream) ByteArraySegment(io.pravega.common.util.ByteArraySegment) ThreadPooledTestSuite(io.pravega.test.common.ThreadPooledTestSuite) BadOffsetException(io.pravega.segmentstore.contracts.BadOffsetException) Futures(io.pravega.common.concurrent.Futures) TestStorage(io.pravega.segmentstore.server.TestStorage) MetadataBuilder(io.pravega.segmentstore.server.MetadataBuilder) ByteArrayOutputStream(java.io.ByteArrayOutputStream) Exceptions(io.pravega.common.Exceptions) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) AtomicReference(java.util.concurrent.atomic.AtomicReference) Supplier(java.util.function.Supplier) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) UpdateableContainerMetadata(io.pravega.segmentstore.server.UpdateableContainerMetadata) SegmentMetadata(io.pravega.segmentstore.server.SegmentMetadata) Timeout(org.junit.rules.Timeout) OutputStream(java.io.OutputStream) ManualTimer(io.pravega.segmentstore.server.ManualTimer) UpdateAttributesOperation(io.pravega.segmentstore.server.logs.operations.UpdateAttributesOperation) AttributeId(io.pravega.segmentstore.contracts.AttributeId) IntentionalException(io.pravega.test.common.IntentionalException) lombok.val(lombok.val) IOException(java.io.IOException) Test(org.junit.Test) TimeUnit(java.util.concurrent.TimeUnit) AtomicLong(java.util.concurrent.atomic.AtomicLong) AbstractMap(java.util.AbstractMap) AttributeUpdateCollection(io.pravega.segmentstore.contracts.AttributeUpdateCollection) Rule(org.junit.Rule) CachedStreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) TreeMap(java.util.TreeMap) StreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation) Preconditions(com.google.common.base.Preconditions) AttributeUpdateType(io.pravega.segmentstore.contracts.AttributeUpdateType) RandomFactory(io.pravega.common.hash.RandomFactory) 
DataCorruptionException(io.pravega.segmentstore.server.DataCorruptionException) Assert(org.junit.Assert) Collections(java.util.Collections) DeleteSegmentOperation(io.pravega.segmentstore.server.logs.operations.DeleteSegmentOperation) StreamSegmentSealOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentSealOperation) InputStream(java.io.InputStream) lombok.val(lombok.val) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Cleanup(lombok.Cleanup) Test(org.junit.Test)
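
The test above relies on a single-transaction variant of generateMergeTransactionAndUpdateMetadata that is not included in this listing. A plausible sketch, under the assumption that it simply merges the given transaction into the segment owned by context.segmentAggregator by delegating to the two-segment helper from Example 16, would be:

// Hypothetical sketch (not the verbatim Pravega helper): merge the transaction into the
// aggregator's own segment by delegating to the (targetId, sourceId, context) overload above.
private StorageOperation generateMergeTransactionAndUpdateMetadata(long transactionId, TestContext context) {
    long targetId = context.segmentAggregator.getMetadata().getId();
    return generateMergeTransactionAndUpdateMetadata(targetId, transactionId, context);
}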

Example 18 with MergeSegmentOperation

use of io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation in project pravega by pravega.

the class SegmentAggregatorTests method testAddWithBadInput.

/**
 * Tests the add() method with invalid arguments.
 */
@Test
public void testAddWithBadInput() throws Exception {
    final long badTransactionId = 12345;
    final long badParentId = 56789;
    final String badParentName = "Foo_Parent";
    final String badTransactionName = "Foo_Transaction";
    @Cleanup TestContext context = new TestContext(DEFAULT_CONFIG);
    // We only need one Transaction for this test.
    SegmentAggregator transactionAggregator = context.transactionAggregators[0];
    SegmentMetadata transactionMetadata = transactionAggregator.getMetadata();
    context.segmentAggregator.initialize(TIMEOUT).join();
    transactionAggregator.initialize(TIMEOUT).join();
    // Create 2 more segments that can be used to verify MergeSegmentOperation.
    context.containerMetadata.mapStreamSegmentId(badParentName, badParentId);
    UpdateableSegmentMetadata badTransactionMetadata = context.containerMetadata.mapStreamSegmentId(badTransactionName, badTransactionId);
    badTransactionMetadata.setLength(0);
    badTransactionMetadata.setStorageLength(0);
    // 1. MergeSegmentOperation
    // Verify that MergeSegmentOperation cannot be added to the Segment to be merged.
    AssertExtensions.assertThrows("add() allowed a MergeSegmentOperation on the Transaction segment.", () -> transactionAggregator.add(generateSimpleMergeTransaction(transactionMetadata.getId(), context)), ex -> ex instanceof IllegalArgumentException);
    // 2. StreamSegmentSealOperation.
    // 2a. Verify we cannot add a StreamSegmentSealOperation if the segment is not sealed yet.
    AssertExtensions.assertThrows("add() allowed a StreamSegmentSealOperation for a non-sealed segment.", () -> {
        @Cleanup SegmentAggregator badTransactionAggregator = new SegmentAggregator(badTransactionMetadata, context.dataSource, context.storage, DEFAULT_CONFIG, context.timer, executorService());
        badTransactionAggregator.initialize(TIMEOUT).join();
        badTransactionAggregator.add(generateSimpleSeal(badTransactionId, context));
    }, ex -> ex instanceof DataCorruptionException);
    // 2b. Verify that nothing is allowed after Seal (after adding one append to the Transaction Segment and sealing it).
    StorageOperation transactionAppend1 = generateAppendAndUpdateMetadata(0, transactionMetadata.getId(), context);
    transactionAggregator.add(transactionAppend1);
    transactionAggregator.add(generateSealAndUpdateMetadata(transactionMetadata.getId(), context));
    AssertExtensions.assertThrows("add() allowed operation after seal.", () -> transactionAggregator.add(generateSimpleAppend(transactionMetadata.getId(), context)), ex -> ex instanceof DataCorruptionException);
    // 3. CachedStreamSegmentAppendOperation.
    final StorageOperation parentAppend1 = generateAppendAndUpdateMetadata(0, SEGMENT_ID, context);
    // 3a. Verify we cannot add StreamSegmentAppendOperations.
    AssertExtensions.assertThrows("add() allowed a StreamSegmentAppendOperation.", () -> {
        // We have the correct offset, but we did not increase the Length.
        StreamSegmentAppendOperation badAppend = new StreamSegmentAppendOperation(parentAppend1.getStreamSegmentId(), parentAppend1.getStreamSegmentOffset(), new ByteArraySegment(new byte[(int) parentAppend1.getLength()]), null);
        context.segmentAggregator.add(badAppend);
    }, ex -> ex instanceof IllegalArgumentException);
    // Add this one append to the parent (nothing unusual here); we'll use this for the next tests.
    context.segmentAggregator.add(parentAppend1);
    // 3b. Verify we cannot add anything beyond the DurableLogOffset (offset or offset+length).
    val appendData = new ByteArraySegment("foo".getBytes());
    AssertExtensions.assertThrows("add() allowed an operation beyond the DurableLogOffset (offset).", () -> {
        // We have the correct offset, but we did not increase the Length.
        StreamSegmentAppendOperation badAppend = new StreamSegmentAppendOperation(context.segmentAggregator.getMetadata().getId(), appendData, null);
        badAppend.setStreamSegmentOffset(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength());
        context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(badAppend));
    }, ex -> ex instanceof DataCorruptionException);
    ((UpdateableSegmentMetadata) context.segmentAggregator.getMetadata()).setLength(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength() + 1);
    AssertExtensions.assertThrows("add() allowed an operation beyond the DurableLogOffset (offset+length).", () -> {
        // We have the correct offset, but the append exceeds the Length by 1 byte.
        StreamSegmentAppendOperation badAppend = new StreamSegmentAppendOperation(context.segmentAggregator.getMetadata().getId(), appendData, null);
        badAppend.setStreamSegmentOffset(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength());
        context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(badAppend));
    }, ex -> ex instanceof DataCorruptionException);
    // 3c. Verify contiguity (offsets - we cannot have gaps in the data).
    AssertExtensions.assertThrows("add() allowed an operation with wrong offset (too small).", () -> {
        StreamSegmentAppendOperation badOffsetAppend = new StreamSegmentAppendOperation(context.segmentAggregator.getMetadata().getId(), appendData, null);
        badOffsetAppend.setStreamSegmentOffset(0);
        context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(badOffsetAppend));
    }, ex -> ex instanceof DataCorruptionException);
    AssertExtensions.assertThrows("add() allowed an operation with wrong offset (too large).", () -> {
        StreamSegmentAppendOperation badOffsetAppend = new StreamSegmentAppendOperation(context.segmentAggregator.getMetadata().getId(), appendData, null);
        badOffsetAppend.setStreamSegmentOffset(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength() + 1);
        context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(badOffsetAppend));
    }, ex -> ex instanceof DataCorruptionException);
    AssertExtensions.assertThrows("add() allowed an operation with wrong offset (too large, but no pending operations).", () -> {
        @Cleanup SegmentAggregator badTransactionAggregator = new SegmentAggregator(badTransactionMetadata, context.dataSource, context.storage, DEFAULT_CONFIG, context.timer, executorService());
        badTransactionMetadata.setLength(100);
        badTransactionAggregator.initialize(TIMEOUT).join();
        StreamSegmentAppendOperation badOffsetAppend = new StreamSegmentAppendOperation(context.segmentAggregator.getMetadata().getId(), appendData, null);
        badOffsetAppend.setStreamSegmentOffset(1);
        context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(badOffsetAppend));
    }, ex -> ex instanceof DataCorruptionException);
    // 4. Verify Segment Id match.
    AssertExtensions.assertThrows("add() allowed an Append operation with wrong Segment Id.", () -> {
        StreamSegmentAppendOperation badIdAppend = new StreamSegmentAppendOperation(Integer.MAX_VALUE, appendData, null);
        badIdAppend.setStreamSegmentOffset(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength());
        context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(badIdAppend));
    }, ex -> ex instanceof IllegalArgumentException);
    AssertExtensions.assertThrows("add() allowed a StreamSegmentSealOperation with wrong SegmentId.", () -> {
        StreamSegmentSealOperation badIdSeal = new StreamSegmentSealOperation(Integer.MAX_VALUE);
        badIdSeal.setStreamSegmentOffset(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength());
        context.segmentAggregator.add(badIdSeal);
    }, ex -> ex instanceof IllegalArgumentException);
    AssertExtensions.assertThrows("add() allowed a MergeSegmentOperation with wrong SegmentId.", () -> {
        MergeSegmentOperation badIdMerge = new MergeSegmentOperation(Integer.MAX_VALUE, transactionMetadata.getId());
        badIdMerge.setStreamSegmentOffset(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength());
        badIdMerge.setLength(1);
        context.segmentAggregator.add(badIdMerge);
    }, ex -> ex instanceof IllegalArgumentException);
    // 5. Truncations.
    AssertExtensions.assertThrows("add() allowed a StreamSegmentTruncateOperation with a truncation offset beyond the one in the metadata.", () -> {
        StreamSegmentTruncateOperation op = new StreamSegmentTruncateOperation(SEGMENT_ID, 10);
        op.setSequenceNumber(context.containerMetadata.nextOperationSequenceNumber());
        context.segmentAggregator.add(op);
    }, ex -> ex instanceof DataCorruptionException);
}
Also used : lombok.val(lombok.val) UpdateableSegmentMetadata(io.pravega.segmentstore.server.UpdateableSegmentMetadata) ByteArraySegment(io.pravega.common.util.ByteArraySegment) StreamSegmentSealOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentSealOperation) CachedStreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation) StreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation) Cleanup(lombok.Cleanup) MergeSegmentOperation(io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation) CachedStreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation) UpdateableSegmentMetadata(io.pravega.segmentstore.server.UpdateableSegmentMetadata) SegmentMetadata(io.pravega.segmentstore.server.SegmentMetadata) StreamSegmentTruncateOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentTruncateOperation) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) DataCorruptionException(io.pravega.segmentstore.server.DataCorruptionException) Test(org.junit.Test)
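
The "simple" generators used in this test (generateSimpleMergeTransaction, generateSimpleSeal, generateSimpleAppend) are defined elsewhere in SegmentAggregatorTests and are not shown here. As a rough sketch, assuming generateSimpleMergeTransaction builds the merge from the current metadata without mutating it (an assumption suggested by the naming, not confirmed by this listing), it could look like:

// Hypothetical sketch: build a merge of the transaction into the parent segment (SEGMENT_ID)
// straight from the current metadata, without adjusting lengths or marking anything as merged.
private StorageOperation generateSimpleMergeTransaction(long transactionId, TestContext context) {
    UpdateableSegmentMetadata sourceMetadata = context.containerMetadata.getStreamSegmentMetadata(transactionId);
    UpdateableSegmentMetadata targetMetadata = context.containerMetadata.getStreamSegmentMetadata(SEGMENT_ID);
    MergeSegmentOperation op = new MergeSegmentOperation(targetMetadata.getId(), sourceMetadata.getId());
    op.setLength(sourceMetadata.getLength());
    op.setStreamSegmentOffset(targetMetadata.getLength());
    return op;
}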

Example 19 with MergeSegmentOperation

use of io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation in project pravega by pravega.

the class ContainerMetadataUpdateTransaction method acceptOperation.

/**
 * Accepts the given Operation. The Operation's effects are reflected in the pending transaction.
 * This method has no effect on Metadata Operations.
 * See OperationMetadataUpdater.acceptOperation for more details on behavior.
 *
 * @param operation The operation to accept.
 * @throws MetadataUpdateException If the given operation was rejected given the current state of the metadata.
 * @throws NullPointerException    If the operation is null.
 */
void acceptOperation(Operation operation) throws MetadataUpdateException {
    checkNotSealed();
    if (operation instanceof SegmentOperation) {
        val segmentMetadata = getSegmentUpdateTransaction(((SegmentOperation) operation).getStreamSegmentId());
        segmentMetadata.setLastUsed(operation.getSequenceNumber());
        if (operation instanceof StreamSegmentAppendOperation) {
            segmentMetadata.acceptOperation((StreamSegmentAppendOperation) operation);
        } else if (operation instanceof StreamSegmentSealOperation) {
            segmentMetadata.acceptOperation((StreamSegmentSealOperation) operation);
        } else if (operation instanceof MergeSegmentOperation) {
            MergeSegmentOperation mto = (MergeSegmentOperation) operation;
            SegmentMetadataUpdateTransaction sourceMetadata = getSegmentUpdateTransaction(mto.getSourceSegmentId());
            sourceMetadata.acceptAsSourceSegment(mto);
            sourceMetadata.setLastUsed(operation.getSequenceNumber());
            segmentMetadata.acceptAsTargetSegment(mto, sourceMetadata);
        } else if (operation instanceof UpdateAttributesOperation) {
            segmentMetadata.acceptOperation((UpdateAttributesOperation) operation);
        } else if (operation instanceof StreamSegmentTruncateOperation) {
            segmentMetadata.acceptOperation((StreamSegmentTruncateOperation) operation);
        } else if (operation instanceof DeleteSegmentOperation) {
            segmentMetadata.acceptOperation((DeleteSegmentOperation) operation);
        }
    }
    if (operation instanceof CheckpointOperationBase) {
        if (operation instanceof MetadataCheckpointOperation) {
            // A MetadataCheckpointOperation represents a valid truncation point. Record it as such.
            this.newTruncationPoints.add(operation.getSequenceNumber());
        }
        // Checkpoint operation has been serialized and we no longer need its contents. Clear it and release any
        // memory it used.
        ((CheckpointOperationBase) operation).clearContents();
    } else if (operation instanceof StreamSegmentMapOperation) {
        acceptMetadataOperation((StreamSegmentMapOperation) operation);
    }
}
Also used : lombok.val(lombok.val) DeleteSegmentOperation(io.pravega.segmentstore.server.logs.operations.DeleteSegmentOperation) StreamSegmentSealOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentSealOperation) StreamSegmentTruncateOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentTruncateOperation) StorageMetadataCheckpointOperation(io.pravega.segmentstore.server.logs.operations.StorageMetadataCheckpointOperation) MetadataCheckpointOperation(io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation) UpdateAttributesOperation(io.pravega.segmentstore.server.logs.operations.UpdateAttributesOperation) MergeSegmentOperation(io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation) SegmentOperation(io.pravega.segmentstore.server.SegmentOperation) DeleteSegmentOperation(io.pravega.segmentstore.server.logs.operations.DeleteSegmentOperation) StreamSegmentMapOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation) StreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation) CheckpointOperationBase(io.pravega.segmentstore.server.logs.operations.CheckpointOperationBase) MergeSegmentOperation(io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation)
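
For context around the merge branch: acceptOperation is normally preceded by a pre-processing step that validates the operation against the current metadata (for a merge, that the source segment is sealed and not already merged) before the effects are applied to both the source and target update transactions as shown above. The fragment below is an illustrative sketch of that pairing, not verbatim Pravega code; the updateTransaction variable, the ids, and the sequence number are assumed scaffolding.

// Illustrative sketch: drive a MergeSegmentOperation through the metadata update transaction.
MergeSegmentOperation merge = new MergeSegmentOperation(targetSegmentId, sourceSegmentId);
merge.setSequenceNumber(nextSequenceNumber);
// Validate the merge against the current metadata state before applying it.
updateTransaction.preProcessOperation(merge);
// Apply the effects: the source is marked as merged and the target's length is extended,
// exactly as the MergeSegmentOperation branch of acceptOperation above describes.
updateTransaction.acceptOperation(merge);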

Example 20 with MergeSegmentOperation

use of io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation in project pravega by pravega.

the class DurableLogTests method testRecoveryWithIncrementalCheckpoints.

/**
 * Tests the DurableLog recovery process when there are multiple {@link MetadataCheckpointOperation}s added, with each
 * such checkpoint including information about evicted segments or segments which had their storage state modified.
 */
@Test
public void testRecoveryWithIncrementalCheckpoints() throws Exception {
    final int streamSegmentCount = 50;
    // Setup a DurableLog and start it.
    @Cleanup TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()));
    @Cleanup Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    // First DurableLog. We use this for generating data.
    val metadata1 = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup CacheStorage cacheStorage = new DirectMemoryCache(Integer.MAX_VALUE);
    @Cleanup CacheManager cacheManager = new CacheManager(CachePolicy.INFINITE, cacheStorage, executorService());
    List<Long> deletedIds;
    Set<Long> evictIds;
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata1, storage, cacheManager, executorService());
        DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata1, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();
        // Create some segments.
        val segmentIds = new ArrayList<>(createStreamSegmentsWithOperations(streamSegmentCount, durableLog));
        deletedIds = segmentIds.subList(0, 5);
        val mergedFromIds = segmentIds.subList(5, 10);
        // Must be the same length as mergedFromIds.
        val mergedToIds = segmentIds.subList(10, 15);
        evictIds = new HashSet<>(segmentIds.subList(15, 20));
        val changeStorageStateIds = segmentIds.subList(20, segmentIds.size() - 5);
        // Append something to each segment.
        for (val segmentId : segmentIds) {
            if (!evictIds.contains(segmentId)) {
                durableLog.add(new StreamSegmentAppendOperation(segmentId, generateAppendData((int) (long) segmentId), null), OperationPriority.Normal, TIMEOUT).join();
            }
        }
        // Checkpoint 1.
        durableLog.checkpoint(TIMEOUT).join();
        // Delete some segments.
        for (val segmentId : deletedIds) {
            durableLog.add(new DeleteSegmentOperation(segmentId), OperationPriority.Normal, TIMEOUT).join();
        }
        // Checkpoint 2.
        durableLog.checkpoint(TIMEOUT).join();
        // Merge some segments.
        for (int i = 0; i < mergedFromIds.size(); i++) {
            durableLog.add(new StreamSegmentSealOperation(mergedFromIds.get(i)), OperationPriority.Normal, TIMEOUT).join();
            durableLog.add(new MergeSegmentOperation(mergedToIds.get(i), mergedFromIds.get(i)), OperationPriority.Normal, TIMEOUT).join();
        }
        // Checkpoint 3.
        durableLog.checkpoint(TIMEOUT).join();
        // Evict some segments.
        val evictableContainerMetadata = (EvictableMetadata) metadata1;
        metadata1.removeTruncationMarkers(metadata1.getOperationSequenceNumber());
        val toEvict = evictableContainerMetadata.getEvictionCandidates(Integer.MAX_VALUE, segmentIds.size()).stream().filter(m -> evictIds.contains(m.getId())).collect(Collectors.toList());
        val evicted = evictableContainerMetadata.cleanup(toEvict, Integer.MAX_VALUE);
        AssertExtensions.assertContainsSameElements("", evictIds, evicted.stream().map(SegmentMetadata::getId).collect(Collectors.toList()));
        // Checkpoint 4.
        durableLog.checkpoint(TIMEOUT).join();
        // Update storage state for some segments.
        for (val segmentId : changeStorageStateIds) {
            val sm = metadata1.getStreamSegmentMetadata(segmentId);
            if (segmentId % 3 == 0) {
                sm.setStorageLength(sm.getLength());
            }
            if (segmentId % 4 == 0) {
                sm.markSealed();
                sm.markSealedInStorage();
            }
            if (segmentId % 5 == 0) {
                sm.markDeleted();
                sm.markDeletedInStorage();
            }
        }
        // Checkpoint 5.
        durableLog.checkpoint(TIMEOUT).join();
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
    // Second DurableLog. We use this for recovery.
    val metadata2 = new MetadataBuilder(CONTAINER_ID).build();
    try (ContainerReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata2, storage, cacheManager, executorService());
        DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata2, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();
        // Validate metadata matches.
        val expectedSegmentIds = metadata1.getAllStreamSegmentIds();
        val actualSegmentIds = metadata2.getAllStreamSegmentIds();
        AssertExtensions.assertContainsSameElements("Unexpected set of recovered segments. Only Active segments expected to have been recovered.", expectedSegmentIds, actualSegmentIds);
        val expectedSegments = expectedSegmentIds.stream().sorted().map(metadata1::getStreamSegmentMetadata).collect(Collectors.toList());
        val actualSegments = actualSegmentIds.stream().sorted().map(metadata2::getStreamSegmentMetadata).collect(Collectors.toList());
        for (int i = 0; i < expectedSegments.size(); i++) {
            val e = expectedSegments.get(i);
            val a = actualSegments.get(i);
            SegmentMetadataComparer.assertEquals("Recovered segment metadata mismatch", e, a);
        }
        // Validate read index is as it should. Here, we can only check if the read indices for evicted segments are
        // no longer loaded; we do more thorough checks in the ContainerReadIndexTests suite.
        Streams.concat(evictIds.stream(), deletedIds.stream()).forEach(segmentId -> Assert.assertNull("Not expecting a read index for an evicted or deleted segment.", readIndex.getIndex(segmentId)));
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
}
Also used : Storage(io.pravega.segmentstore.storage.Storage) StreamSegmentInformation(io.pravega.segmentstore.contracts.StreamSegmentInformation) StreamSegmentNotExistsException(io.pravega.segmentstore.contracts.StreamSegmentNotExistsException) OperationPriority(io.pravega.segmentstore.server.logs.operations.OperationPriority) SneakyThrows(lombok.SneakyThrows) StorageMetadataCheckpointOperation(io.pravega.segmentstore.server.logs.operations.StorageMetadataCheckpointOperation) AssertExtensions(io.pravega.test.common.AssertExtensions) MergeSegmentOperation(io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation) TimeoutException(java.util.concurrent.TimeoutException) Cleanup(lombok.Cleanup) LogAddress(io.pravega.segmentstore.storage.LogAddress) UpdateableSegmentMetadata(io.pravega.segmentstore.server.UpdateableSegmentMetadata) DataLogNotAvailableException(io.pravega.segmentstore.storage.DataLogNotAvailableException) SegmentProperties(io.pravega.segmentstore.contracts.SegmentProperties) StreamSegmentSealedException(io.pravega.segmentstore.contracts.StreamSegmentSealedException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) CheckpointOperationBase(io.pravega.segmentstore.server.logs.operations.CheckpointOperationBase) InMemoryStorageFactory(io.pravega.segmentstore.storage.mocks.InMemoryStorageFactory) Duration(java.time.Duration) CachePolicy(io.pravega.segmentstore.server.CachePolicy) Operation(io.pravega.segmentstore.server.logs.operations.Operation) InMemoryDurableDataLogFactory(io.pravega.segmentstore.storage.mocks.InMemoryDurableDataLogFactory) ServiceListeners(io.pravega.segmentstore.server.ServiceListeners) ContainerOfflineException(io.pravega.segmentstore.server.ContainerOfflineException) Predicate(java.util.function.Predicate) Collection(java.util.Collection) Set(java.util.Set) CompletionException(java.util.concurrent.CompletionException) Streams(com.google.common.collect.Streams) DataLogWriterNotPrimaryException(io.pravega.segmentstore.storage.DataLogWriterNotPrimaryException) Collectors(java.util.stream.Collectors) SegmentMetadataComparer(io.pravega.segmentstore.server.SegmentMetadataComparer) ErrorInjector(io.pravega.test.common.ErrorInjector) List(java.util.List) ByteArraySegment(io.pravega.common.util.ByteArraySegment) StreamSegmentContainerMetadata(io.pravega.segmentstore.server.containers.StreamSegmentContainerMetadata) DirectMemoryCache(io.pravega.segmentstore.storage.cache.DirectMemoryCache) TestUtils(io.pravega.test.common.TestUtils) Queue(java.util.Queue) Futures(io.pravega.common.concurrent.Futures) CacheManager(io.pravega.segmentstore.server.CacheManager) MetadataBuilder(io.pravega.segmentstore.server.MetadataBuilder) StreamSegmentException(io.pravega.segmentstore.contracts.StreamSegmentException) Exceptions(io.pravega.common.Exceptions) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) AtomicReference(java.util.concurrent.atomic.AtomicReference) Supplier(java.util.function.Supplier) CacheStorage(io.pravega.segmentstore.storage.cache.CacheStorage) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) EvictableMetadata(io.pravega.segmentstore.server.EvictableMetadata) UpdateableContainerMetadata(io.pravega.segmentstore.server.UpdateableContainerMetadata) SegmentMetadata(io.pravega.segmentstore.server.SegmentMetadata) ReadIndexConfig(io.pravega.segmentstore.server.reading.ReadIndexConfig) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) Timeout(org.junit.rules.Timeout) 
DurableDataLogException(io.pravega.segmentstore.storage.DurableDataLogException) OperationComparer(io.pravega.segmentstore.server.logs.operations.OperationComparer) ContainerReadIndex(io.pravega.segmentstore.server.reading.ContainerReadIndex) StreamSegmentMapOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation) DataLogDisabledException(io.pravega.segmentstore.storage.DataLogDisabledException) Iterator(java.util.Iterator) IntentionalException(io.pravega.test.common.IntentionalException) lombok.val(lombok.val) MetadataCheckpointOperation(io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation) OperationLog(io.pravega.segmentstore.server.OperationLog) IOException(java.io.IOException) Test(org.junit.Test) TestDurableDataLog(io.pravega.segmentstore.server.TestDurableDataLog) Service(com.google.common.util.concurrent.Service) TestDurableDataLogFactory(io.pravega.segmentstore.server.TestDurableDataLogFactory) TimeUnit(java.util.concurrent.TimeUnit) AbstractMap(java.util.AbstractMap) Rule(org.junit.Rule) CachedStreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) StreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation) Data(lombok.Data) ReadIndex(io.pravega.segmentstore.server.ReadIndex) DataCorruptionException(io.pravega.segmentstore.server.DataCorruptionException) Assert(org.junit.Assert) Collections(java.util.Collections) CompositeArrayView(io.pravega.common.util.CompositeArrayView) DeleteSegmentOperation(io.pravega.segmentstore.server.logs.operations.DeleteSegmentOperation) StreamSegmentSealOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentSealOperation) InputStream(java.io.InputStream) DirectMemoryCache(io.pravega.segmentstore.storage.cache.DirectMemoryCache) StreamSegmentSealOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentSealOperation) ArrayList(java.util.ArrayList) CachedStreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation) StreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation) Cleanup(lombok.Cleanup) DeleteSegmentOperation(io.pravega.segmentstore.server.logs.operations.DeleteSegmentOperation) EvictableMetadata(io.pravega.segmentstore.server.EvictableMetadata) CacheManager(io.pravega.segmentstore.server.CacheManager) lombok.val(lombok.val) CacheStorage(io.pravega.segmentstore.storage.cache.CacheStorage) MetadataBuilder(io.pravega.segmentstore.server.MetadataBuilder) ContainerReadIndex(io.pravega.segmentstore.server.reading.ContainerReadIndex) ReadIndex(io.pravega.segmentstore.server.ReadIndex) InMemoryDurableDataLogFactory(io.pravega.segmentstore.storage.mocks.InMemoryDurableDataLogFactory) ContainerReadIndex(io.pravega.segmentstore.server.reading.ContainerReadIndex) MergeSegmentOperation(io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation) UpdateableSegmentMetadata(io.pravega.segmentstore.server.UpdateableSegmentMetadata) SegmentMetadata(io.pravega.segmentstore.server.SegmentMetadata) Storage(io.pravega.segmentstore.storage.Storage) CacheStorage(io.pravega.segmentstore.storage.cache.CacheStorage) TestDurableDataLogFactory(io.pravega.segmentstore.server.TestDurableDataLogFactory) Test(org.junit.Test)
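
createStreamSegmentsWithOperations(...) and generateAppendData(int) are helpers defined elsewhere in DurableLogTests and are not part of this listing. To give an idea of the append payload, the sketch below assumes generateAppendData produces a small deterministic buffer keyed off the segment id so that recovered contents can be compared; the real helper may differ.

// Hypothetical sketch: a small, reproducible payload derived from the seed so that each
// segment receives distinct data. ByteArraySegment implements BufferView, so the result
// can be passed directly to the StreamSegmentAppendOperation constructor used above.
private ByteArraySegment generateAppendData(int seed) {
    byte[] data = new byte[Math.max(1, seed)];
    for (int i = 0; i < data.length; i++) {
        data[i] = (byte) (seed + i);
    }
    return new ByteArraySegment(data);
}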

Aggregations

MergeSegmentOperation (io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation) 29
StreamSegmentAppendOperation (io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation) 19
lombok.val (lombok.val) 16
StreamSegmentSealOperation (io.pravega.segmentstore.server.logs.operations.StreamSegmentSealOperation) 15
UpdateableSegmentMetadata (io.pravega.segmentstore.server.UpdateableSegmentMetadata) 13
StorageOperation (io.pravega.segmentstore.server.logs.operations.StorageOperation) 12
DeleteSegmentOperation (io.pravega.segmentstore.server.logs.operations.DeleteSegmentOperation) 11
Operation (io.pravega.segmentstore.server.logs.operations.Operation) 11
StreamSegmentTruncateOperation (io.pravega.segmentstore.server.logs.operations.StreamSegmentTruncateOperation) 11
DataCorruptionException (io.pravega.segmentstore.server.DataCorruptionException) 10
CachedStreamSegmentAppendOperation (io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation) 10
ArrayList (java.util.ArrayList) 10
StreamSegmentNotExistsException (io.pravega.segmentstore.contracts.StreamSegmentNotExistsException) 9
SegmentMetadata (io.pravega.segmentstore.server.SegmentMetadata) 9
SegmentProperties (io.pravega.segmentstore.contracts.SegmentProperties) 8
StreamSegmentSealedException (io.pravega.segmentstore.contracts.StreamSegmentSealedException) 8
SegmentOperation (io.pravega.segmentstore.server.SegmentOperation) 8
Collectors (java.util.stream.Collectors) 8
Exceptions (io.pravega.common.Exceptions) 7
Futures (io.pravega.common.concurrent.Futures) 7