
Example 21 with StorageOperation

Use of io.pravega.segmentstore.server.logs.operations.StorageOperation in project pravega, from class SegmentAggregatorTests, method testAddWithBadInput.

/**
 * Tests the add() method with invalid arguments.
 */
@Test
public void testAddWithBadInput() throws Exception {
    final long badTransactionId = 12345;
    final long badParentId = 56789;
    final String badParentName = "Foo_Parent";
    final String badTransactionName = "Foo_Transaction";
    @Cleanup TestContext context = new TestContext(DEFAULT_CONFIG);
    // We only need one Transaction for this test.
    SegmentAggregator transactionAggregator = context.transactionAggregators[0];
    SegmentMetadata transactionMetadata = transactionAggregator.getMetadata();
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.storage.create(transactionMetadata.getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT).join();
    transactionAggregator.initialize(TIMEOUT).join();
    // Create 2 more segments that can be used to verify MergeTransactionOperation.
    context.containerMetadata.mapStreamSegmentId(badParentName, badParentId);
    UpdateableSegmentMetadata badTransactionMetadata = context.containerMetadata.mapStreamSegmentId(badTransactionName, badTransactionId, badParentId);
    badTransactionMetadata.setLength(0);
    badTransactionMetadata.setStorageLength(0);
    context.storage.create(badTransactionMetadata.getName(), TIMEOUT).join();
    // 1. MergeTransactionOperation
    // 1a. Verify that MergeTransactionOperation cannot be added to the Transaction segment.
    AssertExtensions.assertThrows("add() allowed a MergeTransactionOperation on the Transaction segment.", () -> transactionAggregator.add(generateSimpleMergeTransaction(transactionMetadata.getId(), context)), ex -> ex instanceof IllegalArgumentException);
    // 1b. Verify that MergeTransactionOperation has the right parent.
    AssertExtensions.assertThrows("add() allowed a MergeTransactionOperation on the parent for a Transaction that did not have it as a parent.", () -> transactionAggregator.add(generateSimpleMergeTransaction(badTransactionId, context)), ex -> ex instanceof IllegalArgumentException);
    // 2. StreamSegmentSealOperation.
    // 2a. Verify we cannot add a StreamSegmentSealOperation if the segment is not sealed yet.
    AssertExtensions.assertThrows("add() allowed a StreamSegmentSealOperation for a non-sealed segment.", () -> {
        @Cleanup SegmentAggregator badTransactionAggregator = new SegmentAggregator(badTransactionMetadata, context.dataSource, context.storage, DEFAULT_CONFIG, context.timer, executorService());
        badTransactionAggregator.initialize(TIMEOUT).join();
        badTransactionAggregator.add(generateSimpleSeal(badTransactionId, context));
    }, ex -> ex instanceof DataCorruptionException);
    // 2b. Verify that nothing is allowed after Seal (after adding one append to the Transaction Segment and sealing it).
    StorageOperation transactionAppend1 = generateAppendAndUpdateMetadata(0, transactionMetadata.getId(), context);
    transactionAggregator.add(transactionAppend1);
    transactionAggregator.add(generateSealAndUpdateMetadata(transactionMetadata.getId(), context));
    AssertExtensions.assertThrows("add() allowed operation after seal.", () -> transactionAggregator.add(generateSimpleAppend(transactionMetadata.getId(), context)), ex -> ex instanceof DataCorruptionException);
    // 3. CachedStreamSegmentAppendOperation.
    final StorageOperation parentAppend1 = generateAppendAndUpdateMetadata(0, SEGMENT_ID, context);
    // 3a. Verify we cannot add StreamSegmentAppendOperations.
    AssertExtensions.assertThrows("add() allowed a StreamSegmentAppendOperation.", () -> {
        // Even with the correct offset, a raw (non-cached) StreamSegmentAppendOperation must be rejected.
        StreamSegmentAppendOperation badAppend = new StreamSegmentAppendOperation(parentAppend1.getStreamSegmentId(), parentAppend1.getStreamSegmentOffset(), new byte[(int) parentAppend1.getLength()], null);
        context.segmentAggregator.add(badAppend);
    }, ex -> ex instanceof IllegalArgumentException);
    // Add this one append to the parent (nothing unusual here); we'll use this for the next tests.
    context.segmentAggregator.add(parentAppend1);
    // 3b. Verify we cannot add anything beyond the DurableLogOffset (offset or offset+length).
    AssertExtensions.assertThrows("add() allowed an operation beyond the DurableLogOffset (offset).", () -> {
        // We have the correct offset, but we did not increase the Length.
        StreamSegmentAppendOperation badAppend = new StreamSegmentAppendOperation(context.segmentAggregator.getMetadata().getId(), "foo".getBytes(), null);
        badAppend.setStreamSegmentOffset(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength());
        context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(badAppend));
    }, ex -> ex instanceof DataCorruptionException);
    ((UpdateableSegmentMetadata) context.segmentAggregator.getMetadata()).setLength(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength() + 1);
    AssertExtensions.assertThrows("add() allowed an operation beyond the DurableLogOffset (offset+length).", () -> {
        // We have the correct offset, but the append exceeds the Length by 1 byte.
        StreamSegmentAppendOperation badAppend = new StreamSegmentAppendOperation(context.segmentAggregator.getMetadata().getId(), "foo".getBytes(), null);
        badAppend.setStreamSegmentOffset(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength());
        context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(badAppend));
    }, ex -> ex instanceof DataCorruptionException);
    // 3c. Verify contiguity (offsets - we cannot have gaps in the data).
    AssertExtensions.assertThrows("add() allowed an operation with wrong offset (too small).", () -> {
        StreamSegmentAppendOperation badOffsetAppend = new StreamSegmentAppendOperation(context.segmentAggregator.getMetadata().getId(), "foo".getBytes(), null);
        badOffsetAppend.setStreamSegmentOffset(0);
        context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(badOffsetAppend));
    }, ex -> ex instanceof DataCorruptionException);
    AssertExtensions.assertThrows("add() allowed an operation with wrong offset (too large).", () -> {
        StreamSegmentAppendOperation badOffsetAppend = new StreamSegmentAppendOperation(context.segmentAggregator.getMetadata().getId(), "foo".getBytes(), null);
        badOffsetAppend.setStreamSegmentOffset(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength() + 1);
        context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(badOffsetAppend));
    }, ex -> ex instanceof DataCorruptionException);
    AssertExtensions.assertThrows("add() allowed an operation with wrong offset (too large, but no pending operations).", () -> {
        @Cleanup SegmentAggregator badTransactionAggregator = new SegmentAggregator(badTransactionMetadata, context.dataSource, context.storage, DEFAULT_CONFIG, context.timer, executorService());
        badTransactionMetadata.setLength(100);
        badTransactionAggregator.initialize(TIMEOUT).join();
        StreamSegmentAppendOperation badOffsetAppend = new StreamSegmentAppendOperation(context.segmentAggregator.getMetadata().getId(), "foo".getBytes(), null);
        badOffsetAppend.setStreamSegmentOffset(1);
        context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(badOffsetAppend));
    }, ex -> ex instanceof DataCorruptionException);
    // 4. Verify Segment Id match.
    AssertExtensions.assertThrows("add() allowed an Append operation with wrong Segment Id.", () -> {
        StreamSegmentAppendOperation badIdAppend = new StreamSegmentAppendOperation(Integer.MAX_VALUE, "foo".getBytes(), null);
        badIdAppend.setStreamSegmentOffset(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength());
        context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(badIdAppend));
    }, ex -> ex instanceof IllegalArgumentException);
    AssertExtensions.assertThrows("add() allowed a StreamSegmentSealOperation with wrong SegmentId.", () -> {
        StreamSegmentSealOperation badIdSeal = new StreamSegmentSealOperation(Integer.MAX_VALUE);
        badIdSeal.setStreamSegmentOffset(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength());
        context.segmentAggregator.add(badIdSeal);
    }, ex -> ex instanceof IllegalArgumentException);
    AssertExtensions.assertThrows("add() allowed a MergeTransactionOperation with wrong SegmentId.", () -> {
        MergeTransactionOperation badIdMerge = new MergeTransactionOperation(Integer.MAX_VALUE, transactionMetadata.getId());
        badIdMerge.setStreamSegmentOffset(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength());
        badIdMerge.setLength(1);
        context.segmentAggregator.add(badIdMerge);
    }, ex -> ex instanceof IllegalArgumentException);
    // 5. Truncations.
    AssertExtensions.assertThrows("add() allowed a StreamSegmentTruncateOperation with a truncation offset beyond the one in the metadata.", () -> {
        StreamSegmentTruncateOperation op = new StreamSegmentTruncateOperation(SEGMENT_ID, 10);
        op.setSequenceNumber(context.containerMetadata.nextOperationSequenceNumber());
        context.segmentAggregator.add(op);
    }, ex -> ex instanceof DataCorruptionException);
}
Also used : UpdateableSegmentMetadata(io.pravega.segmentstore.server.UpdateableSegmentMetadata) SegmentMetadata(io.pravega.segmentstore.server.SegmentMetadata) StreamSegmentSealOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentSealOperation) StreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation) CachedStreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation) MergeTransactionOperation(io.pravega.segmentstore.server.logs.operations.MergeTransactionOperation) StreamSegmentTruncateOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentTruncateOperation) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) DataCorruptionException(io.pravega.segmentstore.server.DataCorruptionException) Cleanup(lombok.Cleanup) Test(org.junit.Test)
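
Every negative case above leans on the same try/catch pattern. As a point of reference, here is a minimal, hypothetical sketch of an assertThrows-style helper; the real io.pravega.test.common.AssertExtensions provides richer overloads (including async variants), so treat the names and signature below as illustrative only.

import java.util.function.Predicate;

final class AssertThrowsSketch {
    @FunctionalInterface
    interface RunnableWithException {
        void run() throws Exception;
    }

    // Runs the action and fails unless it throws an exception matching the predicate.
    static void assertThrows(String message, RunnableWithException action, Predicate<Throwable> tester) {
        try {
            action.run();
        } catch (Throwable ex) {
            if (!tester.test(ex)) {
                throw new AssertionError(message + " (unexpected exception type: " + ex + ")", ex);
            }
            return; // The expected exception was thrown and matched the predicate.
        }
        throw new AssertionError(message + " (no exception was thrown)");
    }
}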

Example 22 with StorageOperation

Use of io.pravega.segmentstore.server.logs.operations.StorageOperation in project pravega, from class SegmentAggregatorTests, method testSegmentMissingData.

/**
 * Tests the case when a Segment's data is missing from the ReadIndex (but the Segment itself is not deleted).
 */
@Test
public void testSegmentMissingData() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    @Cleanup TestContext context = new TestContext(config);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT).join();
    // Add one operation big enough to trigger a Flush.
    byte[] appendData = new byte[config.getFlushThresholdBytes() + 1];
    StorageOperation appendOp = generateAppendAndUpdateMetadata(SEGMENT_ID, appendData, context);
    context.segmentAggregator.add(appendOp);
    Assert.assertTrue("Unexpected value returned by mustFlush() (size threshold).", context.segmentAggregator.mustFlush());
    // Clear the append data.
    context.dataSource.clearAppendData();
    // Call flush() and verify it throws DataCorruptionException.
    AssertExtensions.assertThrows("flush() did not throw when unable to read data from ReadIndex.", () -> context.segmentAggregator.flush(TIMEOUT), ex -> ex instanceof DataCorruptionException);
}
Also used : StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) DataCorruptionException(io.pravega.segmentstore.server.DataCorruptionException) Cleanup(lombok.Cleanup) Test(org.junit.Test)
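
The trigger in this test is purely size-based: a single append of getFlushThresholdBytes() + 1 bytes must make mustFlush() return true. A hedged sketch of such a threshold check, with illustrative names rather than Pravega's actual internals:

final class FlushPolicySketch {
    private final int flushThresholdBytes; // mirrors WriterConfig.FLUSH_THRESHOLD_BYTES
    private long outstandingBytes;         // accumulated, not-yet-flushed append bytes

    FlushPolicySketch(int flushThresholdBytes) {
        this.flushThresholdBytes = flushThresholdBytes;
    }

    void add(long appendLength) {
        this.outstandingBytes += appendLength;
    }

    boolean mustFlush() {
        // One append of flushThresholdBytes + 1 (as in the test) trips this immediately.
        return this.outstandingBytes >= this.flushThresholdBytes;
    }
}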

Example 23 with StorageOperation

Use of io.pravega.segmentstore.server.logs.operations.StorageOperation in project pravega, from class SegmentAggregatorTests, method testMerge.

/**
 * Tests the flush() method with Append and MergeTransactionOperations.
 * Overall strategy:
 * 1. Create one Parent Segment and N Transaction Segments.
 * 2. Populate all Transaction Segments with data.
 * 3. Seal the first N/2 Transaction Segments.
 * 4. Add some Appends, interspersed with Merge Transaction Ops, to the Parent (for all Transactions).
 * 5. Call flush() repeatedly on all Segments, until nothing is flushed anymore. Verify only the first N/2 Transactions were merged.
 * 6. Seal the remaining N/2 Transaction Segments.
 * 7. Call flush() repeatedly on all Segments, until nothing is flushed anymore. Verify all Transactions were merged.
 * 8. Verify the Parent Segment has all the data (from itself and its Transactions), in the correct order.
 */
@Test
@SuppressWarnings("checkstyle:CyclomaticComplexity")
public void testMerge() throws Exception {
    // This is the number of appends per Segment/Transaction - there will be a lot of appends here.
    final int appendCount = 100;
    final WriterConfig config = WriterConfig.builder()
            .with(WriterConfig.FLUSH_THRESHOLD_BYTES, appendCount * 50) // Extra high length threshold.
            .with(WriterConfig.FLUSH_THRESHOLD_MILLIS, 1000L)
            .with(WriterConfig.MAX_FLUSH_SIZE_BYTES, 10000)
            .with(WriterConfig.MIN_READ_TIMEOUT_MILLIS, 10L)
            .build();
    @Cleanup TestContext context = new TestContext(config);
    // Create and initialize all segments.
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT).join();
    for (SegmentAggregator a : context.transactionAggregators) {
        context.storage.create(a.getMetadata().getName(), TIMEOUT).join();
        a.initialize(TIMEOUT).join();
    }
    // Store written data by segment - so we can check it later.
    HashMap<Long, ByteArrayOutputStream> dataBySegment = new HashMap<>();
    val actualMergeOpAck = new ArrayList<Map.Entry<Long, Long>>();
    context.dataSource.setCompleteMergeCallback((target, source) -> actualMergeOpAck.add(new AbstractMap.SimpleImmutableEntry<Long, Long>(target, source)));
    // Add a few appends to each Transaction aggregator and to the parent aggregator.
    // Seal the first half of the Transaction aggregators (thus, those Transactions will be fully flushed).
    HashSet<Long> sealedTransactionIds = new HashSet<>();
    for (int i = 0; i < context.transactionAggregators.length; i++) {
        SegmentAggregator transactionAggregator = context.transactionAggregators[i];
        long transactionId = transactionAggregator.getMetadata().getId();
        ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
        dataBySegment.put(transactionId, writtenData);
        for (int appendId = 0; appendId < appendCount; appendId++) {
            StorageOperation appendOp = generateAppendAndUpdateMetadata(appendId, transactionId, context);
            transactionAggregator.add(appendOp);
            getAppendData(appendOp, writtenData, context);
        }
        if (i < context.transactionAggregators.length / 2) {
            // We only seal the first half.
            transactionAggregator.add(generateSealAndUpdateMetadata(transactionId, context));
            sealedTransactionIds.add(transactionId);
        }
    }
    // Add MergeTransactionOperations to the parent aggregator, making sure we have both the following cases:
    // * Two or more consecutive MergeTransactionOperations both for Transactions that are sealed and for those that are not.
    // * MergeTransactionOperations with appends interspersed between them (in the parent), both for sealed Transactions and non-sealed Transactions.
    long parentSegmentId = context.segmentAggregator.getMetadata().getId();
    @Cleanup ByteArrayOutputStream parentData = new ByteArrayOutputStream();
    for (int transIndex = 0; transIndex < context.transactionAggregators.length; transIndex++) {
        // This helps ensure that we have both interspersed appends, and consecutive MergeTransactionOperations in the parent.
        if (transIndex % 2 == 1) {
            StorageOperation appendOp = generateAppendAndUpdateMetadata(transIndex, parentSegmentId, context);
            context.segmentAggregator.add(appendOp);
            getAppendData(appendOp, parentData, context);
        }
        // Merge this Transaction into the parent & record its data in the final parent data array.
        long transactionId = context.transactionAggregators[transIndex].getMetadata().getId();
        context.segmentAggregator.add(generateMergeTransactionAndUpdateMetadata(transactionId, context));
        ByteArrayOutputStream transactionData = dataBySegment.get(transactionId);
        parentData.write(transactionData.toByteArray());
        transactionData.close();
    }
    // Flush all the Aggregators as long as at least one of them reports being able to flush and that it did flush something.
    flushAllSegments(context);
    // Now check to see that only those Transactions that were sealed were merged.
    for (SegmentAggregator transactionAggregator : context.transactionAggregators) {
        SegmentMetadata transactionMetadata = transactionAggregator.getMetadata();
        boolean expectedMerged = sealedTransactionIds.contains(transactionMetadata.getId());
        if (expectedMerged) {
            Assert.assertTrue("Transaction to be merged was not marked as deleted in metadata.", transactionMetadata.isDeleted());
            Assert.assertFalse("Transaction to be merged still exists in storage.", context.storage.exists(transactionMetadata.getName(), TIMEOUT).join());
        } else {
            Assert.assertFalse("Transaction not to be merged was marked as deleted in metadata.", transactionMetadata.isDeleted());
            SegmentProperties sp = context.storage.getStreamSegmentInfo(transactionMetadata.getName(), TIMEOUT).join();
            Assert.assertFalse("Transaction not to be merged is sealed in storage.", sp.isSealed());
        }
    }
    // Then seal the rest of the Transactions and re-run the flush on the parent a few times.
    for (SegmentAggregator a : context.transactionAggregators) {
        long transactionId = a.getMetadata().getId();
        if (!sealedTransactionIds.contains(transactionId)) {
            // This Transaction was not sealed (and merged) previously. Do it now.
            a.add(generateSealAndUpdateMetadata(transactionId, context));
            sealedTransactionIds.add(transactionId);
        }
    }
    // Flush all the Aggregators as long as at least one of them reports being able to flush and that it did flush something.
    flushAllSegments(context);
    // Verify that all Transactions are now fully merged.
    for (SegmentAggregator transactionAggregator : context.transactionAggregators) {
        SegmentMetadata transactionMetadata = transactionAggregator.getMetadata();
        Assert.assertTrue("Merged Transaction was not marked as deleted in metadata.", transactionMetadata.isDeleted());
        Assert.assertFalse("Merged Transaction still exists in storage.", context.storage.exists(transactionMetadata.getName(), TIMEOUT).join());
    }
    // Verify that, in the end, the contents of the parent are as expected.
    verifySegmentData(parentData.toByteArray(), context);
    // Verify calls to completeMerge.
    val expectedMergeOpSources = Arrays.stream(context.transactionAggregators).map(a -> a.getMetadata().getId()).collect(Collectors.toSet());
    Assert.assertEquals("Unexpected number of calls to completeMerge.", expectedMergeOpSources.size(), actualMergeOpAck.size());
    val actualMergeOpSources = actualMergeOpAck.stream().map(Map.Entry::getValue).collect(Collectors.toSet());
    AssertExtensions.assertContainsSameElements("Unexpected sources for invocation to completeMerge.", expectedMergeOpSources, actualMergeOpSources);
    for (Map.Entry<Long, Long> e : actualMergeOpAck) {
        Assert.assertEquals("Unexpected target for invocation to completeMerge.", context.segmentAggregator.getMetadata().getId(), (long) e.getKey());
    }
}
Also used : lombok.val(lombok.val) Arrays(java.util.Arrays) StreamSegmentNotExistsException(io.pravega.segmentstore.contracts.StreamSegmentNotExistsException) AssertExtensions(io.pravega.test.common.AssertExtensions) RequiredArgsConstructor(lombok.RequiredArgsConstructor) Cleanup(lombok.Cleanup) Random(java.util.Random) UpdateableSegmentMetadata(io.pravega.segmentstore.server.UpdateableSegmentMetadata) SegmentProperties(io.pravega.segmentstore.contracts.SegmentProperties) SegmentHandle(io.pravega.segmentstore.storage.SegmentHandle) ByteArrayInputStream(java.io.ByteArrayInputStream) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Duration(java.time.Duration) Map(java.util.Map) Operation(io.pravega.segmentstore.server.logs.operations.Operation) StreamSegmentTruncateOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentTruncateOperation) InMemoryStorage(io.pravega.segmentstore.storage.mocks.InMemoryStorage) Collectors(java.util.stream.Collectors) ErrorInjector(io.pravega.test.common.ErrorInjector) IOUtils(org.apache.commons.io.IOUtils) ThreadPooledTestSuite(io.pravega.test.common.ThreadPooledTestSuite) BadOffsetException(io.pravega.segmentstore.contracts.BadOffsetException) TestStorage(io.pravega.segmentstore.server.TestStorage) MetadataBuilder(io.pravega.segmentstore.server.MetadataBuilder) ByteArrayOutputStream(java.io.ByteArrayOutputStream) Exceptions(io.pravega.common.Exceptions) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) AtomicReference(java.util.concurrent.atomic.AtomicReference) Supplier(java.util.function.Supplier) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) UpdateableContainerMetadata(io.pravega.segmentstore.server.UpdateableContainerMetadata) SegmentMetadata(io.pravega.segmentstore.server.SegmentMetadata) Timeout(org.junit.rules.Timeout) OutputStream(java.io.OutputStream) ManualTimer(io.pravega.segmentstore.server.ManualTimer) FixedByteArrayOutputStream(io.pravega.common.io.FixedByteArrayOutputStream) IntentionalException(io.pravega.test.common.IntentionalException) IOException(java.io.IOException) Test(org.junit.Test) TimeUnit(java.util.concurrent.TimeUnit) AtomicLong(java.util.concurrent.atomic.AtomicLong) AbstractMap(java.util.AbstractMap) Rule(org.junit.Rule) CachedStreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) TreeMap(java.util.TreeMap) StreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation) DataCorruptionException(io.pravega.segmentstore.server.DataCorruptionException) Assert(org.junit.Assert) MergeTransactionOperation(io.pravega.segmentstore.server.logs.operations.MergeTransactionOperation) StreamSegmentSealOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentSealOperation) InputStream(java.io.InputStream)
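
Steps 5 and 7 of the strategy depend on the test-local flushAllSegments(context) helper, whose body is not shown here. A hedged reconstruction of what such a helper might do, assuming the flush() result exposes getFlushedBytes()/getMergedBytes() accessors; names are illustrative.

private void flushUntilQuiescent(SegmentAggregator parent, SegmentAggregator[] transactions, Duration timeout) throws Exception {
    boolean movedBytes = true;
    while (movedBytes) {
        // Keep making passes until a full pass over all aggregators moves no bytes.
        movedBytes = false;
        for (SegmentAggregator a : transactions) {
            if (a.mustFlush()) {
                val result = a.flush(timeout).get(timeout.toMillis(), TimeUnit.MILLISECONDS);
                movedBytes |= result.getFlushedBytes() + result.getMergedBytes() > 0;
            }
        }
        if (parent.mustFlush()) {
            val result = parent.flush(timeout).get(timeout.toMillis(), TimeUnit.MILLISECONDS);
            movedBytes |= result.getFlushedBytes() + result.getMergedBytes() > 0;
        }
    }
}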

Example 24 with StorageOperation

Use of io.pravega.segmentstore.server.logs.operations.StorageOperation in project pravega, from class SegmentAggregatorTests, method testProgressiveReconcile.

/**
 * Tests the ability of the SegmentAggregator to reconcile operations as they are added to it (it has detected possible
 * data corruption but does not yet have all the operations it needs to reconcile, so it must stay in reconciliation
 * mode until all disagreements have been resolved).
 */
@Test
public void testProgressiveReconcile() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    final int appendCount = 1000;
    final int failEvery = 3;
    final int maxFlushLoopCount = 5;
    @Cleanup TestContext context = new TestContext(config);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT).join();
    @Cleanup ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
    ArrayList<StorageOperation> appendOperations = new ArrayList<>();
    ArrayList<InputStream> appendData = new ArrayList<>();
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        appendOperations.add(appendOp);
        byte[] ad = new byte[(int) appendOp.getLength()];
        getAppendData(appendOp, new FixedByteArrayOutputStream(ad, 0, ad.length), context);
        appendData.add(new ByteArrayInputStream(ad));
        writtenData.write(ad);
    }
    // Add each operation one at a time and, every X appends, write ahead to storage (X-1 appends). This will force a
    // good mix of reconciles and normal appends.
    int errorCount = 0;
    int flushCount = 0;
    for (int i = 0; i < appendOperations.size(); i++) {
        StorageOperation op = appendOperations.get(i);
        context.segmentAggregator.add(op);
        if (i % failEvery == 0) {
            // Corrupt the storage by adding the next failEvery-1 ops to Storage.
            for (int j = i; j < i + failEvery - 1 && j < appendOperations.size(); j++) {
                long offset = context.storage.getStreamSegmentInfo(SEGMENT_NAME, TIMEOUT).join().getLength();
                context.storage.write(writeHandle(SEGMENT_NAME), offset, appendData.get(j), appendData.get(j).available(), TIMEOUT).join();
            }
        }
        // Force a flush by incrementing the time by a lot.
        context.increaseTime(config.getFlushThresholdTime().toMillis() + 1);
        int flushLoopCount = 0;
        while (context.segmentAggregator.mustFlush()) {
            try {
                flushCount++;
                context.segmentAggregator.flush(TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
            } catch (Exception ex) {
                errorCount++;
                Assert.assertTrue("", Exceptions.unwrap(ex) instanceof BadOffsetException);
            }
            flushLoopCount++;
            AssertExtensions.assertLessThan("Too many flush-loops for a single attempt.", maxFlushLoopCount, flushLoopCount);
        }
    }
    AssertExtensions.assertGreaterThan("At least one flush was expected.", 0, flushCount);
    AssertExtensions.assertGreaterThan("At least one BadOffsetException was expected.", 0, errorCount);
    // Verify data.
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    long storageLength = context.storage.getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join().getLength();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageLength);
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0, actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}
Also used : FixedByteArrayOutputStream(io.pravega.common.io.FixedByteArrayOutputStream) ByteArrayInputStream(java.io.ByteArrayInputStream) InputStream(java.io.InputStream) ArrayList(java.util.ArrayList) ByteArrayOutputStream(java.io.ByteArrayOutputStream) Cleanup(lombok.Cleanup) StreamSegmentNotExistsException(io.pravega.segmentstore.contracts.StreamSegmentNotExistsException) BadOffsetException(io.pravega.segmentstore.contracts.BadOffsetException) IntentionalException(io.pravega.test.common.IntentionalException) IOException(java.io.IOException) DataCorruptionException(io.pravega.segmentstore.server.DataCorruptionException) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) Test(org.junit.Test)
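
The corruption injected above works because flush() eventually hits a BadOffsetException: Storage is already ahead of where the aggregator believes the segment ends. A sketch of the core reconciliation check, illustrative only (the method and parameter names are assumptions, not Pravega's internals):

private boolean canReconcile(TestStorage storage, String segmentName, StorageOperation firstPending, Duration timeout) {
    long storageLength = storage.getStreamSegmentInfo(segmentName, timeout).join().getLength();
    // If Storage already extends past the first pending operation, that data was
    // written by an earlier (unacknowledged) flush; the overlapping operations can
    // be verified byte-for-byte against Storage and acked instead of rewritten.
    return storageLength > firstPending.getStreamSegmentOffset();
}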

Example 25 with StorageOperation

Use of io.pravega.segmentstore.server.logs.operations.StorageOperation in project pravega, from class SegmentAggregatorTests, method testRecovery.

// endregion
// region Recovery
/**
 * Tests a scenario where data that is about to be added already exists in Storage. This would most likely happen
 * in a recovery situation, where we committed the data but did not properly ack/truncate it from the DataSource.
 */
@Test
@SuppressWarnings("checkstyle:CyclomaticComplexity")
public void testRecovery() throws Exception {
    final int appendCount = 100;
    @Cleanup TestContext context = new TestContext(DEFAULT_CONFIG);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    for (SegmentAggregator a : context.transactionAggregators) {
        context.storage.create(a.getMetadata().getName(), TIMEOUT).join();
    }
    // Store written data by segment - so we can check it later.
    HashMap<Long, ByteArrayOutputStream> dataBySegment = new HashMap<>();
    ArrayList<StorageOperation> operations = new ArrayList<>();
    val expectedMergeOpAck = new ArrayList<Map.Entry<Long, Long>>();
    // Create a segment and all its Transactions (do not initialize yet).
    ByteArrayOutputStream parentData = new ByteArrayOutputStream();
    dataBySegment.put(context.segmentAggregator.getMetadata().getId(), parentData);
    // All Transactions have appends (the first 1/3 of Transactions just have appends, which also exist in Storage).
    for (int i = 0; i < context.transactionAggregators.length; i++) {
        SegmentAggregator transactionAggregator = context.transactionAggregators[i];
        long transactionId = transactionAggregator.getMetadata().getId();
        ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
        dataBySegment.put(transactionId, writtenData);
        for (int appendId = 0; appendId < appendCount; appendId++) {
            StorageOperation appendOp = generateAppendAndUpdateMetadata(appendId, transactionId, context);
            operations.add(appendOp);
            getAppendData(appendOp, writtenData, context);
        }
        // The second and third thirds of the Transactions are sealed, with the seals already in Storage, but we'll still add the operations.
        boolean isSealed = i >= context.transactionAggregators.length / 3;
        if (isSealed) {
            operations.add(generateSealAndUpdateMetadata(transactionId, context));
        }
        // Last 1/3 of Transactions are also merged.
        boolean isMerged = isSealed && (i >= context.transactionAggregators.length * 2 / 3);
        if (isMerged) {
            operations.add(generateMergeTransactionAndUpdateMetadata(transactionId, context));
            ByteArrayOutputStream transactionData = dataBySegment.get(transactionId);
            parentData.write(transactionData.toByteArray());
            transactionData.close();
            dataBySegment.remove(transactionId);
            expectedMergeOpAck.add(new AbstractMap.SimpleImmutableEntry<>(context.segmentAggregator.getMetadata().getId(), transactionId));
        }
    }
    // Populate the storage.
    for (Map.Entry<Long, ByteArrayOutputStream> e : dataBySegment.entrySet()) {
        context.storage.write(writeHandle(context.containerMetadata.getStreamSegmentMetadata(e.getKey()).getName()), 0, new ByteArrayInputStream(e.getValue().toByteArray()), e.getValue().size(), TIMEOUT).join();
    }
    for (SegmentAggregator a : context.transactionAggregators) {
        if (a.getMetadata().isSealed()) {
            context.storage.seal(writeHandle(a.getMetadata().getName()), TIMEOUT).join();
        }
        if (a.getMetadata().isMerged() || a.getMetadata().isDeleted()) {
            context.storage.delete(writeHandle(a.getMetadata().getName()), TIMEOUT).join();
        }
    }
    // Now initialize the SegmentAggregators
    context.segmentAggregator.initialize(TIMEOUT).join();
    for (SegmentAggregator a : context.transactionAggregators) {
        a.initialize(TIMEOUT).join();
    }
    // Add all operations we had so far.
    val actualMergeOpAck = new ArrayList<Map.Entry<Long, Long>>();
    context.dataSource.setCompleteMergeCallback((target, source) -> actualMergeOpAck.add(new AbstractMap.SimpleImmutableEntry<Long, Long>(target, source)));
    for (StorageOperation o : operations) {
        int transactionIndex = (int) (o.getStreamSegmentId() - TRANSACTION_ID_START);
        SegmentAggregator a = transactionIndex < 0 ? context.segmentAggregator : context.transactionAggregators[transactionIndex];
        a.add(o);
    }
    context.dataSource.setCompleteMergeCallback(null);
    // And now finish up the operations (merge all Transactions).
    for (SegmentAggregator a : context.transactionAggregators) {
        if (!a.getMetadata().isSealed()) {
            a.add(generateSealAndUpdateMetadata(a.getMetadata().getId(), context));
        }
        if (!a.getMetadata().isMerged()) {
            context.segmentAggregator.add(generateMergeTransactionAndUpdateMetadata(a.getMetadata().getId(), context));
            ByteArrayOutputStream transactionData = dataBySegment.get(a.getMetadata().getId());
            parentData.write(transactionData.toByteArray());
            transactionData.close();
            dataBySegment.remove(a.getMetadata().getId());
        }
    }
    flushAllSegments(context);
    // Verify that, in the end, the contents of the parent are as expected.
    verifySegmentData(parentData.toByteArray(), context);
    AssertExtensions.assertListEquals("Unexpected callback calls to completeMerge for already processed operations.", expectedMergeOpAck, actualMergeOpAck, Map.Entry::equals);
}
Also used : lombok.val(lombok.val) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) ByteArrayOutputStream(java.io.ByteArrayOutputStream) FixedByteArrayOutputStream(io.pravega.common.io.FixedByteArrayOutputStream) Cleanup(lombok.Cleanup) AbstractMap(java.util.AbstractMap) ByteArrayInputStream(java.io.ByteArrayInputStream) AtomicLong(java.util.concurrent.atomic.AtomicLong) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) Map(java.util.Map) TreeMap(java.util.TreeMap) Test(org.junit.Test)
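
The recovery scenario hinges on initialize() adopting Storage as the source of truth before any of the re-added operations are processed. A hedged sketch of that handshake, reusing the types imported above plus java.util.concurrent.CompletableFuture (illustrative only; not Pravega's actual implementation):

private CompletableFuture<Void> initializeSketch(UpdateableSegmentMetadata metadata, TestStorage storage, Duration timeout) {
    return storage.getStreamSegmentInfo(metadata.getName(), timeout)
                  .thenAccept(sp -> {
                      // Record how many bytes were already durably written...
                      metadata.setStorageLength(sp.getLength());
                      if (sp.isSealed()) {
                          // ...and whether the seal already made it to Storage.
                          metadata.markSealedInStorage();
                      }
                  });
}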

Aggregations

StorageOperation (io.pravega.segmentstore.server.logs.operations.StorageOperation): 34 usages
Test (org.junit.Test): 24 usages
Cleanup (lombok.Cleanup): 21 usages
DataCorruptionException (io.pravega.segmentstore.server.DataCorruptionException): 15 usages
UpdateableSegmentMetadata (io.pravega.segmentstore.server.UpdateableSegmentMetadata): 15 usages
FixedByteArrayOutputStream (io.pravega.common.io.FixedByteArrayOutputStream): 14 usages
StreamSegmentNotExistsException (io.pravega.segmentstore.contracts.StreamSegmentNotExistsException): 14 usages
MergeTransactionOperation (io.pravega.segmentstore.server.logs.operations.MergeTransactionOperation): 14 usages
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 14 usages
ArrayList (java.util.ArrayList): 14 usages
AtomicLong (java.util.concurrent.atomic.AtomicLong): 14 usages
StreamSegmentAppendOperation (io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation): 13 usages
lombok.val (lombok.val): 13 usages
SegmentProperties (io.pravega.segmentstore.contracts.SegmentProperties): 12 usages
SegmentMetadata (io.pravega.segmentstore.server.SegmentMetadata): 12 usages
InputStream (java.io.InputStream): 12 usages
BadOffsetException (io.pravega.segmentstore.contracts.BadOffsetException): 11 usages
CachedStreamSegmentAppendOperation (io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation): 11 usages
Operation (io.pravega.segmentstore.server.logs.operations.Operation): 11 usages
IntentionalException (io.pravega.test.common.IntentionalException): 11 usages