
Example 1 with StorageOperation

use of io.pravega.segmentstore.server.logs.operations.StorageOperation in project pravega by pravega.

the class SegmentAggregatorTests method testReconcileTruncate.

/**
 * Tests the ability of the SegmentAggregator to reconcile StreamSegmentTruncateOperations.
 */
@Test
public void testReconcileTruncate() throws Exception {
    val rnd = new Random(0);
    byte[] storageData = new byte[100];
    rnd.nextBytes(storageData);
    // Write some data to the segment in Storage.
    @Cleanup TestContext context = new TestContext(DEFAULT_CONFIG);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT).join();
    context.storage.openWrite(context.segmentAggregator.getMetadata().getName()).thenCompose(h -> context.storage.write(h, 0, new ByteArrayInputStream(storageData), storageData.length, TIMEOUT)).join();
    val sm = context.containerMetadata.getStreamSegmentMetadata(context.segmentAggregator.getMetadata().getId());
    sm.setLength(storageData.length);
    sm.setStorageLength(storageData.length);
    // The truncate actually succeeds in Storage, but we then throw an IntentionalException, making it look like it failed.
    context.storage.setTruncateInterceptor((segmentName, offset, storage) -> {
        context.storage.truncateDirectly(writeHandle(segmentName), offset);
        throw new IntentionalException(String.format("S=%s", segmentName));
    });
    // Attempt to truncate.
    StorageOperation truncateOp = generateTruncateAndUpdateMetadata(SEGMENT_ID, context);
    context.segmentAggregator.add(truncateOp);
    // First time: attempt to flush/truncate, which must end in failure.
    AssertExtensions.assertThrows("IntentionalException did not propagate to flush() caller.", () -> context.segmentAggregator.flush(TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS), ex -> Exceptions.unwrap(ex) instanceof IntentionalException);
    context.storage.setTruncateInterceptor(null);
    // Second time: we are in reconciliation mode, so flush must succeed (and update internal state based on storage).
    context.segmentAggregator.flush(TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    // Verify outcome.
    Assert.assertEquals("Unexpected truncation offset in Storage.", truncateOp.getStreamSegmentOffset(), context.storage.getTruncationOffset(context.segmentAggregator.getMetadata().getName()));
}
Also used : lombok.val(lombok.val) Random(java.util.Random) ByteArrayInputStream(java.io.ByteArrayInputStream) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) Cleanup(lombok.Cleanup) IntentionalException(io.pravega.test.common.IntentionalException) Test(org.junit.Test)
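
The mechanic worth isolating here is that the truncate's side effect reaches Storage even though the caller observes an exception, so the retry must reconcile against the actual Storage state rather than re-apply the operation blindly. Below is a minimal, self-contained sketch of that reconcile-on-retry idea; every name in it is illustrative and nothing here is the Pravega API.

import java.util.concurrent.atomic.AtomicLong;

public class ReconcileSketch {
    static final AtomicLong storageTruncationOffset = new AtomicLong(0);

    // Truncates in "storage", then reports failure, mimicking the test's interceptor.
    static void truncateButReportFailure(long offset) {
        storageTruncationOffset.set(offset);       // the side effect persists in storage...
        throw new RuntimeException("intentional"); // ...yet the caller only sees an error
    }

    // Retry path: accept the state already present in storage instead of failing again.
    static void reconcile(long requestedOffset) {
        if (storageTruncationOffset.get() >= requestedOffset) {
            System.out.println("Reconciled: storage already truncated at " + storageTruncationOffset.get());
        } else {
            storageTruncationOffset.set(requestedOffset); // normal, non-reconciling path
        }
    }

    public static void main(String[] args) {
        try {
            truncateButReportFailure(50);
        } catch (RuntimeException ex) {
            reconcile(50); // the second attempt reconciles rather than retrying blindly
        }
    }
}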

Example 2 with StorageOperation

use of io.pravega.segmentstore.server.logs.operations.StorageOperation in project pravega by pravega.

the class SegmentAggregatorTests method testMergeWithStorageErrors.

/**
 * Tests the flush() method with Append and MergeTransactionOperations when there are Storage errors.
 */
@Test
public void testMergeWithStorageErrors() throws Exception {
    // Storage Errors
    // This is the number of appends per Segment/Transaction - there will be a lot of appends here.
    final int appendCount = 100;
    final int failSyncEvery = 2;
    final int failAsyncEvery = 3;
    final WriterConfig config = WriterConfig.builder()
            .with(WriterConfig.FLUSH_THRESHOLD_BYTES, appendCount * 50) // Extra high length threshold.
            .with(WriterConfig.FLUSH_THRESHOLD_MILLIS, 1000L)
            .with(WriterConfig.MAX_FLUSH_SIZE_BYTES, 10000)
            .with(WriterConfig.MIN_READ_TIMEOUT_MILLIS, 10L)
            .build();
    @Cleanup TestContext context = new TestContext(config);
    // Create and initialize all segments.
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT).join();
    for (SegmentAggregator a : context.transactionAggregators) {
        context.storage.create(a.getMetadata().getName(), TIMEOUT).join();
        a.initialize(TIMEOUT).join();
    }
    // Store written data by segment - so we can check it later.
    HashMap<Long, ByteArrayOutputStream> dataBySegment = new HashMap<>();
    // Add a few appends to each Transaction aggregator and to the parent aggregator and seal all Transactions.
    for (int i = 0; i < context.transactionAggregators.length; i++) {
        SegmentAggregator transactionAggregator = context.transactionAggregators[i];
        long transactionId = transactionAggregator.getMetadata().getId();
        ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
        dataBySegment.put(transactionId, writtenData);
        for (int appendId = 0; appendId < appendCount; appendId++) {
            StorageOperation appendOp = generateAppendAndUpdateMetadata(appendId, transactionId, context);
            transactionAggregator.add(appendOp);
            getAppendData(appendOp, writtenData, context);
        }
        transactionAggregator.add(generateSealAndUpdateMetadata(transactionId, context));
    }
    // Merge all the Transactions in the parent Segment.
    @Cleanup ByteArrayOutputStream parentData = new ByteArrayOutputStream();
    for (int transIndex = 0; transIndex < context.transactionAggregators.length; transIndex++) {
        // Merge this Transaction into the parent & record its data in the final parent data array.
        long transactionId = context.transactionAggregators[transIndex].getMetadata().getId();
        context.segmentAggregator.add(generateMergeTransactionAndUpdateMetadata(transactionId, context));
        ByteArrayOutputStream transactionData = dataBySegment.get(transactionId);
        parentData.write(transactionData.toByteArray());
        transactionData.close();
    }
    // Have the writes fail every few attempts with a well known exception.
    AtomicReference<IntentionalException> setException = new AtomicReference<>();
    Supplier<Exception> exceptionSupplier = () -> {
        IntentionalException ex = new IntentionalException(Long.toString(context.timer.getElapsedMillis()));
        setException.set(ex);
        return ex;
    };
    context.storage.setConcatSyncErrorInjector(new ErrorInjector<>(count -> count % failSyncEvery == 0, exceptionSupplier));
    context.storage.setConcatAsyncErrorInjector(new ErrorInjector<>(count -> count % failAsyncEvery == 0, exceptionSupplier));
    // Flush all the Aggregators, while checking that the right errors get handled and can be recovered from.
    tryFlushAllSegments(context, () -> setException.set(null), setException::get);
    // Verify that all Transactions are now fully merged.
    for (SegmentAggregator transactionAggregator : context.transactionAggregators) {
        SegmentMetadata transactionMetadata = transactionAggregator.getMetadata();
        Assert.assertTrue("Merged Transaction was not marked as deleted in metadata.", transactionMetadata.isDeleted());
        Assert.assertFalse("Merged Transaction still exists in storage.", context.storage.exists(transactionMetadata.getName(), TIMEOUT).join());
    }
    // Verify that in the end, the contents of the parent Segment are as expected.
    byte[] expectedData = parentData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    long storageLength = context.storage.getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join().getLength();
    Assert.assertEquals("Unexpected number of bytes flushed/merged to Storage.", expectedData.length, storageLength);
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0, actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}
Also used : HashMap(java.util.HashMap) AtomicReference(java.util.concurrent.atomic.AtomicReference) ByteArrayOutputStream(java.io.ByteArrayOutputStream) FixedByteArrayOutputStream(io.pravega.common.io.FixedByteArrayOutputStream) Cleanup(lombok.Cleanup) IntentionalException(io.pravega.test.common.IntentionalException) StreamSegmentNotExistsException(io.pravega.segmentstore.contracts.StreamSegmentNotExistsException) BadOffsetException(io.pravega.segmentstore.contracts.BadOffsetException) IOException(java.io.IOException) DataCorruptionException(io.pravega.segmentstore.server.DataCorruptionException) UpdateableSegmentMetadata(io.pravega.segmentstore.server.UpdateableSegmentMetadata) SegmentMetadata(io.pravega.segmentstore.server.SegmentMetadata) AtomicLong(java.util.concurrent.atomic.AtomicLong) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) Test(org.junit.Test)
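
The error-injection plumbing above deserves a closer look: a predicate over an invocation counter decides when to fail, and a Supplier builds the exception so the test can later assert that it caught exactly the instance it injected. The standalone sketch below reproduces that pattern under assumed semantics for io.pravega.test.common.ErrorInjector; the class and method names are hypothetical stand-ins.

import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Predicate;
import java.util.function.Supplier;

public class ErrorInjectorSketch {
    private final AtomicInteger count = new AtomicInteger();
    private final Predicate<Integer> shouldFail;
    private final Supplier<RuntimeException> exceptionSupplier;

    ErrorInjectorSketch(Predicate<Integer> shouldFail, Supplier<RuntimeException> exceptionSupplier) {
        this.shouldFail = shouldFail;
        this.exceptionSupplier = exceptionSupplier;
    }

    // Invoked before each intercepted storage operation.
    void throwIfNecessary() {
        if (shouldFail.test(count.incrementAndGet())) {
            throw exceptionSupplier.get();
        }
    }

    public static void main(String[] args) {
        AtomicReference<RuntimeException> last = new AtomicReference<>();
        // Fail every 2nd call, remembering the injected instance (like setException above).
        ErrorInjectorSketch injector = new ErrorInjectorSketch(
                c -> c % 2 == 0,
                () -> {
                    RuntimeException ex = new RuntimeException("intentional");
                    last.set(ex);
                    return ex;
                });
        for (int i = 1; i <= 4; i++) {
            try {
                injector.throwIfNecessary();
                System.out.println("call " + i + ": ok");
            } catch (RuntimeException ex) {
                // The caught exception is the exact instance the supplier recorded.
                System.out.println("call " + i + ": failed, same instance = " + (ex == last.get()));
            }
        }
    }
}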

Example 3 with StorageOperation

use of io.pravega.segmentstore.server.logs.operations.StorageOperation in project pravega by pravega.

the class SegmentAggregatorTests method testRecoveryPartialWrite.

/**
 * Tests a scenario where data that is about to be added already partially exists in Storage. This would most likely
 * happen in a recovery situation, where we committed a part of an append operation before failing over.
 */
@Test
public void testRecoveryPartialWrite() throws Exception {
    final int writeLength = 1024;
    final int partialWriteLength = writeLength / 2;
    @Cleanup TestContext context = new TestContext(DEFAULT_CONFIG);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    // Store written data by segment - so we can check it later.
    ArrayList<StorageOperation> operations = new ArrayList<>();
    byte[] writtenData = new byte[writeLength];
    val rnd = new Random(0);
    rnd.nextBytes(writtenData);
    StorageOperation appendOp = generateAppendAndUpdateMetadata(context.segmentAggregator.getMetadata().getId(), writtenData, context);
    operations.add(appendOp);
    operations.add(generateSealAndUpdateMetadata(context.segmentAggregator.getMetadata().getId(), context));
    // Write half of the data to Storage.
    context.storage.write(writeHandle(context.segmentAggregator.getMetadata().getName()), 0, new ByteArrayInputStream(writtenData), partialWriteLength, TIMEOUT).join();
    // Initialize the SegmentAggregator. This should pick up the half-written operation.
    context.segmentAggregator.initialize(TIMEOUT).join();
    Assert.assertEquals("", partialWriteLength, context.segmentAggregator.getMetadata().getStorageLength());
    // Add all operations we had so far.
    for (StorageOperation o : operations) {
        context.segmentAggregator.add(o);
    }
    flushAllSegments(context);
    // Verify that in the end, the contents of the Segment are as expected.
    verifySegmentData(writtenData, context);
}
Also used : lombok.val(lombok.val) Random(java.util.Random) ByteArrayInputStream(java.io.ByteArrayInputStream) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) ArrayList(java.util.ArrayList) Cleanup(lombok.Cleanup) Test(org.junit.Test)
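
The recovery arithmetic this test depends on is simple: initialization recovers the storage length from what Storage actually holds, and the subsequent flush writes only the missing tail starting at that offset. Here is a small JDK-only sketch of that idea; none of the names below are SegmentAggregator APIs.

import java.io.ByteArrayOutputStream;
import java.util.Arrays;
import java.util.Random;

public class PartialWriteRecoverySketch {
    public static void main(String[] args) {
        byte[] writtenData = new byte[1024]; // the full append, as recorded in metadata
        new Random(0).nextBytes(writtenData);

        // Simulated storage already holding the first half (the pre-failover write).
        ByteArrayOutputStream storage = new ByteArrayOutputStream();
        storage.write(writtenData, 0, writtenData.length / 2);

        // "initialize()": recover the storage length from what storage actually holds.
        int storageLength = storage.size(); // 512, neither 0 nor 1024

        // "flush()": write only the missing tail, starting at the recovered offset.
        storage.write(writtenData, storageLength, writtenData.length - storageLength);
        System.out.println("recovered storage matches: " + Arrays.equals(writtenData, storage.toByteArray()));
    }
}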

Example 4 with StorageOperation

use of io.pravega.segmentstore.server.logs.operations.StorageOperation in project pravega by pravega.

the class SegmentAggregatorTests method testSealWithStorageErrors.

/**
 * Tests the flush() method with Append and StreamSegmentSealOperations when there are Storage errors.
 */
@Test
public void testSealWithStorageErrors() throws Exception {
    // Add some appends and seal, and then flush together. Verify that everything got flushed in one go.
    final int appendCount = 1000;
    final WriterConfig config = WriterConfig.builder()
            .with(WriterConfig.FLUSH_THRESHOLD_BYTES, appendCount * 50) // Extra high length threshold.
            .with(WriterConfig.FLUSH_THRESHOLD_MILLIS, 1000L)
            .with(WriterConfig.MAX_FLUSH_SIZE_BYTES, 10000)
            .with(WriterConfig.MIN_READ_TIMEOUT_MILLIS, 10L)
            .build();
    @Cleanup TestContext context = new TestContext(config);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT).join();
    @Cleanup ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
    // Accumulate all the appends (the extra-high flush threshold prevents any size-triggered flush here).
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length (not bothering with flushing here; testFlushSeal() covers that).
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
    }
    // Generate and add a Seal Operation.
    StorageOperation sealOp = generateSealAndUpdateMetadata(SEGMENT_ID, context);
    context.segmentAggregator.add(sealOp);
    // Have the writes fail every few attempts with a well known exception.
    AtomicBoolean generateSyncException = new AtomicBoolean(true);
    AtomicBoolean generateAsyncException = new AtomicBoolean(true);
    AtomicReference<IntentionalException> setException = new AtomicReference<>();
    Supplier<Exception> exceptionSupplier = () -> {
        IntentionalException ex = new IntentionalException(Long.toString(context.timer.getElapsedMillis()));
        setException.set(ex);
        return ex;
    };
    context.storage.setSealSyncErrorInjector(new ErrorInjector<>(count -> generateSyncException.getAndSet(false), exceptionSupplier));
    context.storage.setSealAsyncErrorInjector(new ErrorInjector<>(count -> generateAsyncException.getAndSet(false), exceptionSupplier));
    // Call flush and verify that the entire Aggregator got flushed and the Seal got persisted to Storage.
    int attemptCount = 4;
    for (int i = 0; i < attemptCount; i++) {
        // Repeat a number of times; at least one attempt should succeed.
        setException.set(null);
        try {
            FlushResult flushResult = context.segmentAggregator.flush(TIMEOUT).join();
            Assert.assertNull("An exception was expected, but none was thrown.", setException.get());
            Assert.assertNotNull("No FlushResult provided.", flushResult);
        } catch (Exception ex) {
            if (setException.get() != null) {
                Assert.assertEquals("Unexpected exception thrown.", setException.get(), Exceptions.unwrap(ex));
            } else {
                // Not expecting any exception this time.
                throw ex;
            }
        }
        if (!generateAsyncException.get() && !generateSyncException.get() && setException.get() == null) {
            // We are done. We got at least one through.
            break;
        }
    }
    // Verify data.
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    SegmentProperties storageInfo = context.storage.getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageInfo.getLength());
    Assert.assertTrue("Segment is not sealed in storage post flush.", storageInfo.isSealed());
    Assert.assertTrue("Segment is not marked in metadata as sealed in storage post flush.", context.segmentAggregator.getMetadata().isSealedInStorage());
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0, actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}
Also used : AtomicReference(java.util.concurrent.atomic.AtomicReference) ByteArrayOutputStream(java.io.ByteArrayOutputStream) FixedByteArrayOutputStream(io.pravega.common.io.FixedByteArrayOutputStream) Cleanup(lombok.Cleanup) IntentionalException(io.pravega.test.common.IntentionalException) StreamSegmentNotExistsException(io.pravega.segmentstore.contracts.StreamSegmentNotExistsException) BadOffsetException(io.pravega.segmentstore.contracts.BadOffsetException) IOException(java.io.IOException) DataCorruptionException(io.pravega.segmentstore.server.DataCorruptionException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) SegmentProperties(io.pravega.segmentstore.contracts.SegmentProperties) Test(org.junit.Test)
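
The verification loop follows a common pattern with one-shot injectors: each attempt either succeeds or fails with exactly the exception the injector recorded, so a bounded number of retries must eventually push one flush through. A condensed, illustrative skeleton of that loop (not the Pravega writer's actual code) follows; generateSyncException from the test maps to failOnce here.

import java.util.concurrent.atomic.AtomicBoolean;

public class RetryUntilInjectorDrainsSketch {
    public static void main(String[] args) {
        AtomicBoolean failOnce = new AtomicBoolean(true); // one-shot, like generateSyncException
        int attemptCount = 4;
        for (int i = 0; i < attemptCount; i++) {
            try {
                if (failOnce.getAndSet(false)) { // the injector fires exactly once
                    throw new RuntimeException("intentional");
                }
                System.out.println("attempt " + (i + 1) + ": flushed");
                break; // we got at least one through
            } catch (RuntimeException ex) {
                System.out.println("attempt " + (i + 1) + ": " + ex.getMessage());
            }
        }
    }
}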

Example 5 with StorageOperation

use of io.pravega.segmentstore.server.logs.operations.StorageOperation in project pravega by pravega.

the class SegmentAggregatorTests method testFlushAppend.

// endregion
// region flush()
/**
 * Tests the flush() method only with Append operations.
 * Verifies both length-based and time-based flush triggers, as well as flushing rather large operations.
 */
@Test
public void testFlushAppend() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    final int appendCount = config.getFlushThresholdBytes() * 10;
    @Cleanup TestContext context = new TestContext(config);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT).join();
    @Cleanup ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
    // Number of bytes remaining to be flushed.
    AtomicLong outstandingSize = new AtomicLong();
    SequenceNumberCalculator sequenceNumbers = new SequenceNumberCalculator(context, outstandingSize);
    // Part 1: flush triggered by accumulated size.
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        outstandingSize.addAndGet(appendOp.getLength());
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
        sequenceNumbers.record(appendOp);
        boolean expectFlush = outstandingSize.get() >= config.getFlushThresholdBytes();
        Assert.assertEquals("Unexpected value returned by mustFlush() (size threshold).", expectFlush, context.segmentAggregator.mustFlush());
        Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() before flush (size threshold).", sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber());
        // Call flush() and inspect the result.
        FlushResult flushResult = context.segmentAggregator.flush(TIMEOUT).join();
        if (expectFlush) {
            AssertExtensions.assertGreaterThanOrEqual("Not enough bytes were flushed (size threshold).", config.getFlushThresholdBytes(), flushResult.getFlushedBytes());
            outstandingSize.addAndGet(-flushResult.getFlushedBytes());
            Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() after flush (size threshold).", sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber());
        } else {
            Assert.assertEquals(String.format("Not expecting a flush. OutstandingSize=%s, Threshold=%d", outstandingSize, config.getFlushThresholdBytes()), 0, flushResult.getFlushedBytes());
        }
        Assert.assertFalse("Unexpected value returned by mustFlush() after flush (size threshold).", context.segmentAggregator.mustFlush());
        Assert.assertEquals("Not expecting any merged bytes in this test.", 0, flushResult.getMergedBytes());
    }
    // Part 2: flush triggered by time.
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        outstandingSize.addAndGet(appendOp.getLength());
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
        sequenceNumbers.record(appendOp);
        // Call flush() and inspect the result.
        // Force a flush by incrementing the time by a lot.
        context.increaseTime(config.getFlushThresholdTime().toMillis() + 1);
        Assert.assertTrue("Unexpected value returned by mustFlush() (time threshold).", context.segmentAggregator.mustFlush());
        Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() before flush (time threshold).", sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber());
        FlushResult flushResult = context.segmentAggregator.flush(TIMEOUT).join();
        // We are always expecting a flush.
        AssertExtensions.assertGreaterThan("Not enough bytes were flushed (time threshold).", 0, flushResult.getFlushedBytes());
        outstandingSize.addAndGet(-flushResult.getFlushedBytes());
        Assert.assertFalse("Unexpected value returned by mustFlush() after flush (time threshold).", context.segmentAggregator.mustFlush());
        Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() after flush (time threshold).", sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber());
        Assert.assertEquals("Not expecting any merged bytes in this test.", 0, flushResult.getMergedBytes());
    }
    // Part 3: Transaction appends. This will force the internal loop inside flush() to run repeatedly.
    final int transactionSize = 100;
    for (int i = 0; i < appendCount / 10; i++) {
        for (int j = 0; j < transactionSize; j++) {
            // Add another operation and record its length.
            StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
            outstandingSize.addAndGet(appendOp.getLength());
            context.segmentAggregator.add(appendOp);
            getAppendData(appendOp, writtenData, context);
            sequenceNumbers.record(appendOp);
            Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() before flush (Transaction appends).", sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber());
        }
        // Call flush() and inspect the result.
        Assert.assertTrue("Unexpected value returned by mustFlush() (Transaction appends).", context.segmentAggregator.mustFlush());
        FlushResult flushResult = context.segmentAggregator.flush(TIMEOUT).join();
        // We are always expecting a flush.
        AssertExtensions.assertGreaterThan("Not enough bytes were flushed (Transaction appends).", 0, flushResult.getFlushedBytes());
        outstandingSize.addAndGet(-flushResult.getFlushedBytes());
        Assert.assertFalse("Unexpected value returned by mustFlush() after flush (Transaction appends).", context.segmentAggregator.mustFlush());
        Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() after flush (Transaction appends).", sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber());
        Assert.assertEquals("Not expecting any merged bytes in this test.", 0, flushResult.getMergedBytes());
    }
    // Part 4: large appends (larger than MaxFlushSize).
    Random random = new Random();
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        byte[] largeAppendData = new byte[config.getMaxFlushSizeBytes() * 10 + 1];
        random.nextBytes(largeAppendData);
        StorageOperation appendOp = generateAppendAndUpdateMetadata(SEGMENT_ID, largeAppendData, context);
        outstandingSize.addAndGet(appendOp.getLength());
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
        sequenceNumbers.record(appendOp);
        // Call flush() and inspect the result.
        // Force a flush by incrementing the time by a lot.
        context.increaseTime(config.getFlushThresholdTime().toMillis() + 1);
        Assert.assertTrue("Unexpected value returned by mustFlush() (large appends).", context.segmentAggregator.mustFlush());
        Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() before flush (large appends).", sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber());
        FlushResult flushResult = context.segmentAggregator.flush(TIMEOUT).join();
        // We are always expecting a flush.
        AssertExtensions.assertGreaterThan("Not enough bytes were flushed (large appends).", 0, flushResult.getFlushedBytes());
        outstandingSize.addAndGet(-flushResult.getFlushedBytes());
        Assert.assertFalse("Unexpected value returned by mustFlush() after flush (time threshold).", context.segmentAggregator.mustFlush());
        Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() after flush (large appends).", sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber());
        Assert.assertEquals("Not expecting any merged bytes in this test (large appends).", 0, flushResult.getMergedBytes());
    }
    // Verify data.
    Assert.assertEquals("Not expecting leftover data not flushed.", 0, outstandingSize.get());
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    long storageLength = context.storage.getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join().getLength();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageLength);
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0, actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}
Also used : ByteArrayOutputStream(java.io.ByteArrayOutputStream) FixedByteArrayOutputStream(io.pravega.common.io.FixedByteArrayOutputStream) Cleanup(lombok.Cleanup) AtomicLong(java.util.concurrent.atomic.AtomicLong) Random(java.util.Random) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) Test(org.junit.Test)
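
All the assertions in this test revolve around two flush triggers: accumulated size reaching FLUSH_THRESHOLD_BYTES, and elapsed time since the last flush passing FLUSH_THRESHOLD_MILLIS. The sketch below mirrors only that predicate, under those assumptions; the real SegmentAggregator.mustFlush() logic is considerably more involved.

public class FlushTriggerSketch {
    static final long FLUSH_THRESHOLD_BYTES = 10_000; // illustrative values, not Pravega defaults
    static final long FLUSH_THRESHOLD_MILLIS = 1_000;

    static boolean mustFlush(long outstandingBytes, long millisSinceLastFlush) {
        return outstandingBytes >= FLUSH_THRESHOLD_BYTES
                || (outstandingBytes > 0 && millisSinceLastFlush >= FLUSH_THRESHOLD_MILLIS);
    }

    public static void main(String[] args) {
        System.out.println(mustFlush(12_000, 10));  // true: size threshold reached
        System.out.println(mustFlush(100, 1_500));  // true: time threshold elapsed
        System.out.println(mustFlush(100, 10));     // false: neither trigger fired
    }
}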

Aggregations

StorageOperation (io.pravega.segmentstore.server.logs.operations.StorageOperation) 34
Test (org.junit.Test) 24
Cleanup (lombok.Cleanup) 21
DataCorruptionException (io.pravega.segmentstore.server.DataCorruptionException) 15
UpdateableSegmentMetadata (io.pravega.segmentstore.server.UpdateableSegmentMetadata) 15
FixedByteArrayOutputStream (io.pravega.common.io.FixedByteArrayOutputStream) 14
StreamSegmentNotExistsException (io.pravega.segmentstore.contracts.StreamSegmentNotExistsException) 14
MergeTransactionOperation (io.pravega.segmentstore.server.logs.operations.MergeTransactionOperation) 14
ByteArrayOutputStream (java.io.ByteArrayOutputStream) 14
ArrayList (java.util.ArrayList) 14
AtomicLong (java.util.concurrent.atomic.AtomicLong) 14
StreamSegmentAppendOperation (io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation) 13
lombok.val (lombok.val) 13
SegmentProperties (io.pravega.segmentstore.contracts.SegmentProperties) 12
SegmentMetadata (io.pravega.segmentstore.server.SegmentMetadata) 12
InputStream (java.io.InputStream) 12
BadOffsetException (io.pravega.segmentstore.contracts.BadOffsetException) 11
CachedStreamSegmentAppendOperation (io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation) 11
Operation (io.pravega.segmentstore.server.logs.operations.Operation) 11
IntentionalException (io.pravega.test.common.IntentionalException) 11