
Example 26 with StorageOperation

use of io.pravega.segmentstore.server.logs.operations.StorageOperation in project pravega by pravega.

the class SegmentAggregatorTests method testDeletedSegmentInMetadata.

/**
 * Tests the case when a Segment is deleted in the Metadata and ReadIndex.
 * Note that we are not testing the case when the Segment is deleted only in Storage, since the SegmentContainer
 * first deletes in the Metadata and then in Storage, and that would simply throw a StreamSegmentNotExistsException,
 * which will subside as soon as the Metadata is updated and the StorageWriter cleans up the SegmentAggregator.
 */
@Test
public void testDeletedSegmentInMetadata() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    @Cleanup TestContext context = new TestContext(config);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT).join();
    // Add one operation big enough to trigger a Flush.
    byte[] appendData = new byte[config.getFlushThresholdBytes() + 1];
    StorageOperation appendOp = generateAppendAndUpdateMetadata(SEGMENT_ID, appendData, context);
    context.segmentAggregator.add(appendOp);
    Assert.assertTrue("Unexpected value returned by mustFlush() (size threshold).", context.segmentAggregator.mustFlush());
    // Delete the segment in the Metadata & Read index. We want to make sure we do this while the flush() method is
    // running, hence the callback (flush() has a check at the beginning that exits if the metadata indicates deleted).
    context.dataSource.setOnGetAppendData(() -> {
        context.containerMetadata.deleteStreamSegment(context.segmentAggregator.getMetadata().getName());
        context.dataSource.clearAppendData();
    });
    // Call flush() and inspect the result.
    FlushResult flushResult = context.segmentAggregator.flush(TIMEOUT).join();
    Assert.assertEquals("Not expecting any bytes to be flushed.", 0, flushResult.getFlushedBytes());
    Assert.assertEquals("Not expecting any merged bytes in this test.", 0, flushResult.getMergedBytes());
    Assert.assertFalse("Unexpected value returned by mustFlush() after flush.", context.segmentAggregator.mustFlush());
}
Also used : StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) Cleanup(lombok.Cleanup) Test(org.junit.Test)
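The technique worth noting in this test is the data-source hook: setOnGetAppendData() lets the test mutate shared state at a precise moment inside flush(). Below is a minimal, self-contained sketch of that interception pattern; DataSource and Flusher here are hypothetical stand-ins, not Pravega types.

import java.util.concurrent.atomic.AtomicBoolean;

// Hypothetical stand-ins for the test's dataSource/segmentAggregator pair.
class DataSource {
    private Runnable onGetData = () -> { };             // test hook, fired mid-flush
    private final AtomicBoolean deleted = new AtomicBoolean(false);

    void setOnGetData(Runnable hook) { this.onGetData = hook; }
    void markDeleted() { deleted.set(true); }

    byte[] getData() {
        onGetData.run();                                // run the hook before reading state
        return deleted.get() ? null : new byte[] { 1, 2, 3 };
    }
}

class Flusher {
    int flush(DataSource source) {
        byte[] data = source.getData();                 // the hook runs inside this call
        return data == null ? 0 : data.length;          // deleted mid-flush -> nothing written
    }
}

public class HookDemo {
    public static void main(String[] args) {
        DataSource ds = new DataSource();
        // Delete the "segment" while flush() is already running, exactly as the test does.
        ds.setOnGetData(ds::markDeleted);
        int flushed = new Flusher().flush(ds);
        if (flushed != 0) {
            throw new AssertionError("Not expecting any bytes to be flushed, got " + flushed);
        }
        System.out.println("flushed 0 bytes, as expected");
    }
}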

Example 27 with StorageOperation

use of io.pravega.segmentstore.server.logs.operations.StorageOperation in project pravega by pravega.

the class SegmentAggregatorTests method testTruncateAndSeal.

/**
 * Tests the flush() method with StreamSegmentTruncateOperations after the segment has been Sealed.
 */
@Test
public void testTruncateAndSeal() throws Exception {
    // Add some data and intersperse with truncates.
    final int appendCount = 1000;
    final int truncateEvery = 20;
    final WriterConfig config = WriterConfig.builder()
            .with(WriterConfig.FLUSH_THRESHOLD_BYTES, appendCount * 50) // Extra high length threshold.
            .with(WriterConfig.FLUSH_THRESHOLD_MILLIS, 1000L)
            .with(WriterConfig.MAX_FLUSH_SIZE_BYTES, 10000)
            .with(WriterConfig.MIN_READ_TIMEOUT_MILLIS, 10L)
            .build();
    @Cleanup TestContext context = new TestContext(config);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT).join();
    @Cleanup ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
    // Accumulate some Appends
    AtomicLong outstandingSize = new AtomicLong();
    SequenceNumberCalculator sequenceNumbers = new SequenceNumberCalculator(context, outstandingSize);
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        outstandingSize.addAndGet(appendOp.getLength());
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
        sequenceNumbers.record(appendOp);
        if (i % truncateEvery == 1) {
            StorageOperation truncateOp = generateTruncateAndUpdateMetadata(SEGMENT_ID, context);
            context.segmentAggregator.add(truncateOp);
            sequenceNumbers.record(truncateOp);
        }
    }
    // Generate and add a Seal Operation.
    StorageOperation sealOp = generateSealAndUpdateMetadata(SEGMENT_ID, context);
    context.segmentAggregator.add(sealOp);
    // Add another truncate op, after the Seal.
    StorageOperation lastTruncateOp = generateTruncateAndUpdateMetadata(SEGMENT_ID, context);
    context.segmentAggregator.add(lastTruncateOp);
    FlushResult flushResult = context.segmentAggregator.flush(TIMEOUT).join();
    Assert.assertEquals("Expected the entire Aggregator to be flushed.", outstandingSize.get(), flushResult.getFlushedBytes());
    Assert.assertFalse("Unexpected value returned by mustFlush() after flushing.", context.segmentAggregator.mustFlush());
    Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() after flushing.", Operation.NO_SEQUENCE_NUMBER, context.segmentAggregator.getLowestUncommittedSequenceNumber());
    // Verify data.
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    SegmentProperties storageInfo = context.storage.getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageInfo.getLength());
    Assert.assertTrue("Unexpected sealed status in Storage.", storageInfo.isSealed());
    Assert.assertEquals("Unexpected truncation offset in Storage.", lastTruncateOp.getStreamSegmentOffset(), context.storage.getTruncationOffset(context.segmentAggregator.getMetadata().getName()));
    context.storage.read(InMemoryStorage.newHandle(context.segmentAggregator.getMetadata().getName(), false), 0, actualData, 0, actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}
Also used : AtomicLong(java.util.concurrent.atomic.AtomicLong) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) ByteArrayOutputStream(java.io.ByteArrayOutputStream) FixedByteArrayOutputStream(io.pravega.common.io.FixedByteArrayOutputStream) SegmentProperties(io.pravega.segmentstore.contracts.SegmentProperties) Cleanup(lombok.Cleanup) Test(org.junit.Test)
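The verification strategy in this test is worth calling out: every append is mirrored into a local ByteArrayOutputStream (writtenData), and after the flush the Storage contents are compared byte-for-byte against that mirror. Below is a minimal sketch of the mirror-and-verify pattern, with InMemoryStore as a hypothetical stand-in for Storage.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Arrays;

// Hypothetical in-memory stand-in for Storage.
class InMemoryStore {
    private final ByteArrayOutputStream contents = new ByteArrayOutputStream();

    void write(byte[] data) throws IOException { contents.write(data); }
    byte[] read() { return contents.toByteArray(); }
}

public class MirrorVerifyDemo {
    public static void main(String[] args) throws IOException {
        InMemoryStore store = new InMemoryStore();
        ByteArrayOutputStream writtenData = new ByteArrayOutputStream(); // the local mirror
        for (int i = 0; i < 100; i++) {
            byte[] append = ("append-" + i).getBytes();
            store.write(append);       // the operation under test
            writtenData.write(append); // mirror every append, like the test does
        }
        // Same final check as the test: Storage must match the mirror byte-for-byte.
        byte[] expectedData = writtenData.toByteArray();
        if (!Arrays.equals(expectedData, store.read())) {
            throw new AssertionError("Unexpected data written to storage.");
        }
        System.out.println("storage matches mirror: " + expectedData.length + " bytes");
    }
}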

Example 28 with StorageOperation

use of io.pravega.segmentstore.server.logs.operations.StorageOperation in project pravega by pravega.

the class SegmentAggregatorTests method testReconcileSeal.

/**
 * Tests the ability of the SegmentAggregator to reconcile StreamSegmentSealOperations.
 */
@Test
public void testReconcileSeal() throws Exception {
    @Cleanup TestContext context = new TestContext(DEFAULT_CONFIG);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT).join();
    // The seal succeeds, but we throw some random error, indicating that it didn't.
    context.storage.setSealInterceptor((segmentName, storage) -> {
        storage.seal(writeHandle(segmentName), TIMEOUT).join();
        throw new IntentionalException(String.format("S=%s", segmentName));
    });
    // Attempt to seal.
    StorageOperation sealOp = generateSealAndUpdateMetadata(SEGMENT_ID, context);
    context.segmentAggregator.add(sealOp);
    // First time: attempt to flush/seal, which must end in failure.
    AssertExtensions.assertThrows("IntentionalException did not propagate to flush() caller.", () -> context.segmentAggregator.flush(TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS), ex -> Exceptions.unwrap(ex) instanceof IntentionalException);
    context.storage.setSealInterceptor(null);
    // Second time: we are in reconciliation mode, so flush must succeed (and update internal state based on storage).
    context.segmentAggregator.flush(TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    // Verify outcome.
    Assert.assertTrue("Segment not marked as sealed in storage (in metadata).", context.segmentAggregator.getMetadata().isSealedInStorage());
    Assert.assertTrue("SegmentAggregator not closed.", context.segmentAggregator.isClosed());
}
Also used : StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) Cleanup(lombok.Cleanup) IntentionalException(io.pravega.test.common.IntentionalException) Test(org.junit.Test)
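The fault model here is subtle: the seal actually lands in Storage, but the caller sees an exception, so the aggregator's view and Storage disagree until the next flush reconciles them. Below is a compact, self-contained sketch of the "succeed-then-throw" interceptor pattern; FakeStorage and its setSealInterceptor() are hypothetical stand-ins.

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;

// Hypothetical storage with an injectable interceptor, mirroring setSealInterceptor().
class FakeStorage {
    final AtomicBoolean sealed = new AtomicBoolean(false);
    private Consumer<FakeStorage> sealInterceptor;

    void setSealInterceptor(Consumer<FakeStorage> interceptor) { this.sealInterceptor = interceptor; }

    void seal() {
        if (sealInterceptor != null) {
            sealInterceptor.accept(this); // may seal for real and then still throw
            return;
        }
        sealed.set(true);
    }
}

public class ReconcileDemo {
    public static void main(String[] args) {
        FakeStorage storage = new FakeStorage();
        // The seal succeeds in storage, but the caller is told it failed.
        storage.setSealInterceptor(s -> {
            s.sealed.set(true);
            throw new RuntimeException("intentional");
        });
        try {
            storage.seal();
            throw new AssertionError("Expected the first seal attempt to fail.");
        } catch (RuntimeException ex) {
            // First attempt: the failure propagates, like the first flush() in the test.
        }
        storage.setSealInterceptor(null);
        // Second attempt: "reconciliation" discovers the seal already happened in storage.
        if (storage.sealed.get()) {
            System.out.println("already sealed in storage; reconcile instead of re-sealing");
        }
    }
}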

Example 29 with StorageOperation

use of io.pravega.segmentstore.server.logs.operations.StorageOperation in project pravega by pravega.

the class OperationProcessorTests method testWithInvalidOperations.

/**
 * Tests the ability of the OperationProcessor to process Operations when encountering invalid operations (such as
 * appends to StreamSegments that do not exist or to those that are sealed). This covers the following exceptions:
 * * StreamSegmentNotExistsException
 * * StreamSegmentSealedException
 * * General MetadataUpdateException.
 */
@Test
public void testWithInvalidOperations() throws Exception {
    int streamSegmentCount = 10;
    int appendsPerStreamSegment = 40;
    // We are going to prematurely seal this StreamSegment.
    long sealedStreamSegmentId = 6;
    // We are going to prematurely mark this StreamSegment as deleted.
    long deletedStreamSegmentId = 8;
    // This is a bogus StreamSegment that does not exist.
    long nonExistentStreamSegmentId;
    @Cleanup TestContext context = new TestContext();
    // Generate some test data (no need to complicate ourselves with Transactions here; that is tested in the no-failure test).
    HashSet<Long> streamSegmentIds = createStreamSegmentsInMetadata(streamSegmentCount, context.metadata);
    nonExistentStreamSegmentId = streamSegmentIds.size();
    streamSegmentIds.add(nonExistentStreamSegmentId);
    context.metadata.getStreamSegmentMetadata(sealedStreamSegmentId).markSealed();
    context.metadata.getStreamSegmentMetadata(deletedStreamSegmentId).markDeleted();
    List<Operation> operations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);
    // Setup an OperationProcessor and start it.
    @Cleanup TestDurableDataLog dataLog = TestDurableDataLog.create(CONTAINER_ID, MAX_DATA_LOG_APPEND_SIZE, executorService());
    dataLog.initialize(TIMEOUT);
    @Cleanup OperationProcessor operationProcessor = new OperationProcessor(context.metadata, context.stateUpdater, dataLog, getNoOpCheckpointPolicy(), executorService());
    operationProcessor.startAsync().awaitRunning();
    // Process all generated operations.
    List<OperationWithCompletion> completionFutures = processOperations(operations, operationProcessor);
    // Wait for all such operations to complete. We are expecting exceptions, so verify that we do.
    AssertExtensions.assertThrows("No operations failed.", OperationWithCompletion.allOf(completionFutures)::join, ex -> ex instanceof MetadataUpdateException || ex instanceof StreamSegmentException);
    HashSet<Long> streamSegmentsWithNoContents = new HashSet<>();
    streamSegmentsWithNoContents.add(sealedStreamSegmentId);
    streamSegmentsWithNoContents.add(deletedStreamSegmentId);
    streamSegmentsWithNoContents.add(nonExistentStreamSegmentId);
    // Verify that the "right" operations failed, while the others succeeded.
    for (OperationWithCompletion oc : completionFutures) {
        if (oc.operation instanceof StorageOperation) {
            long streamSegmentId = ((StorageOperation) oc.operation).getStreamSegmentId();
            if (streamSegmentsWithNoContents.contains(streamSegmentId)) {
                Assert.assertTrue("Completion future for invalid StreamSegment " + streamSegmentId + " did not complete exceptionally.", oc.completion.isCompletedExceptionally());
                Predicate<Throwable> errorValidator;
                if (streamSegmentId == sealedStreamSegmentId) {
                    errorValidator = ex -> ex instanceof StreamSegmentSealedException;
                } else if (streamSegmentId == deletedStreamSegmentId) {
                    errorValidator = ex -> ex instanceof StreamSegmentNotExistsException;
                } else {
                    errorValidator = ex -> ex instanceof MetadataUpdateException;
                }
                AssertExtensions.assertThrows("Unexpected exception for failed Operation.", oc.completion::join, errorValidator);
                continue;
            }
        }
        // If we get here, we must verify no exception was thrown.
        oc.completion.join();
    }
    performLogOperationChecks(completionFutures, context.memoryLog, dataLog, context.metadata);
    performMetadataChecks(streamSegmentIds, streamSegmentsWithNoContents, new HashMap<>(), completionFutures, context.metadata, false, false);
    performReadIndexChecks(completionFutures, context.readIndex);
    operationProcessor.stopAsync().awaitTerminated();
}
Also used : StreamSegmentException(io.pravega.segmentstore.contracts.StreamSegmentException) ProbeOperation(io.pravega.segmentstore.server.logs.operations.ProbeOperation) Operation(io.pravega.segmentstore.server.logs.operations.Operation) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) StreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation) Cleanup(lombok.Cleanup) StreamSegmentNotExistsException(io.pravega.segmentstore.contracts.StreamSegmentNotExistsException) StreamSegmentSealedException(io.pravega.segmentstore.contracts.StreamSegmentSealedException) HashSet(java.util.HashSet) Test(org.junit.Test)
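The per-segment failure checks hinge on two things: choosing a Predicate<Throwable> validator per failure mode, and unwrapping the CompletionException that CompletableFuture.join() throws around the real cause. Below is a JDK-only sketch of that validation idiom; assertThrows and unwrap here are simplified stand-ins for the AssertExtensions/Exceptions helpers used in the test.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.function.Predicate;

public class ErrorValidatorDemo {
    // Unwrap the CompletionException that join() wraps around the real cause.
    static Throwable unwrap(Throwable ex) {
        return (ex instanceof CompletionException && ex.getCause() != null) ? ex.getCause() : ex;
    }

    static void assertThrows(CompletableFuture<?> future, Predicate<Throwable> errorValidator) {
        try {
            future.join();
            throw new AssertionError("Expected the future to complete exceptionally.");
        } catch (CompletionException ex) {
            if (!errorValidator.test(unwrap(ex))) {
                throw new AssertionError("Unexpected exception type: " + unwrap(ex));
            }
        }
    }

    public static void main(String[] args) {
        CompletableFuture<Void> failed =
                CompletableFuture.failedFuture(new IllegalStateException("segment is sealed"));
        // Pick the validator per failure mode, like the sealed/deleted/non-existent branches above.
        Predicate<Throwable> errorValidator = ex -> ex instanceof IllegalStateException;
        assertThrows(failed, errorValidator);
        System.out.println("failure validated");
    }
}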

Example 30 with StorageOperation

use of io.pravega.segmentstore.server.logs.operations.StorageOperation in project pravega by pravega.

the class SegmentAggregator method reconcileMergeOperation.

/**
 * Attempts to reconcile the given MergeTransactionOperation.
 *
 * @param op          The Operation to reconcile.
 * @param storageInfo The current state of the Segment in Storage.
 * @param timer       Timer for the operation.
 * @return A CompletableFuture containing a FlushResult with the number of bytes reconciled, or failed with a
 * ReconciliationFailureException if the operation cannot be reconciled based on the in-memory metadata or the
 * current state of the Segment in Storage.
 */
private CompletableFuture<FlushResult> reconcileMergeOperation(MergeTransactionOperation op, SegmentProperties storageInfo, TimeoutTimer timer) {
    // Verify that the transaction segment is still registered in metadata.
    UpdateableSegmentMetadata transactionMeta = this.dataSource.getStreamSegmentMetadata(op.getTransactionSegmentId());
    if (transactionMeta == null || transactionMeta.isDeleted()) {
        return Futures.failedFuture(new ReconciliationFailureException(String.format("Cannot reconcile operation '%s' because the transaction segment is deleted or missing from the metadata.", op), this.metadata, storageInfo));
    }
    // Verify that the operation fits fully within this segment (mergers are atomic - they either merge all or nothing).
    if (op.getLastStreamSegmentOffset() > storageInfo.getLength()) {
        return Futures.failedFuture(new ReconciliationFailureException(String.format("Cannot reconcile operation '%s' because the transaction segment is not fully merged into the parent.", op), this.metadata, storageInfo));
    }
    // Verify that the transaction segment does not exist in Storage anymore.
    return this.storage.exists(transactionMeta.getName(), timer.getRemaining()).thenApplyAsync(exists -> {
        if (exists) {
            throw new CompletionException(new ReconciliationFailureException(String.format("Cannot reconcile operation '%s' because the transaction segment still exists in Storage.", op), this.metadata, storageInfo));
        }
        // Pop the first operation off the list and update the metadata for the transaction segment.
        StorageOperation processedOperation = this.operations.removeFirst();
        assert processedOperation != null && processedOperation instanceof MergeTransactionOperation : "First outstanding operation was not a MergeTransactionOperation";
        int newCount = this.mergeTransactionCount.decrementAndGet();
        assert newCount >= 0 : "Negative value for mergeTransactionCount";
        updateMetadataForTransactionPostMerger(transactionMeta);
        return new FlushResult().withMergedBytes(op.getLength());
    }, this.executor);
}
Also used : UpdateableSegmentMetadata(io.pravega.segmentstore.server.UpdateableSegmentMetadata) CompletionException(java.util.concurrent.CompletionException) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) MergeTransactionOperation(io.pravega.segmentstore.server.logs.operations.MergeTransactionOperation)
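Note the two distinct failure paths in reconcileMergeOperation(): before the async hop it returns a failed future directly (Futures.failedFuture), while inside thenApplyAsync it must throw a CompletionException to fail the resulting future. Below is a JDK-only sketch of this pattern, with reconcile() as a hypothetical simplification of the method above.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

public class AsyncFailureDemo {
    // Pre-check failure: return a failed future without entering the async stage.
    static CompletableFuture<Integer> reconcile(boolean metadataMissing, boolean stillExists) {
        if (metadataMissing) {
            return CompletableFuture.failedFuture(
                    new IllegalStateException("transaction segment missing from metadata"));
        }
        return CompletableFuture.supplyAsync(() -> stillExists).thenApplyAsync(exists -> {
            if (exists) {
                // Inside a continuation, throwing CompletionException fails the future.
                throw new CompletionException(
                        new IllegalStateException("transaction segment still exists in Storage"));
            }
            return 42; // e.g. the merged byte count
        });
    }

    public static void main(String[] args) {
        System.out.println("merged bytes: " + reconcile(false, false).join());
        try {
            reconcile(false, true).join();
        } catch (CompletionException ex) {
            System.out.println("failed as expected: " + ex.getCause().getMessage());
        }
    }
}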

Aggregations

StorageOperation (io.pravega.segmentstore.server.logs.operations.StorageOperation) 34
Test (org.junit.Test) 24
Cleanup (lombok.Cleanup) 21
DataCorruptionException (io.pravega.segmentstore.server.DataCorruptionException) 15
UpdateableSegmentMetadata (io.pravega.segmentstore.server.UpdateableSegmentMetadata) 15
FixedByteArrayOutputStream (io.pravega.common.io.FixedByteArrayOutputStream) 14
StreamSegmentNotExistsException (io.pravega.segmentstore.contracts.StreamSegmentNotExistsException) 14
MergeTransactionOperation (io.pravega.segmentstore.server.logs.operations.MergeTransactionOperation) 14
ByteArrayOutputStream (java.io.ByteArrayOutputStream) 14
ArrayList (java.util.ArrayList) 14
AtomicLong (java.util.concurrent.atomic.AtomicLong) 14
StreamSegmentAppendOperation (io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation) 13
lombok.val (lombok.val) 13
SegmentProperties (io.pravega.segmentstore.contracts.SegmentProperties) 12
SegmentMetadata (io.pravega.segmentstore.server.SegmentMetadata) 12
InputStream (java.io.InputStream) 12
BadOffsetException (io.pravega.segmentstore.contracts.BadOffsetException) 11
CachedStreamSegmentAppendOperation (io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation) 11
Operation (io.pravega.segmentstore.server.logs.operations.Operation) 11
IntentionalException (io.pravega.test.common.IntentionalException) 11