
Example 56 with Timeout

Use of org.junit.rules.Timeout in project pravega by pravega.
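Every example on this page uses org.junit.rules.Timeout the same way: the test class declares it as a JUnit 4 @Rule, which fails any test method that exceeds the configured time limit. A minimal sketch of such a declaration (the 30-second limit is an illustrative assumption, not pravega's actual setting):

import java.util.concurrent.TimeUnit;

import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;

public class ExampleTest {

    // Fails any individual test method in this class that runs longer than 30 seconds.
    @Rule
    public Timeout globalTimeout = new Timeout(30, TimeUnit.SECONDS);

    @Test
    public void someTest() {
        // Test body; the rule fails the test if it exceeds the configured limit.
    }
}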

The class EventSegmentReaderImplTest, method testEventDataPartialTimeout.

@Test
public void testEventDataPartialTimeout() throws SegmentTruncatedException, EndOfSegmentException {
    // Setup Mocks
    SegmentInputStream segmentInputStream = mock(SegmentInputStream.class);
    @Cleanup EventSegmentReaderImpl segmentReader = new EventSegmentReaderImpl(segmentInputStream);
    doAnswer(i -> {
        ByteBuffer headerReadingBuffer = i.getArgument(0);
        headerReadingBuffer.putInt(WireCommandType.EVENT.getCode());
        headerReadingBuffer.putInt(10);
        return WireCommands.TYPE_PLUS_LENGTH_SIZE;
    }).when(segmentInputStream).read(any(ByteBuffer.class), eq(1000L));
    // Simulate a partial read followed by a timeout.
    doAnswer(i -> {
        ByteBuffer headerReadingBuffer = i.getArgument(0);
        // Append only 5 of the 10 payload bytes; the remaining 5 never arrive.
        headerReadingBuffer.put((byte) 0x01);
        headerReadingBuffer.put((byte) 0x01);
        headerReadingBuffer.put((byte) 0x01);
        headerReadingBuffer.put((byte) 0x01);
        headerReadingBuffer.put((byte) 0x01);
        return 5;
    }).doReturn(0) // The second invocation returns 0, which causes the timeout.
            .when(segmentInputStream).read(any(ByteBuffer.class), eq(EventSegmentReaderImpl.PARTIAL_DATA_TIMEOUT));
    when(segmentInputStream.getSegmentId()).thenReturn(new Segment("scope", "stream", 0L));
    // Invoke read.
    ByteBuffer readData = segmentReader.read(1000);
    assertNull(readData);
    verify(segmentInputStream, times(1)).setOffset(0L, true);
    verify(segmentInputStream, times(0)).setOffset(0);
}
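The core Mockito idiom above is consecutive stubbing: chaining doAnswer(...).doReturn(...) makes the first matching call execute the answer (the partial 5-byte read) and the second call return the canned value (0 bytes, triggering the timeout). A stripped-down, self-contained illustration of the idiom (the Source interface is a hypothetical stand-in, not a pravega type):

import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;

public class ConsecutiveStubbingExample {

    // Hypothetical single-method interface, just to demonstrate the stubbing chain.
    interface Source {
        int read();
    }

    public static void main(String[] args) {
        Source source = mock(Source.class);
        // The first matching call runs the answer; later calls return the last stub.
        doAnswer(invocation -> 5).doReturn(0).when(source).read();
        System.out.println(source.read()); // 5
        System.out.println(source.read()); // 0
        System.out.println(source.read()); // still 0
    }
}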

Example 57 with Timeout

Use of org.junit.rules.Timeout in project pravega by pravega.

The class SegmentAggregatorTests, method testMerge.

/**
 * Tests the flush() method with Append and MergeTransactionOperations.
 * Overall strategy:
 * 1. Create one Parent Segment and N Transaction Segments.
 * 2. Populate all Transaction Segments with data.
 * 3. Seal the first N/2 Transaction Segments.
 * 4. Add some Appends, interspersed with Merge Transaction Ops, to the Parent (for all Transactions).
 * 5. Call flush() repeatedly on all Segments, until nothing is flushed anymore. Verify only the first N/2 Transactions were merged.
 * 6. Seal the remaining N/2 Transaction Segments.
 * 7. Call flush() repeatedly on all Segments, until nothing is flushed anymore. Verify all Transactions were merged.
 * 8. Verify the Parent Segment has all the data (from itself and its Transactions), in the correct order.
 */
@Test
@SuppressWarnings("checkstyle:CyclomaticComplexity")
public void testMerge() throws Exception {
    // This is the number of appends per Segment/Transaction - there will be a lot of appends here.
    final int appendCount = 100;
    final WriterConfig config = WriterConfig.builder()
            .with(WriterConfig.FLUSH_THRESHOLD_BYTES, appendCount * 50) // Extra-high length threshold.
            .with(WriterConfig.FLUSH_THRESHOLD_MILLIS, 1000L)
            .with(WriterConfig.MAX_FLUSH_SIZE_BYTES, 10000)
            .with(WriterConfig.MIN_READ_TIMEOUT_MILLIS, 10L)
            .build();
    @Cleanup TestContext context = new TestContext(config);
    // Initialize all segments.
    context.segmentAggregator.initialize(TIMEOUT).join();
    for (SegmentAggregator a : context.transactionAggregators) {
        a.initialize(TIMEOUT).join();
    }
    // Store written data by segment - so we can check it later.
    HashMap<Long, ByteArrayOutputStream> dataBySegment = new HashMap<>();
    val actualMergeOpAck = new ArrayList<Map.Entry<Long, Long>>();
    context.dataSource.setCompleteMergeCallback((target, source) -> actualMergeOpAck.add(new AbstractMap.SimpleImmutableEntry<Long, Long>(target, source)));
    // Add a few appends to each Transaction aggregator and to the parent aggregator.
    // Seal the first half of the Transaction aggregators (thus, those Transactions will be fully flushed).
    HashSet<Long> sealedTransactionIds = new HashSet<>();
    for (int i = 0; i < context.transactionAggregators.length; i++) {
        SegmentAggregator transactionAggregator = context.transactionAggregators[i];
        long transactionId = transactionAggregator.getMetadata().getId();
        ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
        dataBySegment.put(transactionId, writtenData);
        for (int appendId = 0; appendId < appendCount; appendId++) {
            StorageOperation appendOp = generateAppendAndUpdateMetadata(appendId, transactionId, context);
            transactionAggregator.add(appendOp);
            getAppendData(appendOp, writtenData, context);
        }
        if (i < context.transactionAggregators.length / 2) {
            // We only seal the first half.
            transactionAggregator.add(generateSealAndUpdateMetadata(transactionId, context));
            sealedTransactionIds.add(transactionId);
        }
    }
    // Add MergeTransactionOperations to the parent aggregator, making sure we cover both of the following cases:
    // * Two or more consecutive MergeTransactionOperations both for Transactions that are sealed and for those that are not.
    // * MergeTransactionOperations with appends interspersed between them (in the parent), both for sealed Transactions and non-sealed Transactions.
    long parentSegmentId = context.segmentAggregator.getMetadata().getId();
    @Cleanup ByteArrayOutputStream parentData = new ByteArrayOutputStream();
    for (int transIndex = 0; transIndex < context.transactionAggregators.length; transIndex++) {
        // This helps ensure that we have both interspersed appends, and consecutive MergeTransactionOperations in the parent.
        if (transIndex % 2 == 1) {
            StorageOperation appendOp = generateAppendAndUpdateMetadata(transIndex, parentSegmentId, context);
            context.segmentAggregator.add(appendOp);
            getAppendData(appendOp, parentData, context);
        }
        // Merge this Transaction into the parent & record its data in the final parent data array.
        long transactionId = context.transactionAggregators[transIndex].getMetadata().getId();
        context.segmentAggregator.add(generateMergeTransactionAndUpdateMetadata(transactionId, context));
        ByteArrayOutputStream transactionData = dataBySegment.get(transactionId);
        parentData.write(transactionData.toByteArray());
        transactionData.close();
    }
    // Flush all the Aggregators for as long as at least one of them can flush and actually flushes something.
    flushAllSegments(context);
    // Now check to see that only those Transactions that were sealed were merged.
    for (SegmentAggregator transactionAggregator : context.transactionAggregators) {
        SegmentMetadata transactionMetadata = transactionAggregator.getMetadata();
        boolean expectedMerged = sealedTransactionIds.contains(transactionMetadata.getId());
        if (expectedMerged) {
            Assert.assertTrue("Transaction to be merged was not marked as deleted in metadata.", transactionMetadata.isDeleted());
            Assert.assertFalse("Transaction to be merged still exists in storage.", context.storage.exists(transactionMetadata.getName(), TIMEOUT).join());
        } else {
            Assert.assertFalse("Transaction not to be merged was marked as deleted in metadata.", transactionMetadata.isDeleted());
            boolean exists = context.storage.exists(transactionMetadata.getName(), TIMEOUT).join();
            if (exists) {
                // We're not expecting this to exist, but if it does, do check it.
                SegmentProperties sp = context.storage.getStreamSegmentInfo(transactionMetadata.getName(), TIMEOUT).join();
                Assert.assertFalse("Transaction not to be merged is sealed in storage.", sp.isSealed());
            }
        }
    }
    // Then seal the rest of the Transactions and re-run the flush on the parent a few times.
    for (SegmentAggregator a : context.transactionAggregators) {
        long transactionId = a.getMetadata().getId();
        if (!sealedTransactionIds.contains(transactionId)) {
            // This Transaction was not sealed (and merged) previously. Do it now.
            a.add(generateSealAndUpdateMetadata(transactionId, context));
            sealedTransactionIds.add(transactionId);
        }
    }
    // Flush all the Aggregators for as long as at least one of them can flush and actually flushes something.
    flushAllSegments(context);
    // Verify that all Transactions are now fully merged.
    for (SegmentAggregator transactionAggregator : context.transactionAggregators) {
        SegmentMetadata transactionMetadata = transactionAggregator.getMetadata();
        Assert.assertTrue("Merged Transaction was not marked as deleted in metadata.", transactionMetadata.isDeleted());
        Assert.assertFalse("Merged Transaction still exists in storage.", context.storage.exists(transactionMetadata.getName(), TIMEOUT).join());
    }
    // Verify that, in the end, the contents of the parent are as expected.
    verifySegmentData(parentData.toByteArray(), context);
    // Verify calls to completeMerge.
    val expectedMergeOpSources = Arrays.stream(context.transactionAggregators).map(a -> a.getMetadata().getId()).collect(Collectors.toSet());
    Assert.assertEquals("Unexpected number of calls to completeMerge.", expectedMergeOpSources.size(), actualMergeOpAck.size());
    val actualMergeOpSources = actualMergeOpAck.stream().map(Map.Entry::getValue).collect(Collectors.toSet());
    AssertExtensions.assertContainsSameElements("Unexpected sources for invocation to completeMerge.", expectedMergeOpSources, actualMergeOpSources);
    for (Map.Entry<Long, Long> e : actualMergeOpAck) {
        Assert.assertEquals("Unexpected target for invocation to completeMerge.", context.segmentAggregator.getMetadata().getId(), (long) e.getKey());
    }
}
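Steps 5 and 7 of the strategy rely on a flushAllSegments(...) helper that this page does not show. A plausible sketch, assuming TestContext exposes the aggregators as used above and that WriterFlushResult reports flushed/merged byte counts (both assumptions, not the verbatim pravega helper):

// Hypothetical sketch: keep making passes over every aggregator, flushing whichever
// ones report mustFlush(), until a full pass flushes and merges nothing at all.
private void flushAllSegments(TestContext context) throws Exception {
    boolean anythingFlushed = true;
    while (anythingFlushed) {
        anythingFlushed = false;
        for (SegmentAggregator a : context.transactionAggregators) {
            if (a.mustFlush()) {
                WriterFlushResult result = a.flush(TIMEOUT).join();
                anythingFlushed |= result.getFlushedBytes() + result.getMergedBytes() > 0;
            }
        }
        if (context.segmentAggregator.mustFlush()) {
            WriterFlushResult result = context.segmentAggregator.flush(TIMEOUT).join();
            anythingFlushed |= result.getFlushedBytes() + result.getMergedBytes() > 0;
        }
    }
}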

Example 58 with Timeout

Use of org.junit.rules.Timeout in project pravega by pravega.

The class WriterTableProcessorTests, method checkIndex.

private void checkIndex(HashMap<BufferView, TableEntry> existingEntries, HashMap<BufferView, UUID> allKeys, TestContext context) throws Exception {
    // Get all the buckets associated with the given keys.
    val timer = new TimeoutTimer(TIMEOUT);
    val bucketsByHash = context.indexReader.locateBuckets(context.segmentMock, allKeys.values(), timer).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    // Index the existing Keys by their current offsets.
    val keysByOffset = existingEntries.entrySet().stream().collect(Collectors.toMap(e -> e.getValue().getKey().getVersion(), Map.Entry::getKey));
    // Load up all the offsets for all buckets.
    val buckets = bucketsByHash.values().stream().distinct().collect(Collectors.toMap(b -> b, b -> context.indexReader.getBucketOffsets(context.segmentMock, b, timer).join()));
    // Loop through all the buckets' offsets and verify that each offset points to an existing key.
    for (val e : buckets.entrySet()) {
        val bucketOffsets = e.getValue();
        for (val offset : bucketOffsets) {
            Assert.assertTrue("Found Bucket Offset that points to non-existing key.", keysByOffset.containsKey(offset));
        }
    }
    // Verify that every existing key is included in its TableBucket; a deleted key must not appear in any bucket.
    for (val e : allKeys.entrySet()) {
        val key = e.getKey();
        val tableEntry = existingEntries.get(key);
        val bucket = bucketsByHash.get(e.getValue());
        Assert.assertNotNull("Test error: no bucket found.", bucket);
        val bucketOffsets = buckets.get(bucket);
        if (tableEntry != null) {
            // This key should exist: just verify the TableEntry's offset (Key Version) exists in the Bucket's offset list.
            Assert.assertTrue("Non-deleted key was not included in a Table Bucket.", bucketOffsets.contains(tableEntry.getKey().getVersion()));
        } else {
            // Verify that all the keys that the Table Bucket points to do not match our key. Use our existing offset-key cache for that.
            for (val offset : bucketOffsets) {
                val keyAtOffset = keysByOffset.get(offset);
                Assert.assertNotEquals("Deleted key was still included in a Table Bucket.", key, keyAtOffset);
            }
        }
    }
}
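Note how checkIndex threads a single TimeoutTimer through every asynchronous call, so the whole verification shares one deadline rather than restarting the clock per call. A minimal, self-contained sketch of that pattern (the sleep stands in for a real async step; getRemaining() is assumed from pravega's common library):

import java.time.Duration;

import io.pravega.common.TimeoutTimer;

public class TimeoutTimerExample {

    public static void main(String[] args) throws InterruptedException {
        // One timer for a multi-step operation: every step draws on the same budget.
        TimeoutTimer timer = new TimeoutTimer(Duration.ofSeconds(30));
        Thread.sleep(100); // pretend step 1 took 100 ms
        // getRemaining() shrinks as time passes, so later steps see a smaller deadline.
        System.out.println("Budget left for step 2: " + timer.getRemaining());
    }
}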

Example 59 with Timeout

Use of org.junit.rules.Timeout in project pravega by pravega.

The class AttributeAggregatorTests, method testRecovery.

/**
 * Tests the ability to resume operations after a recovery.
 */
@Test
public void testRecovery() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    final int attributesPerUpdate = Math.max(1, config.getFlushAttributesThreshold() / 5);
    final int updateCount = config.getFlushAttributesThreshold() * 10;
    @Cleanup TestContext context = new TestContext(config);
    // Generate some data.
    val operations = new ArrayList<AttributeUpdaterOperation>();
    for (int i = 0; i < updateCount; i++) {
        // Add another operation.
        AttributeUpdaterOperation op = i % 2 == 0 ? generateAppendAndUpdateMetadata(attributesPerUpdate, context) : generateUpdateAttributesAndUpdateMetadata(attributesPerUpdate, context);
        operations.add(op);
    }
    // Simulate a recovery after each operation: for every recoveryId, create a fresh aggregator, re-add all
    // operations with indices less than or equal to recoveryId, and observe the results.
    for (int recoveryId = 0; recoveryId < operations.size(); recoveryId++) {
        long lastPersistedSeqNo = context.segmentMetadata.getAttributes().getOrDefault(Attributes.ATTRIBUTE_SEGMENT_PERSIST_SEQ_NO, Operation.NO_SEQUENCE_NUMBER);
        val outstandingAttributes = new HashSet<AttributeId>();
        val firstOutstandingSeqNo = new AtomicLong(Operation.NO_SEQUENCE_NUMBER);
        val lastOutstandingSeqNo = new AtomicLong(Operation.NO_SEQUENCE_NUMBER);
        @Cleanup val aggregator = context.createAggregator();
        val expectedAttributes = new HashMap<AttributeId, Long>();
        for (int i = 0; i <= recoveryId; i++) {
            AttributeUpdaterOperation op = operations.get(i);
            // Collect the latest values from this update.
            op.getAttributeUpdates().stream().filter(au -> !Attributes.isCoreAttribute(au.getAttributeId())).forEach(au -> expectedAttributes.put(au.getAttributeId(), au.getValue()));
            aggregator.add(op);
            // We only expect to process an op if its SeqNo is beyond the last one we committed.
            boolean expectedToProcess = op.getSequenceNumber() > lastPersistedSeqNo;
            if (expectedToProcess) {
                addExtendedAttributes(op, outstandingAttributes);
                firstOutstandingSeqNo.compareAndSet(Operation.NO_SEQUENCE_NUMBER, op.getSequenceNumber());
                lastOutstandingSeqNo.set(op.getSequenceNumber());
            }
            Assert.assertEquals("Unexpected LUSN.", firstOutstandingSeqNo.get(), aggregator.getLowestUncommittedSequenceNumber());
            boolean expectFlush = outstandingAttributes.size() >= config.getFlushAttributesThreshold();
            Assert.assertEquals("Unexpected value returned by mustFlush() (count threshold).", expectFlush, aggregator.mustFlush());
            if (expectFlush) {
                // Call flush() and inspect the result.
                WriterFlushResult flushResult = aggregator.flush(TIMEOUT).join();
                Assert.assertEquals("Not all attributes were flushed (count threshold).", outstandingAttributes.size(), flushResult.getFlushedAttributes());
                // We want to verify just those attributes that we flushed, not all of them (not all may be in yet).
                AssertExtensions.assertMapEquals("Unexpected attributes stored in AttributeIndex.", expectedAttributes, context.dataSource.getPersistedAttributes(SEGMENT_ID));
                checkAutoAttributesEventual(lastOutstandingSeqNo.get(), context);
                outstandingAttributes.clear();
                firstOutstandingSeqNo.set(Operation.NO_SEQUENCE_NUMBER);
                lastOutstandingSeqNo.set(Operation.NO_SEQUENCE_NUMBER);
            }
        }
        // We have reached the end. Flush the rest and perform a full check.
        if (recoveryId == operations.size() - 1) {
            aggregator.add(generateSealAndUpdateMetadata(context));
            aggregator.flush(TIMEOUT).join();
            checkAttributes(context);
            checkAutoAttributesEventual(lastOutstandingSeqNo.get(), context);
        }
    }
}
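The firstOutstandingSeqNo / lastOutstandingSeqNo pair above is a sentinel-plus-compareAndSet idiom: compareAndSet succeeds only while the value still holds the NO_SEQUENCE_NUMBER sentinel, so it captures the first processed sequence number exactly once, while set(...) always tracks the latest. A self-contained version of the idiom (class and sentinel names are illustrative):

import java.util.concurrent.atomic.AtomicLong;

public final class SeqNoRangeTracker {

    // Sentinel playing the role of Operation.NO_SEQUENCE_NUMBER in the test above.
    private static final long NO_SEQ_NO = Long.MIN_VALUE;

    private final AtomicLong first = new AtomicLong(NO_SEQ_NO);
    private final AtomicLong last = new AtomicLong(NO_SEQ_NO);

    public void record(long seqNo) {
        first.compareAndSet(NO_SEQ_NO, seqNo); // succeeds only for the very first record
        last.set(seqNo);                       // always advances to the most recent
    }

    public void reset() { // mirrors the test clearing both values after a flush
        first.set(NO_SEQ_NO);
        last.set(NO_SEQ_NO);
    }
}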

Example 60 with Timeout

Use of org.junit.rules.Timeout in project pravega by pravega.

The class AttributeAggregatorTests, method testFlushWithGenericErrors.

/**
 * Tests {@link AttributeAggregator#flush} in the presence of generic errors.
 */
@Test
public void testFlushWithGenericErrors() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    @Cleanup TestContext context = new TestContext(config);
    // Add a single operation, which alone should trigger the flush.
    AttributeUpdaterOperation op = generateUpdateAttributesAndUpdateMetadata(config.getFlushAttributesThreshold(), context);
    context.aggregator.add(op);
    Assert.assertTrue("Unexpected result from mustFlush().", context.aggregator.mustFlush());
    // Cause the attribute update to fail, and validate that the error is bubbled up.
    context.dataSource.setPersistAttributesErrorInjector(new ErrorInjector<>(i -> true, IntentionalException::new));
    AssertExtensions.assertSuppliedFutureThrows("Expected flush() to have failed.", () -> context.aggregator.flush(TIMEOUT), ex -> ex instanceof IntentionalException);
    Assert.assertTrue("Unexpected result from mustFlush() after failed attempt.", context.aggregator.mustFlush());
    checkAutoAttributes(Operation.NO_SEQUENCE_NUMBER, context);
    // Now try again, without errors.
    context.dataSource.setPersistAttributesErrorInjector(null);
    val result = context.aggregator.flush(TIMEOUT).join();
    // Subtract 1 from the update count to exclude core attributes, which are not flushed.
    Assert.assertEquals("Unexpected number of attributes flushed.", op.getAttributeUpdates().size() - 1, result.getFlushedAttributes());
    checkAttributes(context);
    checkAutoAttributesEventual(op.getSequenceNumber(), context);
}
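AssertExtensions.assertSuppliedFutureThrows is pravega's helper for asserting that an async call fails with a given exception type. Without it, the same check can be written with plain JUnit and CompletableFuture; a self-contained sketch:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

import org.junit.Assert;

public class FutureFailureAssertExample {

    public static void main(String[] args) {
        CompletableFuture<Void> failing = new CompletableFuture<>();
        failing.completeExceptionally(new IllegalStateException("injected failure"));
        try {
            failing.join(); // join() wraps the failure cause in a CompletionException
            Assert.fail("Expected the future to fail.");
        } catch (CompletionException ex) {
            Assert.assertTrue("Unexpected failure cause.", ex.getCause() instanceof IllegalStateException);
        }
    }
}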
