Use of org.junit.rules.Timeout in project pravega by pravega.
The class EventSegmentReaderImplTest, method testEventDataPartialTimeout.
@Test
public void testEventDataPartialTimeout() throws SegmentTruncatedException, EndOfSegmentException {
    // Setup Mocks
    SegmentInputStream segmentInputStream = mock(SegmentInputStream.class);
    @Cleanup EventSegmentReaderImpl segmentReader = new EventSegmentReaderImpl(segmentInputStream);
    doAnswer(i -> {
        ByteBuffer headerReadingBuffer = i.getArgument(0);
        headerReadingBuffer.putInt(WireCommandType.EVENT.getCode());
        headerReadingBuffer.putInt(10);
        return WireCommands.TYPE_PLUS_LENGTH_SIZE;
    }).when(segmentInputStream).read(any(ByteBuffer.class), eq(1000L));
    // simulate a partial read followed by timeout.
    doAnswer(i -> {
        ByteBuffer headerReadingBuffer = i.getArgument(0);
        // append 5 bytes. 5 bytes are remaining.
        headerReadingBuffer.put((byte) 0x01);
        headerReadingBuffer.put((byte) 0x01);
        headerReadingBuffer.put((byte) 0x01);
        headerReadingBuffer.put((byte) 0x01);
        headerReadingBuffer.put((byte) 0x01);
        return 5;
    }).doReturn(0) // the second invocation should cause a timeout.
      .when(segmentInputStream).read(any(ByteBuffer.class), eq(EventSegmentReaderImpl.PARTIAL_DATA_TIMEOUT));
    when(segmentInputStream.getSegmentId()).thenReturn(new Segment("scope", "stream", 0L));
    // Invoke read.
    ByteBuffer readData = segmentReader.read(1000);
    assertNull(readData);
    verify(segmentInputStream, times(1)).setOffset(0L, true);
    verify(segmentInputStream, times(0)).setOffset(0);
}
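The snippet above never declares the org.junit.rules.Timeout rule this page is indexing; in these test classes the rule is typically a class-level field that fails any test exceeding a global time limit. A minimal sketch of such a declaration (the surrounding class body and the 30-second limit are illustrative assumptions, not copied from EventSegmentReaderImplTest):

import org.junit.Rule;
import org.junit.rules.Timeout;

public class EventSegmentReaderImplTest {

    // Fails any test in this class that runs longer than the limit below.
    // The 30-second value is an illustrative assumption, not the project's actual setting.
    @Rule
    public final Timeout globalTimeout = Timeout.seconds(30);

    // ... test methods such as testEventDataPartialTimeout() ...
}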
Use of org.junit.rules.Timeout in project pravega by pravega.
The class SegmentAggregatorTests, method testMerge.
/**
 * Tests the flush() method with Append and MergeTransactionOperations.
 * Overall strategy:
 * 1. Create one Parent Segment and N Transaction Segments.
 * 2. Populate all Transaction Segments with data.
 * 3. Seal the first N/2 Transaction Segments.
 * 4. Add some Appends, interspersed with Merge Transaction Ops to the Parent (for all Transactions).
 * 5. Call flush() repeatedly on all Segments, until nothing is flushed anymore. Verify only the first N/2 Transactions were merged.
 * 6. Seal the remaining N/2 Transaction Segments.
 * 7. Call flush() repeatedly on all Segments, until nothing is flushed anymore. Verify all Transactions were merged.
 * 8. Verify the Parent Segment has all the data (from itself and its Transactions), in the correct order.
 */
@Test
@SuppressWarnings("checkstyle:CyclomaticComplexity")
public void testMerge() throws Exception {
    // This is the number of appends per Segment/Transaction - there will be a lot of appends here.
    final int appendCount = 100;
    final WriterConfig config = WriterConfig.builder()
            .with(WriterConfig.FLUSH_THRESHOLD_BYTES, appendCount * 50) // Extra high length threshold.
            .with(WriterConfig.FLUSH_THRESHOLD_MILLIS, 1000L)
            .with(WriterConfig.MAX_FLUSH_SIZE_BYTES, 10000)
            .with(WriterConfig.MIN_READ_TIMEOUT_MILLIS, 10L)
            .build();
    @Cleanup TestContext context = new TestContext(config);
    // Initialize all segments.
    context.segmentAggregator.initialize(TIMEOUT).join();
    for (SegmentAggregator a : context.transactionAggregators) {
        a.initialize(TIMEOUT).join();
    }
    // Store written data by segment - so we can check it later.
    HashMap<Long, ByteArrayOutputStream> dataBySegment = new HashMap<>();
    val actualMergeOpAck = new ArrayList<Map.Entry<Long, Long>>();
    context.dataSource.setCompleteMergeCallback((target, source) -> actualMergeOpAck.add(new AbstractMap.SimpleImmutableEntry<Long, Long>(target, source)));
    // Add a few appends to each Transaction aggregator and to the parent aggregator.
    // Seal the first half of the Transaction aggregators (thus, those Transactions will be fully flushed).
    HashSet<Long> sealedTransactionIds = new HashSet<>();
    for (int i = 0; i < context.transactionAggregators.length; i++) {
        SegmentAggregator transactionAggregator = context.transactionAggregators[i];
        long transactionId = transactionAggregator.getMetadata().getId();
        ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
        dataBySegment.put(transactionId, writtenData);
        for (int appendId = 0; appendId < appendCount; appendId++) {
            StorageOperation appendOp = generateAppendAndUpdateMetadata(appendId, transactionId, context);
            transactionAggregator.add(appendOp);
            getAppendData(appendOp, writtenData, context);
        }
        if (i < context.transactionAggregators.length / 2) {
            // We only seal the first half.
            transactionAggregator.add(generateSealAndUpdateMetadata(transactionId, context));
            sealedTransactionIds.add(transactionId);
        }
    }
    // Add MergeTransactionOperations to the parent aggregator, making sure we have both of the following cases:
    // * Two or more consecutive MergeTransactionOperations, both for Transactions that are sealed and for those that are not.
    // * MergeTransactionOperations with appends interspersed between them (in the parent), both for sealed Transactions and non-sealed Transactions.
    long parentSegmentId = context.segmentAggregator.getMetadata().getId();
    @Cleanup ByteArrayOutputStream parentData = new ByteArrayOutputStream();
    for (int transIndex = 0; transIndex < context.transactionAggregators.length; transIndex++) {
        // This helps ensure that we have both interspersed appends, and consecutive MergeTransactionOperations in the parent.
        if (transIndex % 2 == 1) {
            StorageOperation appendOp = generateAppendAndUpdateMetadata(transIndex, parentSegmentId, context);
            context.segmentAggregator.add(appendOp);
            getAppendData(appendOp, parentData, context);
        }
        // Merge this Transaction into the parent & record its data in the final parent data array.
        long transactionId = context.transactionAggregators[transIndex].getMetadata().getId();
        context.segmentAggregator.add(generateMergeTransactionAndUpdateMetadata(transactionId, context));
        ByteArrayOutputStream transactionData = dataBySegment.get(transactionId);
        parentData.write(transactionData.toByteArray());
        transactionData.close();
    }
    // Flush all the Aggregators as long as at least one of them reports being able to flush and that it did flush something.
    flushAllSegments(context);
    // Now check to see that only those Transactions that were sealed were merged.
    for (SegmentAggregator transactionAggregator : context.transactionAggregators) {
        SegmentMetadata transactionMetadata = transactionAggregator.getMetadata();
        boolean expectedMerged = sealedTransactionIds.contains(transactionMetadata.getId());
        if (expectedMerged) {
            Assert.assertTrue("Transaction to be merged was not marked as deleted in metadata.", transactionMetadata.isDeleted());
            Assert.assertFalse("Transaction to be merged still exists in storage.", context.storage.exists(transactionMetadata.getName(), TIMEOUT).join());
        } else {
            Assert.assertFalse("Transaction not to be merged was marked as deleted in metadata.", transactionMetadata.isDeleted());
            boolean exists = context.storage.exists(transactionMetadata.getName(), TIMEOUT).join();
            if (exists) {
                // We're not expecting this to exist, but if it does, do check it.
                SegmentProperties sp = context.storage.getStreamSegmentInfo(transactionMetadata.getName(), TIMEOUT).join();
                Assert.assertFalse("Transaction not to be merged is sealed in storage.", sp.isSealed());
            }
        }
    }
    // Then seal the rest of the Transactions and re-run the flush on the parent a few times.
    for (SegmentAggregator a : context.transactionAggregators) {
        long transactionId = a.getMetadata().getId();
        if (!sealedTransactionIds.contains(transactionId)) {
            // This Transaction was not sealed (and merged) previously. Do it now.
            a.add(generateSealAndUpdateMetadata(transactionId, context));
            sealedTransactionIds.add(transactionId);
        }
    }
    // Flush all the Aggregators as long as at least one of them reports being able to flush and that it did flush something.
    flushAllSegments(context);
    // Verify that all Transactions are now fully merged.
    for (SegmentAggregator transactionAggregator : context.transactionAggregators) {
        SegmentMetadata transactionMetadata = transactionAggregator.getMetadata();
        Assert.assertTrue("Merged Transaction was not marked as deleted in metadata.", transactionMetadata.isDeleted());
        Assert.assertFalse("Merged Transaction still exists in storage.", context.storage.exists(transactionMetadata.getName(), TIMEOUT).join());
    }
    // Verify that, in the end, the contents of the parent is as expected.
    verifySegmentData(parentData.toByteArray(), context);
    // Verify calls to completeMerge.
    val expectedMergeOpSources = Arrays.stream(context.transactionAggregators).map(a -> a.getMetadata().getId()).collect(Collectors.toSet());
    Assert.assertEquals("Unexpected number of calls to completeMerge.", expectedMergeOpSources.size(), actualMergeOpAck.size());
    val actualMergeOpSources = actualMergeOpAck.stream().map(Map.Entry::getValue).collect(Collectors.toSet());
    AssertExtensions.assertContainsSameElements("Unexpected sources for invocation to completeMerge.", expectedMergeOpSources, actualMergeOpSources);
    for (Map.Entry<Long, Long> e : actualMergeOpAck) {
        Assert.assertEquals("Unexpected target for invocation to completeMerge.", context.segmentAggregator.getMetadata().getId(), (long) e.getKey());
    }
}
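The test above depends on a flushAllSegments(context) helper that is not shown on this page; the inline comments describe its contract (keep flushing as long as at least one aggregator reports it can flush and actually flushed something). A minimal sketch of such a loop, under the assumption that SegmentAggregator exposes mustFlush() and flush(Duration), and that the returned WriterFlushResult carries flushed/merged byte counts:

// Hypothetical sketch only - not the actual flushAllSegments(context) helper from SegmentAggregatorTests.
// Repeatedly flush every aggregator until a full pass over all of them flushes nothing.
private static void flushUntilNothingLeft(Iterable<SegmentAggregator> aggregators, Duration timeout) {
    boolean flushedSomething = true;
    while (flushedSomething) {
        flushedSomething = false;
        for (SegmentAggregator a : aggregators) {
            if (a.mustFlush()) {
                // Assumes WriterFlushResult exposes getFlushedBytes() and getMergedBytes().
                WriterFlushResult result = a.flush(timeout).join();
                flushedSomething |= result.getFlushedBytes() + result.getMergedBytes() > 0;
            }
        }
    }
}

In testMerge the iterable would cover the parent aggregator plus all transaction aggregators.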
Use of org.junit.rules.Timeout in project pravega by pravega.
The class WriterTableProcessorTests, method checkIndex.
private void checkIndex(HashMap<BufferView, TableEntry> existingEntries, HashMap<BufferView, UUID> allKeys, TestContext context) throws Exception {
    // Get all the buckets associated with the given keys.
    val timer = new TimeoutTimer(TIMEOUT);
    val bucketsByHash = context.indexReader.locateBuckets(context.segmentMock, allKeys.values(), timer).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    // Index the existing Keys by their current offsets.
    val keysByOffset = existingEntries.entrySet().stream().collect(Collectors.toMap(e -> e.getValue().getKey().getVersion(), Map.Entry::getKey));
    // Load up all the offsets for all buckets.
    val buckets = bucketsByHash.values().stream().distinct()
            .collect(Collectors.toMap(b -> b, b -> context.indexReader.getBucketOffsets(context.segmentMock, b, timer).join()));
    // Loop through all the buckets' offsets and verify that those offsets do point to existing keys.
    for (val e : buckets.entrySet()) {
        val bucketOffsets = e.getValue();
        for (val offset : bucketOffsets) {
            Assert.assertTrue("Found Bucket Offset that points to non-existing key.", keysByOffset.containsKey(offset));
        }
    }
    // Check that every key that still exists has its offset recorded in its TableBucket; otherwise it is not included in any bucket.
    for (val e : allKeys.entrySet()) {
        val key = e.getKey();
        val tableEntry = existingEntries.get(key);
        val bucket = bucketsByHash.get(e.getValue());
        Assert.assertNotNull("Test error: no bucket found.", bucket);
        val bucketOffsets = buckets.get(bucket);
        if (tableEntry != null) {
            // This key should exist: just verify the TableEntry's offset (Key Version) exists in the Bucket's offset list.
            Assert.assertTrue("Non-deleted key was not included in a Table Bucket.", bucketOffsets.contains(tableEntry.getKey().getVersion()));
        } else {
            // Verify that all the keys that the Table Bucket points to do not match our key. Use our existing offset-key cache for that.
            for (val offset : bucketOffsets) {
                val keyAtOffset = keysByOffset.get(offset);
                Assert.assertNotEquals("Deleted key was still included in a Table Bucket.", key, keyAtOffset);
            }
        }
    }
}
Use of org.junit.rules.Timeout in project pravega by pravega.
The class AttributeAggregatorTests, method testRecovery.
/**
 * Tests the ability to resume operations after a recovery.
 */
@Test
public void testRecovery() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    final int attributesPerUpdate = Math.max(1, config.getFlushAttributesThreshold() / 5);
    final int updateCount = config.getFlushAttributesThreshold() * 10;
    @Cleanup TestContext context = new TestContext(config);
    // Generate some data.
    val operations = new ArrayList<AttributeUpdaterOperation>();
    for (int i = 0; i < updateCount; i++) {
        // Add another operation.
        AttributeUpdaterOperation op = i % 2 == 0
                ? generateAppendAndUpdateMetadata(attributesPerUpdate, context)
                : generateUpdateAttributesAndUpdateMetadata(attributesPerUpdate, context);
        operations.add(op);
    }
    // Simulate a recovery at each point in the sequence: for every recoveryId, create a new aggregator,
    // include all operations with indices less than or equal to recoveryId and observe the results.
    for (int recoveryId = 0; recoveryId < operations.size(); recoveryId++) {
        long lastPersistedSeqNo = context.segmentMetadata.getAttributes().getOrDefault(Attributes.ATTRIBUTE_SEGMENT_PERSIST_SEQ_NO, Operation.NO_SEQUENCE_NUMBER);
        val outstandingAttributes = new HashSet<AttributeId>();
        val firstOutstandingSeqNo = new AtomicLong(Operation.NO_SEQUENCE_NUMBER);
        val lastOutstandingSeqNo = new AtomicLong(Operation.NO_SEQUENCE_NUMBER);
        @Cleanup val aggregator = context.createAggregator();
        val expectedAttributes = new HashMap<AttributeId, Long>();
        for (int i = 0; i <= recoveryId; i++) {
            AttributeUpdaterOperation op = operations.get(i);
            // Collect the latest values from this update.
            op.getAttributeUpdates().stream()
                    .filter(au -> !Attributes.isCoreAttribute(au.getAttributeId()))
                    .forEach(au -> expectedAttributes.put(au.getAttributeId(), au.getValue()));
            aggregator.add(op);
            // We only expect to process an op if its SeqNo is beyond the last one we committed.
            boolean expectedToProcess = op.getSequenceNumber() > lastPersistedSeqNo;
            if (expectedToProcess) {
                addExtendedAttributes(op, outstandingAttributes);
                firstOutstandingSeqNo.compareAndSet(Operation.NO_SEQUENCE_NUMBER, op.getSequenceNumber());
                lastOutstandingSeqNo.set(op.getSequenceNumber());
            }
            Assert.assertEquals("Unexpected LUSN.", firstOutstandingSeqNo.get(), aggregator.getLowestUncommittedSequenceNumber());
            boolean expectFlush = outstandingAttributes.size() >= config.getFlushAttributesThreshold();
            Assert.assertEquals("Unexpected value returned by mustFlush() (count threshold).", expectFlush, aggregator.mustFlush());
            if (expectFlush) {
                // Call flush() and inspect the result.
                WriterFlushResult flushResult = aggregator.flush(TIMEOUT).join();
                Assert.assertEquals("Not all attributes were flushed (count threshold).", outstandingAttributes.size(), flushResult.getFlushedAttributes());
                // We want to verify just those attributes that we flushed, not all of them (not all may be in yet).
                AssertExtensions.assertMapEquals("Unexpected attributes stored in AttributeIndex.", expectedAttributes, context.dataSource.getPersistedAttributes(SEGMENT_ID));
                checkAutoAttributesEventual(lastOutstandingSeqNo.get(), context);
                outstandingAttributes.clear();
                firstOutstandingSeqNo.set(Operation.NO_SEQUENCE_NUMBER);
                lastOutstandingSeqNo.set(Operation.NO_SEQUENCE_NUMBER);
            }
        }
        // We have reached the end. Flush the rest and perform a full check.
        if (recoveryId == operations.size() - 1) {
            aggregator.add(generateSealAndUpdateMetadata(context));
            aggregator.flush(TIMEOUT).join();
            checkAttributes(context);
            checkAutoAttributesEventual(lastOutstandingSeqNo.get(), context);
        }
    }
}
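The inner loop above encodes a single recovery rule: after a "recovery", a re-added operation is only expected to be processed again if its sequence number lies beyond the watermark read from ATTRIBUTE_SEGMENT_PERSIST_SEQ_NO. Written as a standalone predicate (a sketch of the expectation the test asserts, not aggregator code):

// Sketch of the expectation checked by testRecovery: operations at or below the last persisted
// sequence number are expected to be ignored when re-added after a recovery.
static boolean expectedToProcessAfterRecovery(long opSequenceNumber, long lastPersistedSeqNo) {
    return opSequenceNumber > lastPersistedSeqNo;
}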
Use of org.junit.rules.Timeout in project pravega by pravega.
The class AttributeAggregatorTests, method testFlushWithGenericErrors.
/**
 * Tests {@link AttributeAggregator#flush} in the presence of generic errors.
 */
@Test
public void testFlushWithGenericErrors() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    @Cleanup TestContext context = new TestContext(config);
    // Add a single operation, which alone should trigger the flush.
    AttributeUpdaterOperation op = generateUpdateAttributesAndUpdateMetadata(config.getFlushAttributesThreshold(), context);
    context.aggregator.add(op);
    Assert.assertTrue("Unexpected result from mustFlush().", context.aggregator.mustFlush());
    // Cause the attribute update to fail, and validate that the error is bubbled up.
    context.dataSource.setPersistAttributesErrorInjector(new ErrorInjector<>(i -> true, IntentionalException::new));
    AssertExtensions.assertSuppliedFutureThrows("Expected flush() to have failed.",
            () -> context.aggregator.flush(TIMEOUT),
            ex -> ex instanceof IntentionalException);
    Assert.assertTrue("Unexpected result from mustFlush() after failed attempt.", context.aggregator.mustFlush());
    checkAutoAttributes(Operation.NO_SEQUENCE_NUMBER, context);
    // Now try again, without errors.
    context.dataSource.setPersistAttributesErrorInjector(null);
    val result = context.aggregator.flush(TIMEOUT).join();
    Assert.assertEquals("Unexpected number of attributes flushed",
            op.getAttributeUpdates().size() - 1, // Subtract 1 for core attributes.
            result.getFlushedAttributes());
    checkAttributes(context);
    checkAutoAttributesEventual(op.getSequenceNumber(), context);
}
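The ErrorInjector used above is a Pravega test utility whose implementation is not shown on this page. To illustrate the pattern it enables (make the persist call fail on demand, then clear the injector so the retry succeeds), a hypothetical minimal injector might look like the following; the class name and methods are assumptions for illustration, not Pravega's actual API:

import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Predicate;
import java.util.function.Supplier;

// Hypothetical minimal error injector illustrating the failure-injection pattern used by the test.
// This is not Pravega's ErrorInjector; its shape is an assumption for illustration only.
final class SketchErrorInjector<T extends RuntimeException> {
    private final Predicate<Integer> shouldFail;  // decides, per invocation index, whether to fail
    private final Supplier<T> exceptionSupplier;  // builds the exception to throw
    private final AtomicInteger invocations = new AtomicInteger();

    SketchErrorInjector(Predicate<Integer> shouldFail, Supplier<T> exceptionSupplier) {
        this.shouldFail = shouldFail;
        this.exceptionSupplier = exceptionSupplier;
    }

    // Called by the data source at the start of the operation that may fail (persisting attributes here).
    void throwIfNecessary() {
        if (shouldFail.test(invocations.getAndIncrement())) {
            throw exceptionSupplier.get();
        }
    }
}

With a predicate of i -> true every persist attempt fails with the supplied exception; passing null to the setter (as the test does before the second flush) removes the injection and lets the flush complete.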