use of org.junit.rules.Timeout in project pravega by pravega.
the class AttributeAggregatorTests method testFlushWithExpectedErrors.
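None of the excerpts on this page show the org.junit.rules.Timeout declaration itself; the test classes register it as a JUnit rule so a hung test method fails rather than stalling the build. A minimal sketch of such a declaration follows; the field name and the 30-second limit are assumptions for illustration, not values taken from the Pravega sources.

import org.junit.Rule;
import org.junit.rules.Timeout;

public class AttributeAggregatorTests {
    // Fails any test method in this class that runs longer than the configured limit.
    // The actual limit used by the Pravega test classes is not shown in these excerpts.
    @Rule
    public Timeout globalTimeout = Timeout.seconds(30);

    // ... test methods such as testFlushWithExpectedErrors() below ...
}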
/**
* Tests {@link AttributeAggregator#flush} in the presence of expected errors (when the segment is sealed or deleted).
*/
@Test
public void testFlushWithExpectedErrors() throws Exception {
final WriterConfig config = DEFAULT_CONFIG;
@Cleanup TestContext context = new TestContext(config);
// Add a single operation, which alone should trigger the flush.
val op1 = generateUpdateAttributesAndUpdateMetadata(config.getFlushAttributesThreshold(), context);
context.aggregator.add(op1);
Assert.assertTrue("Unexpected result from mustFlush().", context.aggregator.mustFlush());
// Segment is reported to be sealed.
context.dataSource.setPersistAttributesErrorInjector(new ErrorInjector<>(i -> true, () -> new StreamSegmentSealedException(SEGMENT_NAME)));
AssertExtensions.assertSuppliedFutureThrows("Expected flush() to have failed for a sealed attribute index (but unsealed segment).",
        () -> context.aggregator.flush(TIMEOUT),
        ex -> ex instanceof StreamSegmentSealedException);
context.segmentMetadata.markSealed();
Assert.assertTrue("Unexpected result from mustFlush() after failed attempt (seal).", context.aggregator.mustFlush());
val resultSeal = context.aggregator.flush(TIMEOUT).join();
Assert.assertEquals("Unexpected number of attributes flushed after seal.", op1.getAttributeUpdates().size() - 1, // Subtract 1 for core attributes.
resultSeal.getFlushedAttributes());
Assert.assertFalse("Unexpected result from mustFlush() after successful attempt (seal).", context.aggregator.mustFlush());
// Segment is sealed, so we can't change this value
checkAutoAttributes(Operation.NO_SEQUENCE_NUMBER, context);
// Segment is reported to be deleted.
val op2 = generateUpdateAttributesAndUpdateMetadata(config.getFlushAttributesThreshold(), context);
context.aggregator.add(op2);
context.dataSource.setPersistAttributesErrorInjector(new ErrorInjector<>(i -> true, () -> new StreamSegmentNotExistsException(SEGMENT_NAME)));
AssertExtensions.assertSuppliedFutureThrows("Expected flush() to have failed for a deleted attribute index (but unsealed segment).",
        () -> context.aggregator.flush(TIMEOUT),
        ex -> ex instanceof StreamSegmentNotExistsException);
Assert.assertTrue("Unexpected result from mustFlush() after failed attempt (deleted).", context.aggregator.mustFlush());
context.segmentMetadata.markDeleted();
Assert.assertFalse("Unexpected result from mustFlush() after segment deleted.", context.aggregator.mustFlush());
val resultDelete = context.aggregator.flush(TIMEOUT).join();
Assert.assertEquals("Unexpected number of attributes flushed after delete.", 0, resultDelete.getFlushedAttributes());
// Segment is deleted, so this shouldn't change.
checkAutoAttributes(Operation.NO_SEQUENCE_NUMBER, context);
}
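The test above drives the failure paths through the ErrorInjector passed to setPersistAttributesErrorInjector(). Below is a rough sketch of that predicate-plus-supplier pattern; the class and method names are illustrative only and do not reproduce Pravega's actual ErrorInjector implementation.

import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Predicate;
import java.util.function.Supplier;

// Rough sketch of a predicate-plus-supplier error injector; illustrative only.
class SimpleErrorInjector<T extends Exception> {
    private final AtomicInteger invocationCount = new AtomicInteger();
    private final Predicate<Integer> shouldThrow;
    private final Supplier<T> exceptionSupplier;

    SimpleErrorInjector(Predicate<Integer> shouldThrow, Supplier<T> exceptionSupplier) {
        this.shouldThrow = shouldThrow;
        this.exceptionSupplier = exceptionSupplier;
    }

    // Invoked by the test fixture before each persist attempt; throws when the predicate matches.
    void throwIfNecessary() throws T {
        if (this.shouldThrow.test(this.invocationCount.getAndIncrement())) {
            throw this.exceptionSupplier.get();
        }
    }
}

With a predicate of i -> true, as in the test, every persist attempt fails; the test then checks how the aggregator reconciles that failure once the segment metadata is marked sealed or deleted.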
use of org.junit.rules.Timeout in project pravega by pravega.
the class ContainerKeyIndexTests method testRecovery.
private void testRecovery(TableExtensionConfig config) throws Exception {
val s = new EntrySerializer();
@Cleanup val context = new TestContext(config);
// Setup the segment with initial attributes.
val iw = new IndexWriter(HASHER, executorService());
// 1. Generate initial set of keys and serialize them to the segment.
val keys = generateUnversionedKeys(BATCH_SIZE, context);
val entries1 = new ArrayList<TableEntry>(keys.size());
val offset = new AtomicLong();
val hashes = new ArrayList<UUID>();
val keysWithOffsets = new HashMap<UUID, KeyWithOffset>();
for (val k : keys) {
val hash = HASHER.hash(k.getKey());
hashes.add(hash);
byte[] valueData = new byte[Math.max(1, context.random.nextInt(100))];
context.random.nextBytes(valueData);
val entry = TableEntry.unversioned(k.getKey(), new ByteArraySegment(valueData));
keysWithOffsets.put(hash, new KeyWithOffset(k.getKey(), offset.getAndAdd(s.getUpdateLength(entry))));
entries1.add(entry);
}
val update1 = s.serializeUpdate(entries1);
Assert.assertEquals(offset.get(), update1.getLength());
context.segment.append(update1, null, TIMEOUT).join();
// 2. Initiate a recovery and verify pre-caching is triggered and requests are auto-unblocked.
val get1 = context.index.getBucketOffsets(context.segment, hashes, context.timer);
val result1 = get1.get(SHORT_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
val expected1 = new HashMap<UUID, Long>();
keysWithOffsets.forEach((k, o) -> expected1.put(k, o.offset));
AssertExtensions.assertMapEquals("Unexpected result from getBucketOffsets() after auto pre-caching.", expected1, result1);
// 3. Set LastIdx to Length, and increase by TEST_MAX_TAIL_CACHE_PRE_INDEX_LENGTH + 1 (so we don't do pre-caching).
val buckets = iw.locateBuckets(context.segment, keysWithOffsets.keySet(), context.timer).join();
Collection<BucketUpdate> bucketUpdates = buckets.entrySet().stream().map(e -> {
val builder = BucketUpdate.forBucket(e.getValue());
val ko = keysWithOffsets.get(e.getKey());
builder.withKeyUpdate(new BucketUpdate.KeyUpdate(ko.key, ko.offset, ko.offset, false));
return builder.build();
}).collect(Collectors.toList());
iw.updateBuckets(context.segment, bucketUpdates, 0L, offset.get(), keysWithOffsets.size(), TIMEOUT).join();
context.segment.append(new ByteArraySegment(new byte[TEST_MAX_TAIL_CACHE_PRE_INDEX_LENGTH + 1]), null, TIMEOUT).join();
// 4. Verify pre-caching is disabled and that the requests are blocked.
// Force-evict it so we start clean.
context.index.notifyIndexOffsetChanged(context.segment.getSegmentId(), -1, 0);
val getBucketOffsets = context.index.getBucketOffsets(context.segment, hashes, context.timer);
val backpointerKey = keysWithOffsets.values().stream().findFirst().get();
val getBackpointers = context.index.getBackpointerOffset(context.segment, backpointerKey.offset, context.timer.getRemaining());
val getUnindexedKeys = context.index.getUnindexedKeyHashes(context.segment);
val conditionalUpdateKey = TableKey.notExists(generateUnversionedKeys(1, context).get(0).getKey());
val conditionalUpdate = context.index.update(context.segment, toUpdateBatch(conditionalUpdateKey),
        () -> CompletableFuture.completedFuture(context.segment.getInfo().getLength() + 1L), context.timer);
Assert.assertFalse("Expected getBucketOffsets() to block.", getBucketOffsets.isDone());
Assert.assertFalse("Expected getBackpointerOffset() to block.", getBackpointers.isDone());
Assert.assertFalse("Expecting conditional update to block.", conditionalUpdate.isDone());
// 4.1. Verify unconditional updates go through.
val unconditionalUpdateKey = generateUnversionedKeys(1, context).get(0);
val unconditionalUpdateResult = context.index.update(context.segment, toUpdateBatch(unconditionalUpdateKey),
        () -> CompletableFuture.completedFuture(context.segment.getInfo().getLength() + 2L), context.timer)
        .get(SHORT_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
Assert.assertEquals("Unexpected result from the non-blocked unconditional update.",
        context.segment.getInfo().getLength() + 2L, (long) unconditionalUpdateResult.get(0));
// 3. Verify that all operations are unblocked once we reach the expected IndexOffset.
context.index.notifyIndexOffsetChanged(context.segment.getSegmentId(), context.segment.getInfo().getLength() - 1, 0);
Assert.assertFalse("Not expecting anything to be unblocked at this point", getBucketOffsets.isDone() || getBackpointers.isDone() || conditionalUpdate.isDone() || getUnindexedKeys.isDone());
context.index.notifyIndexOffsetChanged(context.segment.getSegmentId(), context.segment.getInfo().getLength(), 0);
val getBucketOffsetsResult = getBucketOffsets.get(SHORT_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
val getBackpointersResult = getBackpointers.get(SHORT_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
val conditionalUpdateResult = conditionalUpdate.get(SHORT_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
val getUnindexedKeysResult = getUnindexedKeys.get(SHORT_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
checkKeyOffsets(hashes, keysWithOffsets, getBucketOffsetsResult);
Assert.assertEquals("Unexpected result from unblocked getBackpointerOffset().", -1L, (long) getBackpointersResult);
Assert.assertEquals("Unexpected result from unblocked conditional update.", context.segment.getInfo().getLength() + 1L, (long) conditionalUpdateResult.get(0));
// Depending on the order in which the internal recovery tracker (implemented by CompletableFuture.thenCompose)
// executes its callbacks, the result of this call may be either 1 or 2 (it may unblock prior to the conditional
// update unblocking or the other way around).
Assert.assertTrue("Unexpected result size from unblocked getUnindexedKeyHashes().", getUnindexedKeysResult.size() == 1 || getUnindexedKeysResult.size() == 2);
// However, verify that in the end, we have 2 unindexed keys.
val finalGetUnindexedKeysResult = context.index.getUnindexedKeyHashes(context.segment).get(SHORT_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
Assert.assertEquals("Unexpected result size from final getUnindexedKeyHashes().", 2, finalGetUnindexedKeysResult.size());
// 4. Verify no new requests are blocked now.
// A timeout check will suffice
getBucketOffsets.get(SHORT_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
// 5. Verify requests are cancelled if we notify the segment has been removed.
context.index.notifyIndexOffsetChanged(context.segment.getSegmentId(), -1L, 0);
val cancelledKey = TableKey.notExists(generateUnversionedKeys(1, context).get(0).getKey());
val cancelledRequest = context.index.update(context.segment, toUpdateBatch(cancelledKey),
        () -> CompletableFuture.completedFuture(context.segment.getInfo().getLength() + 3L), context.timer);
context.index.notifyIndexOffsetChanged(context.segment.getSegmentId(), -1L, 0);
AssertExtensions.assertFutureThrows("Blocked request was not cancelled when a segment remove notification was received.",
        cancelledRequest, ex -> ex instanceof CancellationException);
// 6. Verify requests are cancelled (properly) when we close the index.
context.index.notifyIndexOffsetChanged(context.segment.getSegmentId(), -1L, 0);
val cancelledKey2 = TableKey.notExists(generateUnversionedKeys(1, context).get(0).getKey());
val cancelledRequest2 = context.index.update(context.segment, toUpdateBatch(cancelledKey2),
        () -> CompletableFuture.completedFuture(context.segment.getInfo().getLength() + 4L), context.timer);
context.index.close();
AssertExtensions.assertFutureThrows("Blocked request was not cancelled when the index was closed.",
        cancelledRequest2, ex -> ex instanceof ObjectClosedException);
}
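The blocking and unblocking behavior exercised above boils down to gating requests on a future that completes when notifyIndexOffsetChanged() reports the index has caught up with the segment's length, and that is cancelled when the segment is reported removed. The following is a simplified sketch of that idea under assumed names; the real ContainerKeyIndex recovery tracker is more involved, which is why the getUnindexedKeyHashes() result above is order-dependent.

import java.util.concurrent.CompletableFuture;
import java.util.function.Supplier;

// Simplified sketch of the recovery gate described above; names are assumptions, not Pravega's API.
class RecoveryGate {
    private final long targetIndexOffset; // segment length at the time recovery started
    private final CompletableFuture<Void> caughtUp = new CompletableFuture<>();

    RecoveryGate(long targetIndexOffset) {
        this.targetIndexOffset = targetIndexOffset;
    }

    // Mirrors notifyIndexOffsetChanged(): completes the gate when the index catches up,
    // cancels it (and everything queued behind it) when the segment is reported removed.
    void indexOffsetChanged(long newIndexOffset) {
        if (newIndexOffset < 0) {
            this.caughtUp.cancel(true);
        } else if (newIndexOffset >= this.targetIndexOffset) {
            this.caughtUp.complete(null);
        }
    }

    // Queues a request behind the gate; it only executes once the gate completes.
    <T> CompletableFuture<T> whenCaughtUp(Supplier<CompletableFuture<T>> request) {
        return this.caughtUp.thenCompose(v -> request.get());
    }
}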
use of org.junit.rules.Timeout in project pravega by pravega.
the class ContainerKeyIndexTests method testGetBucketOffsetsNotCached.
/**
* Tests the ability of the {@link ContainerKeyIndex#getBucketOffsets} to retrieve the offsets of buckets that
* were not previously cached.
*/
@Test
public void testGetBucketOffsetsNotCached() {
@Cleanup val context = new TestContext();
// Setup the segment with initial attributes.
val iw = new IndexWriter(HASHER, executorService());
// Generate keys and index them by Hashes and assign offsets. Only half the keys exist; the others do not.
val keys = generateUnversionedKeys(BATCH_SIZE, context);
val offset = new AtomicLong();
val hashes = new ArrayList<UUID>();
val keysWithOffsets = new HashMap<UUID, KeyWithOffset>();
for (val k : keys) {
val hash = HASHER.hash(k.getKey());
hashes.add(hash);
boolean exists = hashes.size() % 2 == 0;
if (exists) {
keysWithOffsets.put(hash, new KeyWithOffset(k.getKey(), offset.getAndAdd(k.getKey().getLength())));
} else {
keysWithOffsets.put(hash, null);
}
}
// Update the keys in the segment (via their buckets).
val buckets = iw.locateBuckets(context.segment, keysWithOffsets.keySet(), context.timer).join();
Collection<BucketUpdate> bucketUpdates = buckets.entrySet().stream().map(e -> {
val builder = BucketUpdate.forBucket(e.getValue());
val ko = keysWithOffsets.get(e.getKey());
if (ko != null) {
builder.withKeyUpdate(new BucketUpdate.KeyUpdate(ko.key, ko.offset, ko.offset, false));
}
return builder.build();
}).collect(Collectors.toList());
iw.updateBuckets(context.segment, bucketUpdates, 0L, 1L, 0, TIMEOUT).join();
// First lookup should go directly to the index. The cache should be empty.
val result1 = context.index.getBucketOffsets(context.segment, hashes, context.timer).join();
checkKeyOffsets(hashes, keysWithOffsets, result1);
// Second lookup should be from the cache (for previous hits) and the rest from the index.
val result2 = context.index.getBucketOffsets(context.segment, hashes, context.timer).join();
checkKeyOffsets(hashes, keysWithOffsets, result2);
}
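A reduced sketch of the lookup order this test verifies: the first getBucketOffsets() call for a hash has to read the attribute index, while repeated calls are served from the cache. The names below are illustrative; the real ContainerKeyIndex goes through Pravega's cache manager and represents non-existent buckets with a sentinel offset rather than a plain map.

import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

// Cache-then-index lookup sketch; illustrative names only.
class BucketOffsetLookup {
    private final Map<UUID, Long> cache = new ConcurrentHashMap<>();
    private final Function<UUID, Long> indexReader; // consults the durable attribute index

    BucketOffsetLookup(Function<UUID, Long> indexReader) {
        this.indexReader = indexReader;
    }

    Long getBucketOffset(UUID keyHash) {
        // First call for a hash goes to the index; subsequent calls hit the cache.
        return this.cache.computeIfAbsent(keyHash, this.indexReader);
    }
}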
use of org.junit.rules.Timeout in project pravega by pravega.
the class ContainerKeyIndexTests method testRecoveryWithOneNonIndexedUncompactedEntry.
/**
* Tests a situation in which we have written various values for 2 keys, and the most recent ones are the last
* writes among them. Then, the IndexWriter processes all of them and immediately after, Table Compactor processes a
* subset of these entries, and it writes one compacted (and stale) value to the tail of the Segment. At this point,
* there is a Table Segment recovery for which there is only one un-indexed entry: the last compacted one. The test
* verifies that we are not tail caching the last compacted entry, because it is stale already and could lead to
* serving stale data on gets during a window of time (until IndexWriter processes that compacted entry and evicts
* it from the tail cache).
*
* @throws Exception
*/
@Test
public void testRecoveryWithOneNonIndexedUncompactedEntry() throws Exception {
val s = new EntrySerializer();
@Cleanup val context = new TestContext(TableExtensionConfig.builder()
        .with(TableExtensionConfig.MAX_TAIL_CACHE_PREINDEX_LENGTH, (long) TEST_MAX_TAIL_CACHE_PRE_INDEX_LENGTH)
        .with(TableExtensionConfig.MAX_TAIL_CACHE_PREINDEX_BATCH_SIZE, Integer.MAX_VALUE)
        .with(TableExtensionConfig.RECOVERY_TIMEOUT, (int) ContainerKeyIndexTests.SHORT_TIMEOUT_MILLIS)
        .build());
// Setup the segment with initial attributes.
val iw = new IndexWriter(HASHER, executorService());
// 1. Add several values for key1 and key2 that can be easily picked by the Table Compactor. Note that the
// expected last value in this scenario for key1 is 4.
val entries = Arrays.asList(
        TableEntry.unversioned(new ByteArraySegment("key1".getBytes(StandardCharsets.UTF_8)), new ByteArraySegment("1".getBytes(StandardCharsets.UTF_8))),
        TableEntry.unversioned(new ByteArraySegment("key1".getBytes(StandardCharsets.UTF_8)), new ByteArraySegment("2".getBytes(StandardCharsets.UTF_8))),
        TableEntry.unversioned(new ByteArraySegment("key1".getBytes(StandardCharsets.UTF_8)), new ByteArraySegment("3".getBytes(StandardCharsets.UTF_8))),
        TableEntry.unversioned(new ByteArraySegment("key2".getBytes(StandardCharsets.UTF_8)), new ByteArraySegment("1".getBytes(StandardCharsets.UTF_8))),
        TableEntry.unversioned(new ByteArraySegment("key2".getBytes(StandardCharsets.UTF_8)), new ByteArraySegment("2".getBytes(StandardCharsets.UTF_8))),
        TableEntry.unversioned(new ByteArraySegment("key1".getBytes(StandardCharsets.UTF_8)), new ByteArraySegment("4".getBytes(StandardCharsets.UTF_8))),
        TableEntry.unversioned(new ByteArraySegment("key2".getBytes(StandardCharsets.UTF_8)), new ByteArraySegment("4".getBytes(StandardCharsets.UTF_8))),
        TableEntry.unversioned(new ByteArraySegment("key2".getBytes(StandardCharsets.UTF_8)), new ByteArraySegment("5".getBytes(StandardCharsets.UTF_8))));
val offset = new AtomicLong();
val hashes = new ArrayList<UUID>();
val keysWithOffsets = new HashMap<UUID, KeyWithOffset>();
for (val e : entries) {
val hash = HASHER.hash(e.getKey().getKey());
hashes.add(hash);
keysWithOffsets.put(hash, new KeyWithOffset(e.getKey().getKey(), offset.getAndAdd(s.getUpdateLength(e))));
}
// Write all the previous entries to the Segment.
val update1 = s.serializeUpdate(entries);
Assert.assertEquals(offset.get(), update1.getLength());
context.segment.append(update1, null, TIMEOUT).join();
// 2. Initiate a recovery and verify pre-caching is triggered and requests are auto-unblocked.
val get1 = context.index.getBucketOffsets(context.segment, hashes, context.timer);
val result1 = get1.get(SHORT_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
val expected1 = new HashMap<UUID, Long>();
keysWithOffsets.forEach((k, o) -> expected1.put(k, o.offset));
AssertExtensions.assertMapEquals("Unexpected result from getBucketOffsets() after auto pre-caching.", expected1, result1);
// 3. Set LastIdx to Length.
val buckets = iw.locateBuckets(context.segment, keysWithOffsets.keySet(), context.timer).join();
Collection<BucketUpdate> bucketUpdates = buckets.entrySet().stream().map(e -> {
val builder = BucketUpdate.forBucket(e.getValue());
val ko = keysWithOffsets.get(e.getKey());
builder.withKeyUpdate(new BucketUpdate.KeyUpdate(ko.key, ko.offset, ko.offset, false));
return builder.build();
}).collect(Collectors.toList());
iw.updateBuckets(context.segment, bucketUpdates, 0L, offset.get(), keysWithOffsets.size(), TIMEOUT).join();
// 4. After indexing, let's write the compacted entry (key1, 3).
val compactedEntry = List.of(TableEntry.versioned(new ByteArraySegment("key1".getBytes(StandardCharsets.UTF_8)),
        new ByteArraySegment("3".getBytes(StandardCharsets.UTF_8)), s.getUpdateLength(entries.get(0)) * 3L));
val update2 = s.serializeUpdateWithExplicitVersion(compactedEntry);
context.segment.append(update2, null, TIMEOUT).join();
// 5. Verify that when performing a get on key1, the tail-caching is not caching [key1, v3] (last offset), as it
// has scanned a previous value in which key1 is 4 (6th element in entries list) and has been indexed already.
// This already stale compacted entry should not be cached and will be removed by the index when it gets processed.
// Force-evict it so we start clean.
context.index.notifyIndexOffsetChanged(context.segment.getSegmentId(), -1, 0);
val key1hash = List.of(HASHER.hash(entries.get(0).getKey().getKey()));
val getBucketOffsets = context.index.getBucketOffsets(context.segment, key1hash, context.timer).join();
// Ensure that we get key1 as key.
Assert.assertArrayEquals(key1hash.toArray(), getBucketOffsets.keySet().toArray());
// (key1, 4) is the 6th element in entries.
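// Every update serialized above has the same length (4-byte key + 1-byte value + the serializer's header, 22 bytes in total),
// so the entry (key1, 4) starts at offset 5 * 22.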
Assert.assertEquals(Long.valueOf(5L * 22L), getBucketOffsets.get(key1hash.get(0)));
}
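The guard this test is after can be summarized as: while tail-caching the un-indexed tail of the segment during recovery, an entry whose explicit version points below the last indexed offset is a compacted copy of a key the index already tracks at a newer offset, so it must not be added to the tail cache. Below is a compressed sketch of that rule under assumed names; the real check lives in ContainerKeyIndex's pre-indexing path and is more nuanced.

import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;

// Sketch of the "skip stale compacted entries" rule; illustrative names only.
class TailCachePreIndexer {
    private final Map<UUID, Long> tailOffsets = new ConcurrentHashMap<>();

    void accept(UUID keyHash, long entryOffset, Long explicitVersion, long lastIndexedOffset) {
        if (explicitVersion != null && explicitVersion < lastIndexedOffset) {
            // Compacted (stale) copy of an already-indexed key: do not cache it.
            return;
        }
        // Otherwise remember the newest tail offset seen for this key hash.
        this.tailOffsets.merge(keyHash, entryOffset, Math::max);
    }
}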
use of org.junit.rules.Timeout in project pravega by pravega.
the class StorageWriterTests method testForceFlush.
/**
* Tests the {@link StorageWriter#forceFlush} method.
*/
@Test
public void testForceFlush() throws Exception {
// Special config that increases the flush thresholds and keeps the read timeout to something manageable (for empty reads).
val config = WriterConfig.builder()
        .with(WriterConfig.FLUSH_THRESHOLD_BYTES, 1024 * 1024)
        .with(WriterConfig.FLUSH_THRESHOLD_MILLIS, 5000L)
        .with(WriterConfig.MAX_ITEMS_TO_READ_AT_ONCE, 100)
        .with(WriterConfig.MIN_READ_TIMEOUT_MILLIS, 100L)
        .with(WriterConfig.MAX_READ_TIMEOUT_MILLIS, 250L)
        .build();
@Cleanup TestContext context = new TestContext(config);
context.writer.startAsync();
ArrayList<Long> segmentIds = createSegments(context);
HashMap<Long, ByteArrayOutputStream> segmentContents = new HashMap<>();
appendDataBreadthFirst(segmentIds, segmentContents, context);
// Do not queue up a checkpoint - make it impossible for the Writer to acknowledge anything on its own path.
// Instead, force-flush and verify the output when done.
val ff1 = context.writer.forceFlush(context.metadata.getOperationSequenceNumber(), TIMEOUT);
val result1 = ff1.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
Assert.assertTrue("Expected something to be flushed.", result1);
// Verify result.
verifyFinalOutput(segmentContents, Collections.emptyList(), context);
// Do a second force-flush. At this point there should be nothing left to flush, so ensure the StorageWriter
// cannot write to Storage or update attributes.
context.storage.close();
context.dataSource.setPersistAttributesErrorInjector(new ErrorInjector<>(i -> true, Exception::new));
val ff2 = context.writer.forceFlush(context.metadata.getOperationSequenceNumber(), TIMEOUT);
val result2 = ff2.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
Assert.assertFalse("Not expected anything to be flushed the second time.", result2);
}
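For completeness, a hypothetical caller-side view of the forceFlush() contract this test relies on: the returned future yields true while something was actually flushed and false once nothing is left, so a caller could drain a writer by looping until it reports an empty flush. The interface below only mirrors the two arguments used in the test; it is not Pravega's actual StorageWriter API surface.

import java.time.Duration;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;

// Hypothetical drain loop built on the forceFlush() semantics shown above; illustrative only.
class WriterDrainer {
    interface ForceFlushable {
        CompletableFuture<Boolean> forceFlush(long upToSequenceNumber, Duration timeout);
    }

    static void drain(ForceFlushable writer, long upToSequenceNumber, Duration timeout) throws Exception {
        boolean flushedSomething = true;
        while (flushedSomething) {
            // forceFlush() reports whether the call actually moved any data to Storage.
            flushedSomething = writer.forceFlush(upToSequenceNumber, timeout)
                    .get(timeout.toMillis(), TimeUnit.MILLISECONDS);
        }
    }
}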