Usage of io.pravega.segmentstore.server.SegmentMock in project pravega (by pravega):
class ContainerEventProcessorTests, method testInitializationException.
/**
 * Check that if the creation of the EventProcessor fails, the future is completed exceptionally.
 *
 * @throws Exception If an unexpected error occurs during the test.
 */
@Test(timeout = 10000)
public void testInitializationException() throws Exception {
    // Segment supplier that alternates between a failed and a successful future on each invocation.
    AtomicBoolean shouldFail = new AtomicBoolean(true);
    Function<String, CompletableFuture<DirectSegmentAccess>> alternatingSegmentSupplier = name ->
            shouldFail.getAndSet(!shouldFail.get())
                    ? CompletableFuture.failedFuture(new IntentionalException())
                    : CompletableFuture.completedFuture(new SegmentMock(this.executorService()));
    @Cleanup
    ContainerEventProcessorImpl eventProcessorService = new ContainerEventProcessorImpl(0,
            alternatingSegmentSupplier, ITERATION_DELAY, CONTAINER_OPERATION_TIMEOUT, this.executorService());
    final int maxItemsProcessed = 10;
    final int maxOutstandingBytes = 4 * 1024 * 1024;
    final int truncationDataSize = 500;
    ContainerEventProcessor.EventProcessorConfig config = new ContainerEventProcessor.EventProcessorConfig(
            maxItemsProcessed, maxOutstandingBytes, truncationDataSize);
    // Verify that if the creation of the EventProcessor takes too long, the future completes exceptionally.
    AssertExtensions.assertFutureThrows("Expected future exceptionally complete with IntentionalException",
            eventProcessorService.forConsumer("testExceptionForConsumer", l -> null, config),
            ex -> ex instanceof IntentionalException);
    // If the call has failed, the future for that EventProcessor should have been removed from the map.
    Assert.assertNull(eventProcessorService.getEventProcessorMap().get("testExceptionForConsumer"));
    // The next call is expected to succeed, so the future should be in the map when this call completes.
    Assert.assertNotNull(eventProcessorService.forConsumer("testExceptionForConsumer", l -> null, config).join());
    Assert.assertNotNull(eventProcessorService.getEventProcessorMap().get("testExceptionForConsumer"));
    // Same alternating pattern for durable-queue processors: first attempt fails, second succeeds.
    AssertExtensions.assertFutureThrows("Expected future exceptionally complete with IntentionalException",
            eventProcessorService.forDurableQueue("testExceptionForDurableQueue"),
            ex -> ex instanceof IntentionalException);
    Assert.assertNull(eventProcessorService.getEventProcessorMap().get("testExceptionForDurableQueue"));
    Assert.assertNotNull(eventProcessorService.forDurableQueue("testExceptionForDurableQueue").join());
    Assert.assertNotNull(eventProcessorService.getEventProcessorMap().get("testExceptionForDurableQueue"));
}
Usage of io.pravega.segmentstore.server.SegmentMock in project pravega (by pravega):
class IndexReaderWriterTests, method newMock.
/**
 * Creates a fresh in-memory segment pre-populated with the default Table Attributes.
 *
 * @return A new {@link SegmentMock} ready for use as a table segment.
 */
private SegmentMock newMock() {
    SegmentMock segmentMock = new SegmentMock(executorService());
    segmentMock.updateAttributes(TableAttributes.DEFAULT_VALUES);
    return segmentMock;
}
Usage of io.pravega.segmentstore.server.SegmentMock in project pravega (by pravega):
class IndexReaderWriterTests, method checkIndex.
/**
 * Verifies that the index stored in the given segment matches the expected state of the given keys.
 * For every hash bucket this checks bucket existence, the offsets returned for the bucket (highest
 * offset first, following backpointers), that deleted keys are not reported, and finally that the
 * segment-wide entry and bucket counts are as expected.
 *
 * @param allKeys              All keys to verify, including keys that have since been deleted.
 * @param existingKeysByOffset Offset-to-Key map of the keys currently expected to exist in the index.
 * @param w                    The {@link IndexWriter} used to query the index.
 * @param hasher               The {@link KeyHasher} used to map keys to bucket hashes.
 * @param segment              The segment holding the index under test.
 */
private void checkIndex(Collection<BufferView> allKeys, Map<Long, BufferView> existingKeysByOffset, IndexWriter w, KeyHasher hasher, SegmentMock segment) {
val timer = new TimeoutTimer(TIMEOUT);
// Invert the given map into Key -> Offset; keys absent from this lookup are treated as deleted (NO_OFFSET).
// Group all keys by their full hash (each hash should translate to a bucket), and make sure they're ordered by
// offset (in descending order - so we can verify backpointer ordering).
val existingKeys = existingKeysByOffset.entrySet().stream().collect(Collectors.toMap(Map.Entry::getValue, Map.Entry::getKey));
val keysByHash = allKeys.stream().map(key -> new BucketUpdate.KeyInfo(key, existingKeys.getOrDefault(key, NO_OFFSET), existingKeys.getOrDefault(key, NO_OFFSET))).sorted(// Reverse order.
(k1, k2) -> Long.compare(k2.getOffset(), k1.getOffset())).collect(Collectors.groupingBy(keyInfo -> hasher.hash(keyInfo.getKey())));
int existentBucketCount = 0;
val buckets = w.locateBuckets(segment, keysByHash.keySet(), timer).join();
for (val e : keysByHash.entrySet()) {
val hash = e.getKey();
val keys = e.getValue();
val bucket = buckets.get(hash);
Assert.assertNotNull("No bucket found for hash " + hash, bucket);
// A bucket should exist if and only if at least one of its keys is not deleted.
boolean allDeleted = keys.stream().allMatch(k -> k.getOffset() == NO_OFFSET);
Assert.assertNotEquals("Only expecting inexistent bucket when all its keys are deleted " + hash, allDeleted, bucket.exists());
val bucketOffsets = w.getBucketOffsets(segment, bucket, timer).join();
// Verify that we didn't return too many or too few keys.
if (allDeleted) {
Assert.assertEquals("Not expecting any offsets to be returned for bucket: " + hash, 0, bucketOffsets.size());
} else {
AssertExtensions.assertGreaterThan("Expected at least one offset to be returned for bucket: " + hash, 0, bucketOffsets.size());
existentBucketCount++;
}
AssertExtensions.assertLessThanOrEqual("Too many offsets returned for bucket: " + hash, keys.size(), bucketOffsets.size());
// Verify returned keys are as expected. Both lists are in descending-offset order, so compare positionally.
for (int i = 0; i < bucketOffsets.size(); i++) {
long actualOffset = bucketOffsets.get(i);
long expectedOffset = keys.get(i).getOffset();
String id = String.format("{%s[%s]}", hash, i);
// In this loop, we do not expect to have Deleted Keys. If our Expected Offset indicates this key should
// have been deleted, then getBucketOffsets() should not have returned this.
Assert.assertNotEquals("Expecting a deleted key but found existing one: " + id, NO_OFFSET, expectedOffset);
Assert.assertEquals("Unexpected key offset in bucket " + id, expectedOffset, actualOffset);
}
// If fewer offsets were returned than we have keys, the first unreturned key must be a deleted one
// (deleted keys sort to the end since NO_OFFSET is the lowest offset value).
if (bucketOffsets.size() < keys.size()) {
val prevKeyOffset = keys.get(bucketOffsets.size()).getOffset();
Assert.assertEquals("Missing key from bucket " + hash, NO_OFFSET, prevKeyOffset);
}
}
// Verify the segment-wide aggregate counts match what we expect.
checkEntryCount(existingKeysByOffset.size(), segment);
checkBucketCount(existentBucketCount, segment);
}
Usage of io.pravega.segmentstore.server.SegmentMock in project pravega (by pravega):
class TableBucketReaderTests, method testFindKey.
/**
 * Tests the ability to locate Table Keys in a Table Bucket using {@link TableBucketReader#key}.
 */
@Test
public void testFindKey() throws Exception {
    val segment = new SegmentMock(executorService());
    // Generate our test data and append it to the segment.
    val testData = generateData();
    segment.append(new ByteArraySegment(testData.serialization), null, TIMEOUT).join();
    // The backpointer lookup is served straight from the in-memory test data.
    val reader = TableBucketReader.key(segment,
            (s, offset, timeout) -> CompletableFuture.completedFuture(testData.getBackpointer(offset)),
            executorService());
    // A key linked into the bucket must be found and reported at its expected version.
    val existingKey = testData.entries.get(1).getKey();
    val foundResult = reader.find(existingKey.getKey(), testData.getBucketOffset(), new TimeoutTimer(TIMEOUT))
            .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    Assert.assertEquals("Unexpected version from valid key.", testData.getEntryOffset(1), foundResult.getVersion());
    Assert.assertEquals("Unexpected 'valid' key returned.", existingKey.getKey(), foundResult.getKey());
    // A key that is not linked into the bucket must produce no result.
    val missingKey = testData.unlinkedEntry.getKey();
    val missingResult = reader.find(missingKey.getKey(), testData.getBucketOffset(), new TimeoutTimer(TIMEOUT))
            .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    Assert.assertNull("Not expecting any result for key that does not exist.", missingResult);
}
Usage of io.pravega.segmentstore.server.SegmentMock in project pravega (by pravega):
class TableBucketReaderTests, method testFindAll.
/**
 * Common implementation for the findAll tests: appends test entries plus a removal for one key,
 * then verifies that {@code findAllExisting} returns exactly the non-deleted, linked Table Items.
 *
 * @param createReader Factory for the {@link TableBucketReader} flavor under test.
 * @param getItem      Extracts the item of interest (key or entry) from a {@link TableEntry}.
 * @param areEqual     Equality predicate for the extracted items.
 * @param <T>          Type of the items produced by the reader.
 */
@SneakyThrows
private <T> void testFindAll(GetBucketReader<T> createReader, Function<TableEntry, T> getItem, BiPredicate<T, T> areEqual) {
    val segment = new SegmentMock(executorService());
    // Generate our test data and append it to the segment.
    val testData = generateData();
    segment.append(new ByteArraySegment(testData.serialization), null, TIMEOUT).join();
    // Serialize a removal for the first key, append it, and link it back to the bucket via a backpointer.
    val removedKey = testData.entries.get(0).getKey();
    val serializer = new EntrySerializer();
    val removalData = serializer.serializeRemoval(Collections.singleton(removedKey));
    long updatedBucketOffset = segment.append(removalData, null, TIMEOUT).join();
    testData.backpointers.put(updatedBucketOffset, testData.getBucketOffset());
    // Create a new TableBucketReader and get all the requested items for this bucket. We pass the offset of the
    // deleted entry to make sure its data is not included.
    val reader = createReader.apply(segment,
            (s, offset, timeout) -> CompletableFuture.completedFuture(testData.getBackpointer(offset)),
            executorService());
    val actualItems = reader.findAllExisting(updatedBucketOffset, new TimeoutTimer(TIMEOUT))
            .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    // We expect to find all non-deleted Table Items that are linked.
    val expectedItems = testData.entries.stream()
            .filter(e -> testData.backpointers.containsValue(e.getKey().getVersion()))
            .map(getItem)
            .collect(Collectors.toList());
    AssertExtensions.assertContainsSameElements("Unexpected result from findAll().", expectedItems, actualItems,
            (a, b) -> areEqual.test(a, b) ? 0 : 1);
}
Aggregations