Use of io.pravega.common.TimeoutTimer in project pravega by pravega.
From the class AsyncTableEntryReaderTests, method testReadKeyResultTooShort.
/**
 * Tests the ability to handle a case where the key could not be read before the read result was done.
 */
@Test
public void testReadKeyResultTooShort() {
    val testItems = generateTestItems();
    for (val e : testItems) {
        // Start a new reader & processor for this key-serialization pair.
        val keyReader = AsyncTableEntryReader.readKey(1L, SERIALIZER, new TimeoutTimer(TIMEOUT));
        @Cleanup val rr = new ReadResultMock(e.serialization, e.key.length - 1, 1);
        AsyncReadResultProcessor.process(rr, keyReader, executorService());
        AssertExtensions.assertThrows(
                "Unexpected behavior for shorter read result.",
                () -> keyReader.getResult().get(BASE_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS),
                ex -> ex instanceof SerializationException);
    }
}
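The test above constructs a fresh TimeoutTimer per iteration and caps the blocking get() with a fixed budget (BASE_TIMEOUT_MILLIS). Where several sequential operations must share one deadline, the timer's remaining-time accessor is the usual tool. A minimal sketch, assuming pravega-common is on the classpath and using a hypothetical doAsyncWork helper; getRemaining() is the accessor TimeoutTimer exposes for the leftover budget:

import io.pravega.common.TimeoutTimer;

import java.time.Duration;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;

public class SharedDeadlineSketch {
    public static void main(String[] args) throws Exception {
        // One TimeoutTimer represents a single deadline shared by a chain of operations.
        TimeoutTimer timer = new TimeoutTimer(Duration.ofSeconds(30));

        // Hypothetical first step; the blocking wait consumes part of the shared budget.
        CompletableFuture<String> step1 = doAsyncWork("step1");
        step1.get(timer.getRemaining().toMillis(), TimeUnit.MILLISECONDS);

        // The second step only gets whatever time the first one left over.
        CompletableFuture<String> step2 = doAsyncWork("step2");
        step2.get(timer.getRemaining().toMillis(), TimeUnit.MILLISECONDS);
    }

    // Placeholder for any asynchronous operation; not part of Pravega.
    private static CompletableFuture<String> doAsyncWork(String name) {
        return CompletableFuture.completedFuture(name);
    }
}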
Use of io.pravega.common.TimeoutTimer in project pravega by pravega.
From the class FixedKeyLengthTableCompactorTests, method testCompactionConcurrentUpdate.
/**
 * Tests the case when a compaction executes concurrently with one of the compact-copied keys being updated. This is a
 * scenario specific to the Fixed-Key-Length Table Segment, as the indexing is done at the time of the update
 * and not in the background (and hence in sequence).
 */
@Test
public void testCompactionConcurrentUpdate() {
    @Cleanup val context = createContext(KEY_COUNT * UPDATE_ENTRY_LENGTH);
    val rnd = new Random(0);

    // Generate keys.
    val keys = new ArrayList<BufferView>();
    val expectedValues = new HashMap<BufferView, BufferView>();
    for (int i = 0; i < KEY_COUNT; i++) {
        byte[] key = new byte[KEY_LENGTH];
        rnd.nextBytes(key);
        keys.add(new ByteArraySegment(key));
    }

    // Set the utilization threshold to 76% so that updating half the keys once is enough to trigger a compaction.
    context.setSegmentState(0, 0, 0, 0, 76);

    // Insert all the keys ...
    for (val key : keys) {
        expectedValues.put(key, updateKey(key, context, rnd));
    }

    // ... then update the second half. This should require a compaction which results in a copy.
    for (int i = keys.size() / 2; i < keys.size(); i++) {
        expectedValues.put(keys.get(i), updateKey(keys.get(i), context, rnd));
    }

    val originalLength = context.segmentMetadata.getLength();
    val c = context.getCompactor();
    Assert.assertEquals("Unexpected number of unique entries pre-compaction.",
            expectedValues.size(), (long) c.getUniqueEntryCount().join());
    Assert.assertEquals("Unexpected number of total entries pre-compaction.",
            expectedValues.size() + expectedValues.size() / 2, IndexReader.getTotalEntryCount(context.segmentMetadata));
    Assert.assertTrue("Expecting a compaction to be required.", c.isCompactionRequired().join());

    context.segment.setBeforeAppendCallback(() -> {
        // This callback is invoked while the compactor is running: after it has read and processed all candidates,
        // and immediately before its conditional append executes. We can now update one of the copied keys
        // with a new value. Clear the callback first so we don't end up in an infinite loop here.
        context.segment.setBeforeAppendCallback(null);
        val firstKey = keys.get(0);
        expectedValues.put(firstKey, updateKey(firstKey, context, rnd));
    });
    c.compact(new TimeoutTimer(TIMEOUT)).join();

    // Verify that the compaction did eventually succeed and that all the keys have the correct (expected) values.
    AssertExtensions.assertGreaterThan("Segment length did not change.", originalLength, context.segmentMetadata.getLength());
    AssertExtensions.assertGreaterThan("No compaction occurred.", 0, IndexReader.getCompactionOffset(context.segmentMetadata));
    Assert.assertEquals("Unexpected number of unique entries post-compaction.",
            expectedValues.size(), (long) c.getUniqueEntryCount().join());
    Assert.assertEquals("Unexpected number of total entries post-compaction.",
            expectedValues.size(), IndexReader.getTotalEntryCount(context.segmentMetadata));

    // Read all the entries from the segment and validate that they are as expected.
    val actualValues = new HashMap<BufferView, BufferView>();
    context.segment.attributeIterator(AttributeId.Variable.minValue(KEY_LENGTH), AttributeId.Variable.maxValue(KEY_LENGTH), TIMEOUT)
            .join()
            .forEachRemaining(attributeValues -> {
                for (val av : attributeValues) {
                    val reader = BufferView.wrap(context.segment.read(av.getValue(), UPDATE_ENTRY_LENGTH, TIMEOUT)
                            .readRemaining(UPDATE_ENTRY_LENGTH, TIMEOUT)).getBufferViewReader();
                    try {
                        val e = AsyncTableEntryReader.readEntryComponents(reader, av.getValue(), context.serializer);
                        Assert.assertEquals("Mismatched keys.", av.getKey().toBuffer(), e.getKey());
                        actualValues.put(e.getKey(), e.getValue());
                    } catch (SerializationException ex) {
                        throw new CompletionException(ex);
                    }
                }
            }, executorService()).join();
    AssertExtensions.assertMapEquals("Unexpected entries in the segment after compaction.", expectedValues, actualValues);
}
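The race injection above hinges on the mock segment's setBeforeAppendCallback hook clearing itself before performing its own update, so that the update's append does not re-trigger the hook. A minimal sketch of that self-clearing, one-shot callback pattern, with hypothetical class and method names (not Pravega's actual mock):

import java.util.concurrent.atomic.AtomicReference;

public class OneShotHookSketch {
    // At most one pending callback; it is cleared before it runs, so a
    // re-entrant append() from inside the callback cannot fire it again.
    private final AtomicReference<Runnable> beforeAppend = new AtomicReference<>();

    public void setBeforeAppendCallback(Runnable callback) {
        beforeAppend.set(callback);
    }

    public void append(byte[] data) {
        Runnable hook = beforeAppend.getAndSet(null); // one-shot: clear first
        if (hook != null) {
            hook.run(); // may itself call append() without recursing into the hook
        }
        // ... actual append logic would go here ...
    }
}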
Use of io.pravega.common.TimeoutTimer in project pravega by pravega.
From the class IndexReaderWriterTests, method updateKeys.
private long updateKeys(Map<BufferView, Long> keysWithOffset, IndexWriter w, HashMap<Long, BufferView> existingKeys, SegmentMock segment) {
    val timer = new TimeoutTimer(TIMEOUT);
    val keyUpdates = keysWithOffset.entrySet().stream()
            .map(e -> new BucketUpdate.KeyUpdate(e.getKey(), decodeOffset(e.getValue()), decodeOffset(e.getValue()), isRemoveOffset(e.getValue())))
            .sorted(Comparator.comparingLong(BucketUpdate.KeyUpdate::getOffset))
            .collect(Collectors.toList());
    long firstKeyOffset = keyUpdates.get(0).getOffset();

    // This is the value that we will set TABLE_INDEX_NODE to. It is not any key's offset (and we don't really care what its value is).
    long postIndexOffset = keyUpdates.get(keyUpdates.size() - 1).getOffset() + 2 * MAX_KEY_LENGTH;

    // Generate the BucketUpdates for the keys.
    val builders = w.groupByBucket(segment, keyUpdates, timer).join();

    // Fetch existing keys.
    val oldOffsets = new ArrayList<Long>();
    val entryCount = new AtomicLong(IndexReader.getEntryCount(segment.getInfo()));
    long initialTotalEntryCount = IndexReader.getTotalEntryCount(segment.getInfo());
    int totalEntryCountDelta = 0;
    val bucketUpdates = new ArrayList<BucketUpdate>();
    for (val builder : builders) {
        w.getBucketOffsets(segment, builder.getBucket(), timer).join().forEach(offset -> {
            BufferView existingKey = existingKeys.getOrDefault(offset, null);
            Assert.assertNotNull("Existing bucket points to non-existing key.", existingKey);
            builder.withExistingKey(new BucketUpdate.KeyInfo(existingKey, offset, offset));
            if (keysWithOffset.containsKey(existingKey)) {
                // Key replaced or removed; discount this offset. We'll add it back below if the key was replaced.
                oldOffsets.add(offset);
                entryCount.decrementAndGet();
            }
        });

        // Add back the count of all keys that have been updated or added; we've already discounted all updates, insertions
        // and removals above, so adding back just the updates and insertions keeps the expected count accurate.
        val bu = builder.build();
        bucketUpdates.add(bu);
        val deletedCount = bu.getKeyUpdates().stream().filter(BucketUpdate.KeyUpdate::isDeleted).count();
        entryCount.addAndGet(bu.getKeyUpdates().size() - deletedCount);
        totalEntryCountDelta += bu.getKeyUpdates().size();
    }

    // Apply the updates.
    val attrCount = w.updateBuckets(segment, bucketUpdates, firstKeyOffset, postIndexOffset, totalEntryCountDelta, TIMEOUT).join();
    AssertExtensions.assertGreaterThan("Expected at least one attribute to be modified.", 0, attrCount);
    checkEntryCount(entryCount.get(), segment);
    checkTotalEntryCount(initialTotalEntryCount + totalEntryCountDelta, segment);

    // Record the keys as being updated.
    oldOffsets.forEach(existingKeys::remove);
    keysWithOffset.forEach((key, offset) -> {
        if (isRemoveOffset(offset)) {
            existingKeys.remove(decodeOffset(offset), key);
        } else {
            existingKeys.put(decodeOffset(offset), key);
        }
    });
    return postIndexOffset;
}
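Note how a single timer is threaded through both groupByBucket and every getBucketOffsets call, so the whole batch shares one deadline rather than resetting it per call. A minimal sketch of that idiom under the same classpath assumption, with a hypothetical processBucket step standing in for the index calls; hasRemaining() is assumed to report whether any budget is left:

import io.pravega.common.TimeoutTimer;

import java.time.Duration;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeoutException;

public class SharedTimerLoopSketch {
    // Hypothetical async step standing in for calls like getBucketOffsets().
    static CompletableFuture<Void> processBucket(int bucket, TimeoutTimer timer) {
        if (!timer.hasRemaining()) {
            // The shared deadline expired; fail fast instead of starting more work.
            return CompletableFuture.failedFuture(new TimeoutException("bucket " + bucket));
        }
        return CompletableFuture.completedFuture(null);
    }

    public static void main(String[] args) {
        TimeoutTimer timer = new TimeoutTimer(Duration.ofSeconds(10));
        for (int bucket : List.of(1, 2, 3)) {
            processBucket(bucket, timer).join(); // every iteration draws on the same budget
        }
    }
}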
Use of io.pravega.common.TimeoutTimer in project pravega by pravega.
From the class TableBucketReaderTests, method testFindAll.
@SneakyThrows
private <T> void testFindAll(GetBucketReader<T> createReader, Function<TableEntry, T> getItem, BiPredicate<T, T> areEqual) {
    val segment = new SegmentMock(executorService());

    // Generate our test data and append it to the segment.
    val data = generateData();
    segment.append(new ByteArraySegment(data.serialization), null, TIMEOUT).join();

    // Generate a deleted key and append it to the segment.
    val deletedKey = data.entries.get(0).getKey();
    val es = new EntrySerializer();
    val deletedData = es.serializeRemoval(Collections.singleton(deletedKey));
    long newBucketOffset = segment.append(deletedData, null, TIMEOUT).join();
    data.backpointers.put(newBucketOffset, data.getBucketOffset());

    // Create a new TableBucketReader and get all the requested items for this bucket. We pass the offset of the
    // deleted entry to make sure its data is not included.
    val reader = createReader.apply(segment, (s, offset, timeout) -> CompletableFuture.completedFuture(data.getBackpointer(offset)), executorService());
    val result = reader.findAllExisting(newBucketOffset, new TimeoutTimer(TIMEOUT)).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);

    // We expect to find all non-deleted Table Items that are linked.
    val expectedResult = data.entries.stream()
            .filter(e -> data.backpointers.containsValue(e.getKey().getVersion()))
            .map(getItem)
            .collect(Collectors.toList());
    AssertExtensions.assertContainsSameElements("Unexpected result from findAll().", expectedResult, result, (i1, i2) -> areEqual.test(i1, i2) ? 0 : 1);
}
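Both TableBucketReader tests stub the backpointer lookup with a lambda of shape (segment, offset, timeout) -> CompletableFuture<Long>. A minimal sketch of such a deterministic stub over an in-memory chain, with hypothetical names, assuming (as the tests suggest but do not show) that a negative offset marks the end of the chain:

import java.time.Duration;
import java.util.Map;
import java.util.concurrent.CompletableFuture;

public class BackpointerStubSketch {
    // Matches the shape of the lambda the tests pass to TableBucketReader:
    // given a segment (ignored here), an offset and a timeout, resolve the
    // previous entry's offset in the bucket's chain.
    interface GetBackpointer<S> {
        CompletableFuture<Long> apply(S segment, long offset, Duration timeout);
    }

    public static void main(String[] args) {
        // The entry at offset 300 points back to 200, which points back to 100.
        Map<Long, Long> chain = Map.of(300L, 200L, 200L, 100L);
        GetBackpointer<Object> stub = (segment, offset, timeout) ->
                CompletableFuture.completedFuture(chain.getOrDefault(offset, -1L));
        System.out.println(stub.apply(null, 300L, Duration.ofSeconds(1)).join()); // 200
        System.out.println(stub.apply(null, 100L, Duration.ofSeconds(1)).join()); // -1 (assumed end-of-chain marker)
    }
}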
Use of io.pravega.common.TimeoutTimer in project pravega by pravega.
From the class TableBucketReaderTests, method testFindEntry.
/**
 * Tests the ability to locate Table Entries in a Table Bucket using {@link TableBucketReader#entry}.
 */
@Test
public void testFindEntry() throws Exception {
    val segment = new SegmentMock(executorService());

    // Generate our test data and append it to the segment.
    val data = generateData();
    segment.append(new ByteArraySegment(data.serialization), null, TIMEOUT).join();
    val reader = TableBucketReader.entry(segment, (s, offset, timeout) -> CompletableFuture.completedFuture(data.getBackpointer(offset)), executorService());

    // Check a valid result.
    val validEntry = data.entries.get(1);
    val validResult = reader.find(validEntry.getKey().getKey(), data.getBucketOffset(), new TimeoutTimer(TIMEOUT)).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    Assert.assertEquals("Unexpected version from valid key.", data.getEntryOffset(1), validResult.getKey().getVersion());
    Assert.assertEquals("Unexpected 'valid' key returned.", validEntry.getKey().getKey(), validResult.getKey().getKey());
    Assert.assertEquals("Unexpected 'valid' value returned.", validEntry.getValue(), validResult.getValue());

    // Check a key that does not exist.
    val invalidKey = data.unlinkedEntry.getKey();
    val invalidResult = reader.find(invalidKey.getKey(), data.getBucketOffset(), new TimeoutTimer(TIMEOUT)).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    Assert.assertNull("Not expecting any result for key that does not exist.", invalidResult);
}
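The null result for the unlinked key follows from how a bucket is searched: only entries reachable through the backpointer chain are visible. A simplified, synchronous analogue of that lookup with hypothetical types; the real TableBucketReader resolves keys and backpointers asynchronously from segment data:

import java.util.Map;
import java.util.Optional;

public class BucketFindSketch {
    // Walk the backpointer chain from the bucket's latest offset and return the
    // first matching key; entries never linked into the chain are invisible.
    static Optional<String> find(String soughtKey, long bucketOffset,
                                 Map<Long, String> keysByOffset, Map<Long, Long> backpointers) {
        long current = bucketOffset;
        while (current >= 0) {
            if (soughtKey.equals(keysByOffset.get(current))) {
                return Optional.of(soughtKey);
            }
            current = backpointers.getOrDefault(current, -1L);
        }
        return Optional.empty(); // mirrors the null asserted for the unlinked key above
    }

    public static void main(String[] args) {
        Map<Long, String> keys = Map.of(100L, "a", 200L, "b", 300L, "unlinked");
        Map<Long, Long> backpointers = Map.of(200L, 100L);
        System.out.println(find("b", 200L, keys, backpointers));        // Optional[b]
        System.out.println(find("unlinked", 200L, keys, backpointers)); // Optional.empty
    }
}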