Use of io.pravega.segmentstore.contracts.tables.TableKey in project pravega (by pravega):
class ContainerKeyIndexTests, method testConditionalUpdates.
/**
 * Tests the ability of the {@link ContainerKeyIndex} to perform multi-key (batch) conditional updates. The conditions
 * are based both on pre-existing Key's versions and non-existing keys.
 */
@Test
public void testConditionalUpdates() throws Exception {
    // Number of keys in each batch that carry an explicit version condition; the rest are unconditional.
    final int versionedKeysPerBatch = 20;
    final int iterationCount = 10;
    @Cleanup val context = new TestContext();
    // Generate a set of unversioned keys.
    // At each iteration, pick a set of them and condition them on the previous Key's values being there.
    // The versioned set should overlap with the previous update's versioned set.
    // Each iteration updates all the keys.
    val unversionedKeys = generateUnversionedKeys(BATCH_SIZE, context);
    val updates = new ArrayList<UpdateItem>();
    // Simulated segment offset at which the next batch will be persisted.
    long nextOffset = 0;
    for (int i = 0; i < iterationCount; i++) {
        // Sliding window of candidates: shifts by 1 each iteration, so consecutive
        // iterations' versioned sets overlap in all but one key.
        val versionedCandidates = unversionedKeys.subList(i, i + versionedKeysPerBatch);
        List<TableKey> versionedKeys;
        if (updates.isEmpty()) {
            // First update (insertion): condition on not existing.
            versionedKeys = versionedCandidates.stream().map(k -> TableKey.notExists(k.getKey())).collect(Collectors.toList());
        } else {
            // Subsequent update: condition on previous value.
            UpdateItem lastUpdate = updates.get(updates.size() - 1);
            versionedKeys = new ArrayList<>();
            for (int j = 0; j < versionedCandidates.size(); j++) {
                // Calculate the expected version. That is the offset of this item in the previous update.
                // Batches preserve unversionedKeys order, so the key at unversionedKeys index (i + j)
                // sits at item index (i + j) in the previous batch; its absolute offset is the previous
                // batch's base offset plus the item's relative offset within that batch.
                long version = lastUpdate.offset.get() + lastUpdate.batch.getItems().get(i + j).getOffset();
                versionedKeys.add(TableKey.versioned(versionedCandidates.get(j).getKey(), version));
            }
        }
        // Batch layout: unconditional prefix [0, i), the versioned window, then the unconditional suffix.
        val batch = toUpdateBatch(unversionedKeys.subList(0, i), versionedKeys, unversionedKeys.subList(i + versionedKeys.size(), unversionedKeys.size()));
        // The persist future is left incomplete here, so the index update cannot finish
        // until we explicitly "ack" persistence below.
        val persist = new CompletableFuture<Long>();
        val update = context.index.update(context.segment, batch, () -> persist, context.timer);
        val updateItem = new UpdateItem(batch, persist, update);
        updateItem.offset.set(nextOffset);
        updates.add(updateItem);
        nextOffset += batch.getLength();
    }
    // Complete the persists on each update, and verify no update has been completed.
    // Note: we deliberately skip the FIRST persist so every update stays blocked on it.
    updates.stream().skip(1).forEach(u -> u.persist.complete(u.offset.get()));
    for (val u : updates) {
        Assert.assertFalse("Not expecting update to be done yet.", u.update.isDone());
    }
    // Complete the first persist and verify that the updates were released in order (no exceptions).
    updates.get(0).persist.complete(updates.get(0).offset.get());
    for (val u : updates) {
        val updateResult = u.update.get(SHORT_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
        Assert.assertEquals("Unexpected number of buckets returned.", u.batch.getItems().size(), updateResult.size());
        for (int i = 0; i < updateResult.size(); i++) {
            // Expected offset = persisted batch base offset + the item's relative offset within its batch.
            long expectedOffset = u.persist.join() + u.batch.getItems().get(i).getOffset();
            long actualOffset = updateResult.get(i);
            Assert.assertEquals("Unexpected offset for result index " + i, expectedOffset, actualOffset);
        }
    }
    // Check final result.
    checkPrevailingUpdate(updates, context);
    // We can safely check backpointers here.
    checkBackpointers(updates, context);
}
Use of io.pravega.segmentstore.contracts.tables.TableKey in project pravega (by pravega):
class AsyncTableEntryReaderTests, method testBufferCompaction.
private <T> void testBufferCompaction(GetEntryReader<T> createReader, Function<T, TableKey> getKey, Function<T, BufferView> getValue) throws Exception {
    // Both sizes must be less than AsyncTableEntryReader.INITIAL_READ_LENGTH / 2 (to ease testing).
    val keySize = 3987;
    val valueSize = 3123;
    // Generate a number of entries. We only care about the first one, but we need enough extra
    // data to force the ReadResult to attempt further reads.
    val items = generateTestItems(() -> keySize, () -> valueSize);
    val tableEntries = items.stream()
            .filter(item -> !item.isRemoval)
            .map(item -> TableEntry.unversioned(new ByteArraySegment(item.key), new ByteArraySegment(item.value)))
            .collect(Collectors.toList());
    // Target the first Key/Entry so we don't have to guess versions, offsets, etc.
    val targetEntry = tableEntries.get(0);
    val searchKey = targetEntry.getKey().getKey();
    val serializer = new EntrySerializer();
    val serializedSegment = serializer.serializeUpdate(tableEntries).getCopy();

    // Case 1: read result too short to hold the full entry -> no compaction expected.
    @Cleanup val shortReadResult = new ReadResultMock(serializedSegment, keySize + valueSize + 20, keySize + 200);
    val nonCompactingReader = createReader.apply(searchKey, 0L, serializer, new TimeoutTimer(TIMEOUT));
    testBufferCompaction(nonCompactingReader, shortReadResult, getKey, getValue, false);

    // Case 2: read result spans the entire segment -> compaction expected.
    @Cleanup val fullReadResult = new ReadResultMock(serializedSegment, serializedSegment.length, serializedSegment.length);
    val compactingReader = createReader.apply(searchKey, 0L, serializer, new TimeoutTimer(TIMEOUT));
    testBufferCompaction(compactingReader, fullReadResult, getKey, getValue, true);
}
Aggregations