use of io.pravega.common.util.BufferView in project pravega by pravega.
the class SegmentAggregator method flushPendingAppends.
/**
 * Flushes all Append Operations that can be flushed up to the maximum allowed flush size.
 *
 * @param timeout Timeout for the operation.
 * @return A CompletableFuture that, when completed, will contain the result from the flush operation.
 */
private CompletableFuture<WriterFlushResult> flushPendingAppends(Duration timeout) {
    // Gather a BufferView made up of all the operations we can flush.
    BufferView flushData;
    try {
        flushData = getFlushData();
    } catch (DataCorruptionException ex) {
        return Futures.failedFuture(ex);
    }

    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "flushPendingAppends");

    // Flush them.
    TimeoutTimer timer = new TimeoutTimer(timeout);
    CompletableFuture<Void> flush;
    if (flushData == null || flushData.getLength() == 0) {
        flush = CompletableFuture.completedFuture(null);
    } else {
        flush = createSegmentIfNecessary(
                () -> this.storage.write(this.handle.get(), this.metadata.getStorageLength(), flushData.getReader(), flushData.getLength(), timer.getRemaining()),
                timer.getRemaining());
    }

    return flush.thenApplyAsync(v -> {
        WriterFlushResult result = updateStatePostFlush(flushData);
        LoggerHelpers.traceLeave(log, this.traceObjectId, "flushPendingAppends", traceId, result);
        return result;
    }, this.executor).exceptionally(ex -> {
        if (Exceptions.unwrap(ex) instanceof BadOffsetException) {
            // We attempted to write at an offset that already contained other data. This can happen for a number of
            // reasons, but we do not have enough information here to determine why. We need to enter reconciliation
            // mode, which will determine the actual state of the segment in storage and take appropriate actions.
            setState(AggregatorState.ReconciliationNeeded);
        }

        // Rethrow all exceptions.
        throw new CompletionException(ex);
    });
}
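The storage.write call above consumes the BufferView through its getReader()/getLength() pair, which is the usual way to hand a view's contents to an InputStream-based API. Below is a minimal, self-contained sketch of that same pattern; it is not Pravega code and only assumes that BufferView.getReader() returns a standard InputStream over the view's contents, as the write call above implies.

import io.pravega.common.util.BufferView;
import io.pravega.common.util.ByteArraySegment;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;

public class BufferViewDrainSketch {
    // Drains a BufferView into a byte[] using the same getReader()/getLength() pair
    // that flushPendingAppends passes to storage.write(...).
    static byte[] drain(BufferView data) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream(data.getLength());
        try (InputStream reader = data.getReader()) {
            byte[] chunk = new byte[4096];
            int read;
            while ((read = reader.read(chunk)) != -1) {
                out.write(chunk, 0, read);
            }
        }
        return out.toByteArray();
    }

    public static void main(String[] args) throws IOException {
        BufferView data = new ByteArraySegment("hello".getBytes());
        System.out.println(drain(data).length); // Prints 5.
    }
}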
use of io.pravega.common.util.BufferView in project pravega by pravega.
the class TableBucketReader method find.
/**
 * Attempts to locate something in a TableBucket that matches a particular key.
 *
 * @param soughtKey    A {@link BufferView} instance representing the Key we are looking for.
 * @param bucketOffset The current segment offset of the Table Bucket we are looking into.
 * @param timer        A {@link TimeoutTimer} for the operation.
 * @return A CompletableFuture that, when completed, will contain the desired result, or null if no such result
 * was found.
 */
CompletableFuture<ResultT> find(BufferView soughtKey, long bucketOffset, TimeoutTimer timer) {
    int maxReadLength = getMaxReadLength();

    // Read the Key at the current offset and check it against the sought one.
    AtomicLong offset = new AtomicLong(bucketOffset);
    CompletableFuture<ResultT> result = new CompletableFuture<>();
    Futures.loop(
            () -> !result.isDone(),
            () -> {
                // Read the Key from the Segment. Copy it out of the Segment to avoid losing it or getting corrupted
                // values back in case of a cache eviction. See {@link ReadResult#setCopyOnRead(boolean)}.
                ReadResult readResult = this.segment.read(offset.get(), maxReadLength, timer.getRemaining());
                val reader = getReader(soughtKey, offset.get(), timer);
                AsyncReadResultProcessor.process(readResult, reader, this.executor);
                return reader.getResult().thenComposeAsync(r -> {
                    SearchContinuation sc = processResult(r, soughtKey);
                    if (sc == SearchContinuation.ResultFound || sc == SearchContinuation.NoResult) {
                        // We either definitely found the result or definitely did not find the result.
                        // In the case we did not find what we were looking for, we may still have some
                        // partial result to return to the caller (i.e., a TableEntry with no value, but with
                        // a version, which indicates a deleted entry, as opposed to a nonexistent one).
                        result.complete(r);
                    } else {
                        return this.getBackpointer.apply(this.segment, offset.get(), timer.getRemaining())
                                .thenAccept(newOffset -> {
                                    offset.set(newOffset);
                                    if (newOffset < 0) {
                                        // Could not find anything.
                                        result.complete(null);
                                    }
                                });
                    }

                    return CompletableFuture.completedFuture(null);
                }, this.executor);
            },
            this.executor)
            .exceptionally(ex -> {
                result.completeExceptionally(ex);
                return null;
            });
    return result;
}
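Conceptually, find walks a chain of keys linked by backpointers: it compares the key stored at each offset against soughtKey and either completes with a result or follows the backpointer, stopping when a negative offset marks the end of the chain. The sketch below replays that walk synchronously; the two maps are stand-ins for the segment reads and the getBackpointer function (assumptions for illustration, not the real TableBucketReader collaborators), and it relies on the content-based equality that BufferView keys exhibit in the tests further down.

import io.pravega.common.util.BufferView;
import io.pravega.common.util.ByteArraySegment;

import java.util.HashMap;
import java.util.Map;

public class BucketWalkSketch {
    // Returns the offset at which soughtKey is stored, or null if the bucket chain does not contain it.
    static Long find(BufferView soughtKey, long bucketOffset,
                     Map<Long, BufferView> keysByOffset, Map<Long, Long> backpointers) {
        long offset = bucketOffset;
        while (offset >= 0) {
            if (soughtKey.equals(keysByOffset.get(offset))) {
                return offset;                               // ResultFound.
            }
            offset = backpointers.getOrDefault(offset, -1L); // Negative offset: end of chain (NoResult).
        }
        return null;
    }

    public static void main(String[] args) {
        Map<Long, BufferView> keysByOffset = new HashMap<>();
        keysByOffset.put(100L, new ByteArraySegment("a".getBytes()));
        keysByOffset.put(40L, new ByteArraySegment("b".getBytes()));
        Map<Long, Long> backpointers = new HashMap<>();
        backpointers.put(100L, 40L); // The key at offset 100 points back to the one at offset 40.
        System.out.println(find(new ByteArraySegment("b".getBytes()), 100L, keysByOffset, backpointers)); // Prints 40.
    }
}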
use of io.pravega.common.util.BufferView in project pravega by pravega.
the class AsyncTableEntryReaderTests method testBufferCompaction.
private <T> void testBufferCompaction(GetEntryReader<T> createReader, Function<T, TableKey> getKey, Function<T, BufferView> getValue) throws Exception {
    // Must be less than AsyncTableEntryReader.INITIAL_READ_LENGTH / 2 (to ease testing).
    val keyLength = 3987;
    // Must be less than AsyncTableEntryReader.INITIAL_READ_LENGTH / 2 (to ease testing).
    val valueLength = 3123;
    val serializer = new EntrySerializer();

    // Generate a number of entries. We only care about the first one, but we want to ensure that we have enough other
    // data to force the ReadResult to try to read more.
    val testItems = generateTestItems(() -> keyLength, () -> valueLength);
    val entries = testItems.stream()
            .filter(i -> !i.isRemoval)
            .map(i -> TableEntry.unversioned(new ByteArraySegment(i.key), new ByteArraySegment(i.value)))
            .collect(Collectors.toList());

    // Search for the first Key/Entry. This makes it easier as we don't have to guess the versions, offsets, etc.
    val soughtEntry = entries.get(0);
    val segmentData = serializer.serializeUpdate(entries).getCopy();

    @Cleanup
    val readResultNoCompact = new ReadResultMock(segmentData, keyLength + valueLength + 20, keyLength + 200);
    val readerNoCompact = createReader.apply(soughtEntry.getKey().getKey(), 0L, serializer, new TimeoutTimer(TIMEOUT));
    testBufferCompaction(readerNoCompact, readResultNoCompact, getKey, getValue, false);

    @Cleanup
    val readResultWithCompact = new ReadResultMock(segmentData, segmentData.length, segmentData.length);
    val readerWithCompact = createReader.apply(soughtEntry.getKey().getKey(), 0L, serializer, new TimeoutTimer(TIMEOUT));
    testBufferCompaction(readerWithCompact, readResultWithCompact, getKey, getValue, true);
}
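The test above builds its entries by wrapping raw byte arrays in ByteArraySegment (a BufferView implementation) and pairing them into unversioned TableEntry instances. A minimal sketch of that construction pattern follows; the import paths are assumed from the identifiers used in the test rather than taken verbatim from the project.

import io.pravega.common.util.BufferView;
import io.pravega.common.util.ByteArraySegment;
import io.pravega.segmentstore.contracts.tables.TableEntry;

public class EntryBuildSketch {
    public static void main(String[] args) {
        // Wrap raw bytes as BufferViews, then pair them into an unversioned entry,
        // mirroring the TableEntry.unversioned(...) calls in the test above.
        BufferView key = new ByteArraySegment(new byte[]{1, 2, 3});
        BufferView value = new ByteArraySegment(new byte[]{4, 5, 6});
        TableEntry entry = TableEntry.unversioned(key, value);
        System.out.println(entry.getKey().getKey().getLength()); // Prints 3.
        System.out.println(entry.getValue().getLength());        // Prints 3.
    }
}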
use of io.pravega.common.util.BufferView in project pravega by pravega.
the class FixedKeyLengthTableCompactorTests method testCompactionConcurrentUpdate.
/**
 * Tests the case when a compaction executes concurrently with one of the compact-copied keys being updated. This is a
 * scenario specific to the Fixed-Key-Length Table Segment, as the indexing is done at the time of the update
 * and not in the background (and hence in sequence).
 */
@Test
public void testCompactionConcurrentUpdate() {
    @Cleanup
    val context = createContext(KEY_COUNT * UPDATE_ENTRY_LENGTH);
    val rnd = new Random(0);

    // Generate keys.
    val keys = new ArrayList<BufferView>();
    val expectedValues = new HashMap<BufferView, BufferView>();
    for (int i = 0; i < KEY_COUNT; i++) {
        byte[] key = new byte[KEY_LENGTH];
        rnd.nextBytes(key);
        keys.add(new ByteArraySegment(key));
    }

    // Set utilization threshold to 76% so that we may trigger a compaction when we update half the keys once.
    context.setSegmentState(0, 0, 0, 0, 76);

    // Insert all the keys ...
    for (val key : keys) {
        expectedValues.put(key, updateKey(key, context, rnd));
    }

    // ... then update the second half. This should require a compaction, which results in a copy.
    for (int i = keys.size() / 2; i < keys.size(); i++) {
        expectedValues.put(keys.get(i), updateKey(keys.get(i), context, rnd));
    }

    val originalLength = context.segmentMetadata.getLength();
    val c = context.getCompactor();
    Assert.assertEquals("Unexpected number of unique entries pre-compaction.",
            expectedValues.size(), (long) c.getUniqueEntryCount().join());
    Assert.assertEquals("Unexpected number of total entries pre-compaction.",
            expectedValues.size() + expectedValues.size() / 2, IndexReader.getTotalEntryCount(context.segmentMetadata));
    Assert.assertTrue("Expecting a compaction to be required.", c.isCompactionRequired().join());

    context.segment.setBeforeAppendCallback(() -> {
        // This callback is invoked while the compactor is running, after it has read and processed all candidates
        // and immediately before its conditional append is executed.
        // We can now update one of the keys that are copied with a new value.
        // Make sure we don't end up in an infinite loop here.
        context.segment.setBeforeAppendCallback(null);
        val firstKey = keys.get(0);
        expectedValues.put(firstKey, updateKey(firstKey, context, rnd));
    });
    c.compact(new TimeoutTimer(TIMEOUT)).join();

    // We should now verify that the compaction did eventually succeed and that all the keys have the correct (expected) values.
    AssertExtensions.assertGreaterThan("Segment length did not change.", originalLength, context.segmentMetadata.getLength());
    AssertExtensions.assertGreaterThan("No compaction occurred.", 0, IndexReader.getCompactionOffset(context.segmentMetadata));
    Assert.assertEquals("Unexpected number of unique entries post-compaction.",
            expectedValues.size(), (long) c.getUniqueEntryCount().join());
    Assert.assertEquals("Unexpected number of total entries post-compaction.",
            expectedValues.size(), IndexReader.getTotalEntryCount(context.segmentMetadata));

    // Read all the entries from the segment and validate that they are as expected.
    val actualValues = new HashMap<BufferView, BufferView>();
    context.segment.attributeIterator(AttributeId.Variable.minValue(KEY_LENGTH), AttributeId.Variable.maxValue(KEY_LENGTH), TIMEOUT).join()
            .forEachRemaining(attributeValues -> {
                for (val av : attributeValues) {
                    val reader = BufferView.wrap(context.segment.read(av.getValue(), UPDATE_ENTRY_LENGTH, TIMEOUT)
                            .readRemaining(UPDATE_ENTRY_LENGTH, TIMEOUT)).getBufferViewReader();
                    try {
                        val e = AsyncTableEntryReader.readEntryComponents(reader, av.getValue(), context.serializer);
                        Assert.assertEquals("Mismatch keys.", av.getKey().toBuffer(), e.getKey());
                        actualValues.put(e.getKey(), e.getValue());
                    } catch (SerializationException ex) {
                        throw new CompletionException(ex);
                    }
                }
            }, executorService()).join();
    AssertExtensions.assertMapEquals("Unexpected entries in the segment after compaction.", expectedValues, actualValues);
}
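In the read-back loop above, BufferView.wrap stitches the buffers returned by the segment read into a single view before a reader is obtained from it. The sketch below shows just that composition step in isolation, assuming BufferView.wrap accepts a list of views and that getCopy() materializes the combined contents as a byte[], as the usages in these tests suggest.

import io.pravega.common.util.BufferView;
import io.pravega.common.util.ByteArraySegment;

import java.util.Arrays;

public class WrapSketch {
    public static void main(String[] args) {
        BufferView first = new ByteArraySegment(new byte[]{1, 2});
        BufferView second = new ByteArraySegment(new byte[]{3, 4});

        // Compose the two buffers into one logical view without copying them.
        BufferView combined = BufferView.wrap(Arrays.asList(first, second));
        System.out.println(combined.getLength());                // Prints 4.
        System.out.println(Arrays.toString(combined.getCopy())); // Prints [1, 2, 3, 4].
    }
}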
use of io.pravega.common.util.BufferView in project pravega by pravega.
the class IndexReaderWriterTests method updateKeys.
private long updateKeys(Map<BufferView, Long> keysWithOffset, IndexWriter w, HashMap<Long, BufferView> existingKeys, SegmentMock segment) {
    val timer = new TimeoutTimer(TIMEOUT);
    val keyUpdates = keysWithOffset.entrySet().stream()
            .map(e -> new BucketUpdate.KeyUpdate(e.getKey(), decodeOffset(e.getValue()), decodeOffset(e.getValue()), isRemoveOffset(e.getValue())))
            .sorted(Comparator.comparingLong(BucketUpdate.KeyUpdate::getOffset))
            .collect(Collectors.toList());

    // This is the value that we will set TABLE_INDEX_NODE to. It is not any key's offset (and we don't really care what its value is).
    long firstKeyOffset = keyUpdates.get(0).getOffset();
    long postIndexOffset = keyUpdates.get(keyUpdates.size() - 1).getOffset() + 2 * MAX_KEY_LENGTH;

    // Generate the BucketUpdates for the keys.
    val builders = w.groupByBucket(segment, keyUpdates, timer).join();

    // Fetch existing keys.
    val oldOffsets = new ArrayList<Long>();
    val entryCount = new AtomicLong(IndexReader.getEntryCount(segment.getInfo()));
    long initialTotalEntryCount = IndexReader.getTotalEntryCount(segment.getInfo());
    int totalEntryCountDelta = 0;
    val bucketUpdates = new ArrayList<BucketUpdate>();
    for (val builder : builders) {
        w.getBucketOffsets(segment, builder.getBucket(), timer).join().forEach(offset -> {
            BufferView existingKey = existingKeys.getOrDefault(offset, null);
            Assert.assertNotNull("Existing bucket points to non-existing key.", existingKey);
            builder.withExistingKey(new BucketUpdate.KeyInfo(existingKey, offset, offset));

            // Key replacement; remove this offset.
            if (keysWithOffset.containsKey(existingKey)) {
                oldOffsets.add(offset);
                // Replaced or removed; we'll add it back if replaced.
                entryCount.decrementAndGet();
            }
        });

        // Add back the count of all keys that have been updated or added; we've already discounted all updates, insertions
        // and removals above, so adding just the updates and insertions will ensure the expected count is accurate.
        val bu = builder.build();
        bucketUpdates.add(bu);
        val deletedCount = bu.getKeyUpdates().stream().filter(BucketUpdate.KeyUpdate::isDeleted).count();
        entryCount.addAndGet(bu.getKeyUpdates().size() - deletedCount);
        totalEntryCountDelta += bu.getKeyUpdates().size();
    }

    // Apply the updates.
    val attrCount = w.updateBuckets(segment, bucketUpdates, firstKeyOffset, postIndexOffset, totalEntryCountDelta, TIMEOUT).join();
    AssertExtensions.assertGreaterThan("Expected at least one attribute to be modified.", 0, attrCount);
    checkEntryCount(entryCount.get(), segment);
    checkTotalEntryCount(initialTotalEntryCount + totalEntryCountDelta, segment);

    // Record the keys as being updated.
    oldOffsets.forEach(existingKeys::remove);
    keysWithOffset.forEach((key, offset) -> {
        if (isRemoveOffset(offset)) {
            existingKeys.remove(decodeOffset(offset), key);
        } else {
            existingKeys.put(decodeOffset(offset), key);
        }
    });
    return postIndexOffset;
}
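updateKeys keys its maps (keysWithOffset, existingKeys) by BufferView, so lookups such as keysWithOffset.containsKey(existingKey) only work if two views over the same bytes compare equal and hash alike. The short sketch below checks exactly that property; it assumes the content-based equals/hashCode behavior that these tests already rely on.

import io.pravega.common.util.BufferView;
import io.pravega.common.util.ByteArraySegment;

import java.util.HashMap;
import java.util.Map;

public class BufferViewMapKeySketch {
    public static void main(String[] args) {
        Map<BufferView, Long> offsets = new HashMap<>();
        offsets.put(new ByteArraySegment("key-1".getBytes()), 1024L);

        // A separate instance wrapping identical bytes must find the same entry
        // for lookups like keysWithOffset.containsKey(existingKey) to work.
        System.out.println(offsets.get(new ByteArraySegment("key-1".getBytes()))); // Prints 1024.
    }
}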