Use of io.pravega.segmentstore.contracts.tables.TableEntry in project pravega by pravega.
The class HashTableSegmentLayout, method entryDeltaIterator.
@Override
AsyncIterator<IteratorItem<TableEntry>> entryDeltaIterator(@NonNull DirectSegmentAccess segment, long fromPosition, Duration fetchTimeout) {
    val segmentInfo = segment.getInfo();
    ensureSegmentType(segmentInfo.getName(), segmentInfo.getType());
    Preconditions.checkArgument(fromPosition <= segmentInfo.getLength(), "fromPosition (%s) can not exceed the length (%s) of the TableSegment.", fromPosition, segmentInfo.getLength());
    logRequest("entryDeltaIterator", segment.getSegmentId(), fromPosition);
    long compactionOffset = segmentInfo.getAttributes().getOrDefault(TableAttributes.COMPACTION_OFFSET, 0L);
    // All of the most recent keys will exist beyond the compactionOffset.
    long startOffset = Math.max(fromPosition, compactionOffset);
    // We should clear if the starting position may have been truncated out due to compaction.
    boolean shouldClear = fromPosition < compactionOffset;
    // Maximum length of the TableSegment we want to read until.
    int maxBytesToRead = (int) (segmentInfo.getLength() - startOffset);
    TableEntryDeltaIterator.ConvertResult<IteratorItem<TableEntry>> converter = item ->
            CompletableFuture.completedFuture(new IteratorItemImpl<TableEntry>(
                    item.getKey().serialize(),
                    Collections.singletonList(item.getValue())));
    return TableEntryDeltaIterator.<IteratorItem<TableEntry>>builder()
            .segment(segment)
            .entrySerializer(serializer)
            .executor(executor)
            .maxBytesToRead(maxBytesToRead)
            .startOffset(startOffset)
            .currentBatchOffset(fromPosition)
            .fetchTimeout(fetchTimeout)
            .resultConverter(converter)
            .shouldClear(shouldClear)
            .build();
}
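A caller consumes the returned iterator by repeatedly requesting the next item. The following is a minimal sketch, assuming Pravega's AsyncIterator contract (getNext() completes with null once the delta is exhausted) and that IteratorItem exposes its batch via getEntries(); drainTo and printEntry are hypothetical helpers standing in for application logic:

private CompletableFuture<Void> drainTo(AsyncIterator<IteratorItem<TableEntry>> iterator) {
    return iterator.getNext().thenCompose(item -> {
        if (item == null) {
            // getNext() completes with null at the end of the iteration.
            return CompletableFuture.completedFuture(null);
        }
        // Process the current batch of entries, then ask for the next one.
        item.getEntries().forEach(this::printEntry);
        return drainTo(iterator);
    });
}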
Use of io.pravega.segmentstore.contracts.tables.TableEntry in project pravega by pravega.
The class FixedKeyLengthTableSegmentLayout, method put.
@Override
CompletableFuture<List<Long>> put(@NonNull DirectSegmentAccess segment, @NonNull List<TableEntry> entries, long tableSegmentOffset, TimeoutTimer timer) {
    val segmentInfo = segment.getInfo();
    ensureSegmentType(segmentInfo.getName(), segmentInfo.getType());
    val segmentKeyLength = getSegmentKeyLength(segmentInfo);
    ensureValidKeyLength(segmentInfo.getName(), segmentKeyLength);
    val attributeUpdates = new AttributeUpdateCollection();
    int batchOffset = 0;
    val batchOffsets = new ArrayList<Integer>();
    boolean isConditional = false;
    for (val e : entries) {
        val key = e.getKey();
        Preconditions.checkArgument(key.getKey().getLength() == segmentKeyLength,
                "Entry Key Length for key `%s` incompatible with segment '%s' which requires key lengths of %s.",
                key, segmentInfo.getName(), segmentKeyLength);
        attributeUpdates.add(createIndexUpdate(key, batchOffset));
        isConditional |= key.hasVersion();
        batchOffsets.add(batchOffset);
        batchOffset += this.serializer.getUpdateLength(e);
    }
    logRequest("put", segmentInfo.getName(), isConditional, tableSegmentOffset, entries.size(), batchOffset);
    if (batchOffset > this.config.getMaxBatchSize()) {
        throw new UpdateBatchTooLargeException(batchOffset, this.config.getMaxBatchSize());
    }
    // Update total number of entries in Table (this includes updates to the same key).
    attributeUpdates.add(new AttributeUpdate(TableAttributes.TOTAL_ENTRY_COUNT, AttributeUpdateType.Accumulate, entries.size()));
    val serializedEntries = this.serializer.serializeUpdate(entries);
    val append = tableSegmentOffset == TableSegmentLayout.NO_OFFSET
            ? segment.append(serializedEntries, attributeUpdates, timer.getRemaining())
            : segment.append(serializedEntries, attributeUpdates, tableSegmentOffset, timer.getRemaining());
    return handleConditionalUpdateException(append, segmentInfo).thenApply(segmentOffset -> {
        this.compactionService.process(new CompactionCandidate(segment));
        return batchOffsets.stream().map(offset -> offset + segmentOffset).collect(Collectors.toList());
    });
}
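The versions this method returns are absolute segment offsets: each entry's relative offset inside the serialized batch (batchOffsets) is shifted by the offset at which the whole batch was appended. A minimal sketch of that arithmetic, where updateLengths stands in for the per-entry values of serializer.getUpdateLength(e):

private List<Long> versionsFor(List<Integer> updateLengths, long appendOffset) {
    // Mirror of the loop above: accumulate relative offsets, then shift by the append offset.
    List<Long> versions = new ArrayList<>();
    int relative = 0;
    for (int length : updateLengths) {
        versions.add(appendOffset + relative); // Version == absolute offset of the entry in the segment.
        relative += length;
    }
    return versions;
}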
Use of io.pravega.segmentstore.contracts.tables.TableEntry in project pravega by pravega.
The class TableBucketReader, method find.
/**
 * Attempts to locate something in a TableBucket that matches a particular key.
 *
 * @param soughtKey    A {@link BufferView} instance representing the Key we are looking for.
 * @param bucketOffset The current segment offset of the Table Bucket we are looking into.
 * @param timer        A {@link TimeoutTimer} for the operation.
 * @return A CompletableFuture that, when completed, will contain the desired result, or null if no such result
 * was found.
 */
CompletableFuture<ResultT> find(BufferView soughtKey, long bucketOffset, TimeoutTimer timer) {
    int maxReadLength = getMaxReadLength();
    // Read the Key at the current offset and check it against the sought one.
    AtomicLong offset = new AtomicLong(bucketOffset);
    CompletableFuture<ResultT> result = new CompletableFuture<>();
    Futures.loop(
            () -> !result.isDone(),
            () -> {
                // Read the Key from the Segment. Copy it out of the Segment to avoid losing it or getting corrupted
                // values back in case of a cache eviction. See {@link ReadResult#setCopyOnRead(boolean)}.
                ReadResult readResult = this.segment.read(offset.get(), maxReadLength, timer.getRemaining());
                val reader = getReader(soughtKey, offset.get(), timer);
                AsyncReadResultProcessor.process(readResult, reader, this.executor);
                return reader.getResult().thenComposeAsync(r -> {
                    SearchContinuation sc = processResult(r, soughtKey);
                    if (sc == SearchContinuation.ResultFound || sc == SearchContinuation.NoResult) {
                        // We either definitely found the result or definitely did not find the result.
                        // Even if we did not find what we were looking for, we may still have a partial
                        // result to return to the caller (i.e., a TableEntry with no value but with a
                        // version, which indicates a deleted entry, as opposed to a nonexistent one).
                        result.complete(r);
                    } else {
                        // Inconclusive; follow the backpointer to the previous entry in this bucket.
                        return this.getBackpointer.apply(this.segment, offset.get(), timer.getRemaining()).thenAccept(newOffset -> {
                            offset.set(newOffset);
                            if (newOffset < 0) {
                                // Could not find anything.
                                result.complete(null);
                            }
                        });
                    }
                    return CompletableFuture.completedFuture(null);
                }, this.executor);
            },
            this.executor)
            .exceptionally(ex -> {
                result.completeExceptionally(ex);
                return null;
            });
    return result;
}
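Stripped of the asynchrony, this loop is a linked-list traversal: each entry in a bucket carries a backpointer to the previous entry written for the same key hash, and the search walks that chain from the newest entry backwards. A simplified synchronous sketch, where readKeyAt and backpointerOf are hypothetical stand-ins for the segment read and this.getBackpointer, and a negative offset marks the end of the chain:

private Long findOffset(BufferView soughtKey, long bucketOffset) {
    long offset = bucketOffset;
    while (offset >= 0) {
        if (readKeyAt(offset).equals(soughtKey)) {
            return offset; // Newest entry for this key lives at this offset.
        }
        offset = backpointerOf(offset); // Previous entry in the bucket, or negative if none.
    }
    return null; // Chain exhausted without a match.
}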
Use of io.pravega.segmentstore.contracts.tables.TableEntry in project pravega by pravega.
The class ContainerKeyIndexTests, method testRecovery.
private void testRecovery(TableExtensionConfig config) throws Exception {
    val s = new EntrySerializer();
    @Cleanup
    val context = new TestContext(config);
    // Set up the segment with initial attributes.
    val iw = new IndexWriter(HASHER, executorService());
    // 1. Generate the initial set of keys and serialize them to the segment.
    val keys = generateUnversionedKeys(BATCH_SIZE, context);
    val entries1 = new ArrayList<TableEntry>(keys.size());
    val offset = new AtomicLong();
    val hashes = new ArrayList<UUID>();
    val keysWithOffsets = new HashMap<UUID, KeyWithOffset>();
    for (val k : keys) {
        val hash = HASHER.hash(k.getKey());
        hashes.add(hash);
        byte[] valueData = new byte[Math.max(1, context.random.nextInt(100))];
        context.random.nextBytes(valueData);
        val entry = TableEntry.unversioned(k.getKey(), new ByteArraySegment(valueData));
        keysWithOffsets.put(hash, new KeyWithOffset(k.getKey(), offset.getAndAdd(s.getUpdateLength(entry))));
        entries1.add(entry);
    }
    val update1 = s.serializeUpdate(entries1);
    Assert.assertEquals(offset.get(), update1.getLength());
    context.segment.append(update1, null, TIMEOUT).join();
    // 2. Initiate a recovery and verify pre-caching is triggered and requests are auto-unblocked.
    val get1 = context.index.getBucketOffsets(context.segment, hashes, context.timer);
    val result1 = get1.get(SHORT_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
    val expected1 = new HashMap<UUID, Long>();
    keysWithOffsets.forEach((k, o) -> expected1.put(k, o.offset));
    AssertExtensions.assertMapEquals("Unexpected result from getBucketOffsets() after auto pre-caching.", expected1, result1);
    // 3. Set LastIdx to Length, then increase the Length by TEST_MAX_TAIL_CACHE_PRE_INDEX_LENGTH + 1 (so we don't do pre-caching).
    val buckets = iw.locateBuckets(context.segment, keysWithOffsets.keySet(), context.timer).join();
    Collection<BucketUpdate> bucketUpdates = buckets.entrySet().stream().map(e -> {
        val builder = BucketUpdate.forBucket(e.getValue());
        val ko = keysWithOffsets.get(e.getKey());
        builder.withKeyUpdate(new BucketUpdate.KeyUpdate(ko.key, ko.offset, ko.offset, false));
        return builder.build();
    }).collect(Collectors.toList());
    iw.updateBuckets(context.segment, bucketUpdates, 0L, offset.get(), keysWithOffsets.size(), TIMEOUT).join();
    context.segment.append(new ByteArraySegment(new byte[TEST_MAX_TAIL_CACHE_PRE_INDEX_LENGTH + 1]), null, TIMEOUT).join();
    // 4. Verify pre-caching is disabled and that the requests are blocked.
    // Force-evict it so we start clean.
    context.index.notifyIndexOffsetChanged(context.segment.getSegmentId(), -1, 0);
    val getBucketOffsets = context.index.getBucketOffsets(context.segment, hashes, context.timer);
    val backpointerKey = keysWithOffsets.values().stream().findFirst().get();
    val getBackpointers = context.index.getBackpointerOffset(context.segment, backpointerKey.offset, context.timer.getRemaining());
    val getUnindexedKeys = context.index.getUnindexedKeyHashes(context.segment);
    val conditionalUpdateKey = TableKey.notExists(generateUnversionedKeys(1, context).get(0).getKey());
    val conditionalUpdate = context.index.update(context.segment, toUpdateBatch(conditionalUpdateKey),
            () -> CompletableFuture.completedFuture(context.segment.getInfo().getLength() + 1L), context.timer);
    Assert.assertFalse("Expected getBucketOffsets() to block.", getBucketOffsets.isDone());
    Assert.assertFalse("Expected getBackpointerOffset() to block.", getBackpointers.isDone());
    Assert.assertFalse("Expecting conditional update to block.", conditionalUpdate.isDone());
    // 4.1. Verify unconditional updates go through.
    val unconditionalUpdateKey = generateUnversionedKeys(1, context).get(0);
    val unconditionalUpdateResult = context.index.update(context.segment, toUpdateBatch(unconditionalUpdateKey),
            () -> CompletableFuture.completedFuture(context.segment.getInfo().getLength() + 2L), context.timer)
            .get(SHORT_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
    Assert.assertEquals("Unexpected result from the non-blocked unconditional update.",
            context.segment.getInfo().getLength() + 2L, (long) unconditionalUpdateResult.get(0));
    // 5. Verify that all operations are unblocked when we reach the expected IndexOffset.
    context.index.notifyIndexOffsetChanged(context.segment.getSegmentId(), context.segment.getInfo().getLength() - 1, 0);
    Assert.assertFalse("Not expecting anything to be unblocked at this point",
            getBucketOffsets.isDone() || getBackpointers.isDone() || conditionalUpdate.isDone() || getUnindexedKeys.isDone());
    context.index.notifyIndexOffsetChanged(context.segment.getSegmentId(), context.segment.getInfo().getLength(), 0);
    val getBucketOffsetsResult = getBucketOffsets.get(SHORT_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
    val getBackpointersResult = getBackpointers.get(SHORT_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
    val conditionalUpdateResult = conditionalUpdate.get(SHORT_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
    val getUnindexedKeysResult = getUnindexedKeys.get(SHORT_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
    checkKeyOffsets(hashes, keysWithOffsets, getBucketOffsetsResult);
    Assert.assertEquals("Unexpected result from unblocked getBackpointerOffset().", -1L, (long) getBackpointersResult);
    Assert.assertEquals("Unexpected result from unblocked conditional update.",
            context.segment.getInfo().getLength() + 1L, (long) conditionalUpdateResult.get(0));
    // Depending on the order in which the internal recovery tracker (implemented by CompletableFuture.thenCompose)
    // executes its callbacks, the result of this call may be either 1 or 2 (it may unblock prior to the conditional
    // update unblocking, or the other way around).
    Assert.assertTrue("Unexpected result size from unblocked getUnindexedKeyHashes().",
            getUnindexedKeysResult.size() == 1 || getUnindexedKeysResult.size() == 2);
    // However, verify that in the end, we have 2 unindexed keys.
    val finalGetUnindexedKeysResult = context.index.getUnindexedKeyHashes(context.segment).get(SHORT_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
    Assert.assertEquals("Unexpected result size from final getUnindexedKeyHashes().", 2, finalGetUnindexedKeysResult.size());
    // 6. Verify no new requests are blocked now. A timeout check will suffice.
    getBucketOffsets.get(SHORT_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
    // 7. Verify requests are cancelled if we notify the index that the segment has been removed.
    context.index.notifyIndexOffsetChanged(context.segment.getSegmentId(), -1L, 0);
    val cancelledKey = TableKey.notExists(generateUnversionedKeys(1, context).get(0).getKey());
    val cancelledRequest = context.index.update(context.segment, toUpdateBatch(cancelledKey),
            () -> CompletableFuture.completedFuture(context.segment.getInfo().getLength() + 3L), context.timer);
    context.index.notifyIndexOffsetChanged(context.segment.getSegmentId(), -1L, 0);
    AssertExtensions.assertFutureThrows("Blocked request was not cancelled when a segment remove notification was received.",
            cancelledRequest, ex -> ex instanceof CancellationException);
    // 8. Verify requests are cancelled (properly) when we close the index.
    context.index.notifyIndexOffsetChanged(context.segment.getSegmentId(), -1L, 0);
    val cancelledKey2 = TableKey.notExists(generateUnversionedKeys(1, context).get(0).getKey());
    val cancelledRequest2 = context.index.update(context.segment, toUpdateBatch(cancelledKey2),
            () -> CompletableFuture.completedFuture(context.segment.getInfo().getLength() + 4L), context.timer);
    context.index.close();
    AssertExtensions.assertFutureThrows("Blocked request was not cancelled when the index was closed.",
            cancelledRequest2, ex -> ex instanceof ObjectClosedException);
}
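The blocking and unblocking this test verifies can be pictured as futures parked against a target index offset and released (or cancelled) by offset-change notifications. A minimal sketch of that gating pattern, using hypothetical names; the real ContainerKeyIndex recovery tracker is more involved:

private final List<Map.Entry<Long, CompletableFuture<Void>>> waiters = new ArrayList<>();

// Park a request until the index catches up to targetOffset.
private synchronized CompletableFuture<Void> waitForIndex(long targetOffset) {
    CompletableFuture<Void> f = new CompletableFuture<>();
    waiters.add(Map.entry(targetOffset, f));
    return f;
}

// Analogue of notifyIndexOffsetChanged: release parked requests whose target
// is now indexed, or cancel everything if the segment was removed (offset < 0).
private synchronized void onIndexOffsetChanged(long indexedOffset) {
    waiters.removeIf(w -> {
        if (indexedOffset < 0) {
            w.getValue().cancel(true); // Segment removed: cancel blocked requests.
            return true;
        }
        if (w.getKey() <= indexedOffset) {
            w.getValue().complete(null); // Index caught up: unblock.
            return true;
        }
        return false;
    });
}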
Use of io.pravega.segmentstore.contracts.tables.TableEntry in project pravega by pravega.
The class PravegaRequestProcessorTest, method testUpdateEntries.
@Test(timeout = 20000)
public void testUpdateEntries() throws Exception {
    // Set up a PravegaRequestProcessor instance to execute requests against.
    val rnd = new Random(0);
    String tableSegmentName = "testUpdateEntries";
    @Cleanup
    ServiceBuilder serviceBuilder = newInlineExecutionInMemoryBuilder(getBuilderConfig());
    serviceBuilder.initialize();
    StreamSegmentStore store = serviceBuilder.createStreamSegmentService();
    TableStore tableStore = serviceBuilder.createTableStoreService();
    ServerConnection connection = mock(ServerConnection.class);
    ConnectionTracker tracker = mock(ConnectionTracker.class);
    InOrder order = inOrder(connection);
    val recorderMock = mock(TableSegmentStatsRecorder.class);
    PravegaRequestProcessor processor = new PravegaRequestProcessor(store, tableStore, new TrackedConnection(connection, tracker),
            SegmentStatsRecorder.noOp(), recorderMock, new PassingTokenVerifier(), false);
    // Generate keys.
    ArrayList<ArrayView> keys = generateKeys(3, rnd);
    // Execute createTableSegment and verify the call stack runs as designed.
    processor.createTableSegment(new WireCommands.CreateTableSegment(1, tableSegmentName, false, 0, "", 0));
    order.verify(connection).send(new WireCommands.SegmentCreated(1, tableSegmentName));
    verify(recorderMock).createTableSegment(eq(tableSegmentName), any());
    // Test with unversioned data.
    TableEntry e1 = TableEntry.unversioned(keys.get(0), generateValue(rnd));
    WireCommands.TableEntries cmd = getTableEntries(singletonList(e1));
    processor.updateTableEntries(new WireCommands.UpdateTableEntries(2, tableSegmentName, "", cmd, WireCommands.NULL_TABLE_SEGMENT_OFFSET));
    order.verify(connection).send(new WireCommands.TableEntriesUpdated(2, singletonList(0L)));
    verify(recorderMock).updateEntries(eq(tableSegmentName), eq(1), eq(false), any());
    verifyConnectionTracker(e1, connection, tracker);
    // Test with versioned data.
    e1 = TableEntry.versioned(keys.get(0), generateValue(rnd), 0L);
    cmd = getTableEntries(singletonList(e1));
    processor.updateTableEntries(new WireCommands.UpdateTableEntries(3, tableSegmentName, "", cmd, WireCommands.NULL_TABLE_SEGMENT_OFFSET));
    ArgumentCaptor<WireCommand> wireCommandsCaptor = ArgumentCaptor.forClass(WireCommand.class);
    order.verify(connection).send(wireCommandsCaptor.capture());
    verify(recorderMock).updateEntries(eq(tableSegmentName), eq(1), eq(true), any());
    verifyConnectionTracker(e1, connection, tracker);
    List<Long> keyVersions = ((WireCommands.TableEntriesUpdated) wireCommandsCaptor.getAllValues().get(0)).getUpdatedVersions();
    assertTrue(keyVersions.size() == 1);
    // Test with a key that is not present. The table store throws KeyNotExistsException.
    TableEntry e2 = TableEntry.versioned(keys.get(1), generateValue(rnd), 0L);
    processor.updateTableEntries(new WireCommands.UpdateTableEntries(4, tableSegmentName, "", getTableEntries(singletonList(e2)), WireCommands.NULL_TABLE_SEGMENT_OFFSET));
    order.verify(connection).send(new WireCommands.TableKeyDoesNotExist(4, tableSegmentName, ""));
    verifyNoMoreInteractions(recorderMock);
    verifyConnectionTracker(e2, connection, tracker);
    // Test with an invalid key version. The table store throws BadKeyVersionException.
    TableEntry e3 = TableEntry.versioned(keys.get(0), generateValue(rnd), 10L);
    processor.updateTableEntries(new WireCommands.UpdateTableEntries(5, tableSegmentName, "", getTableEntries(singletonList(e3)), WireCommands.NULL_TABLE_SEGMENT_OFFSET));
    order.verify(connection).send(new WireCommands.TableKeyBadVersion(5, tableSegmentName, ""));
    verifyNoMoreInteractions(recorderMock);
    verifyConnectionTracker(e3, connection, tracker);
    // Test with a valid tableSegmentOffset.
    long tableSegmentOffset = store.getStreamSegmentInfo(tableSegmentName, Duration.ofMinutes(1)).get().getLength();
    TableEntry e4 = TableEntry.versioned(keys.get(0), generateValue(rnd), keyVersions.get(0));
    processor.updateTableEntries(new WireCommands.UpdateTableEntries(6, tableSegmentName, "", getTableEntries(singletonList(e4)), tableSegmentOffset));
    order.verify(connection).send(wireCommandsCaptor.capture());
    verify(recorderMock, times(2)).updateEntries(eq(tableSegmentName), eq(1), eq(true), any());
    verifyConnectionTracker(e4, connection, tracker);
    keyVersions = ((WireCommands.TableEntriesUpdated) wireCommandsCaptor.getAllValues().get(1)).getUpdatedVersions();
    assertTrue(keyVersions.size() == 1);
    // Test with an invalid tableSegmentOffset.
    TableEntry e5 = TableEntry.versioned(keys.get(0), generateValue(rnd), keyVersions.get(0));
    processor.updateTableEntries(new WireCommands.UpdateTableEntries(7, tableSegmentName, "", getTableEntries(singletonList(e5)), tableSegmentOffset - 1));
    long length = store.getStreamSegmentInfo(tableSegmentName, Duration.ofMinutes(1)).get().getLength();
    order.verify(connection).send(new WireCommands.SegmentIsTruncated(7, tableSegmentName, length, "", tableSegmentOffset - 1));
    verify(recorderMock, times(2)).updateEntries(eq(tableSegmentName), eq(1), eq(true), any());
    verifyConnectionTracker(e5, connection, tracker);
}
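For reference, the update flavors this test drives correspond to the TableEntry factory methods used above (plus notExists, which appears elsewhere on this page via TableKey.notExists). A minimal illustration, where key and value stand in for BufferView instances:

TableEntry unconditional = TableEntry.unversioned(key, value); // Applied regardless of the key's current version.
TableEntry conditional = TableEntry.versioned(key, value, 0L); // Applied only if the key's current version is 0.
TableEntry insertOnly = TableEntry.notExists(key, value); // Applied only if the key does not exist yet.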