use of io.pravega.segmentstore.server.AttributeIndex in project pravega by pravega.
the class AttributeIndexTests method checkIndex.
private void checkIndex(AttributeIndex index, Map<AttributeId, Long> expectedValues) {
    val actual = index.get(expectedValues.keySet(), TIMEOUT).join();
    val expected = expectedValues.entrySet().stream()
            .filter(e -> e.getValue() != Attributes.NULL_ATTRIBUTE_VALUE)
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    AssertExtensions.assertMapEquals("Unexpected attributes in index.", expected, actual);
}
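A minimal sketch (not part of the Pravega test class) of how checkIndex is typically driven, assuming the TestContext, SEGMENT_ID and TIMEOUT fixtures visible in the other snippets on this page: write a batch through AttributeIndex#update, verify it, then mark an attribute as removed by updating it to Attributes.NULL_ATTRIBUTE_VALUE, which checkIndex filters out of the expectation before comparing against AttributeIndex#get.
// Hypothetical driver for checkIndex; context, SEGMENT_ID and TIMEOUT are assumed test fixtures.
AttributeIndex index = context.index.forSegment(SEGMENT_ID, TIMEOUT).join();
Map<AttributeId, Long> expected = new HashMap<>();
expected.put(AttributeId.uuid(0, 0), 10L);
expected.put(AttributeId.uuid(0, 1), 20L);
index.update(expected, TIMEOUT).join();
checkIndex(index, expected);

// "Remove" one attribute by updating it to the null sentinel; get() no longer returns it,
// and checkIndex drops it from the expected map before comparing.
expected.put(AttributeId.uuid(0, 1), Attributes.NULL_ATTRIBUTE_VALUE);
index.update(Collections.singletonMap(AttributeId.uuid(0, 1), Attributes.NULL_ATTRIBUTE_VALUE), TIMEOUT).join();
checkIndex(index, expected);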
use of io.pravega.segmentstore.server.AttributeIndex in project pravega by pravega.
the class ContainerAttributeIndexImpl method forSegment.
// endregion

// region ContainerAttributeIndex Implementation

@Override
public CompletableFuture<AttributeIndex> forSegment(long streamSegmentId, Duration timeout) {
    Exceptions.checkNotClosed(this.closed.get(), this);
    SegmentMetadata sm = this.containerMetadata.getStreamSegmentMetadata(streamSegmentId);
    if (sm.isDeleted()) {
        return Futures.failedFuture(new StreamSegmentNotExistsException(sm.getName()));
    }

    // Figure out if we already have this AttributeIndex cached. If not, we need to initialize it.
    CompletableFuture<AttributeIndex> result;
    AtomicReference<SegmentAttributeBTreeIndex> toInitialize = new AtomicReference<>();
    synchronized (this.attributeIndices) {
        result = this.attributeIndices.computeIfAbsent(streamSegmentId, id -> {
            toInitialize.set(new SegmentAttributeBTreeIndex(sm, this.storage, this.cacheManager.getCacheStorage(), this.config, this.executor));
            return new CompletableFuture<>();
        });
    }

    if (toInitialize.get() == null) {
        // The AttributeIndex is already registered; return its Future (which is either already complete or will complete once
        // its initialization is done).
        return result;
    } else {
        try {
            // Need to initialize the AttributeIndex and complete the future that we just registered.
            // If this fails, we must fail the Future that we previously registered and unregister any pointers to
            // this index.
            toInitialize.get().initialize(timeout)
                        .thenRun(() -> this.cacheManager.register(toInitialize.get()))
                        .whenComplete((r, ex) -> {
                            if (ex == null) {
                                result.complete(toInitialize.get());
                            } else {
                                indexInitializationFailed(streamSegmentId, result, ex);
                            }
                        });
        } catch (Throwable ex) {
            if (!Exceptions.mustRethrow(ex)) {
                indexInitializationFailed(streamSegmentId, result, ex);
            }

            throw ex;
        }
    }

    return result;
}
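A minimal usage sketch of forSegment, assuming a ContainerAttributeIndex named attributeIndex plus segmentId and timeout values already in scope (all hypothetical names): the caller resolves the per-segment index asynchronously and chains work onto it, while the first call for a segment pays the initialization cost and concurrent callers share the same registered Future.
// Hypothetical caller; attributeIndex, segmentId and timeout are assumed to be in scope.
CompletableFuture<Long> rootPointer = attributeIndex
        .forSegment(segmentId, timeout)
        .thenCompose(idx -> idx.update(Collections.singletonMap(AttributeId.uuid(1, 2), 100L), timeout));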
use of io.pravega.segmentstore.server.AttributeIndex in project pravega by pravega.
the class AttributeIndexTests method testRecoveryAfterIncompleteUpdateWithRootPointer.
/**
 * Tests the ability of the Attribute Index to recover correctly after a partial update has been written to Storage.
 * This simulates how it should be used by a caller: after every update, the {@link Attributes#ATTRIBUTE_SEGMENT_ROOT_POINTER}
 * attribute of the segment should be set to the return value from {@link AttributeIndex#update} in order to perform
 * a correct recovery.
 */
@Test
public void testRecoveryAfterIncompleteUpdateWithRootPointer() {
    final int attributeCount = 1000;
    val attributes = IntStream.range(0, attributeCount).mapToObj(i -> AttributeId.uuid(i, i)).collect(Collectors.toList());
    @Cleanup val context = new TestContext(DEFAULT_CONFIG);

    // Root Pointers are read from the Segment's Metadata if Storage does not support atomic writes. This test validates
    // that case: recovery from a partial write with the help of metadata-stored Root Pointers (if the Root Pointers were
    // not stored in the metadata, the recovery would fail).
    context.storage.supportsAtomicWrites = false;
    populateSegments(context);

    // 1. Populate and verify the first index.
    val idx = context.index.forSegment(SEGMENT_ID, TIMEOUT).join();
    val expectedValues = new HashMap<AttributeId, Long>();
    val updateBatch = new HashMap<AttributeId, Long>();
    AtomicLong nextValue = new AtomicLong(0);
    for (AttributeId attributeId : attributes) {
        long value = nextValue.getAndIncrement();
        expectedValues.put(attributeId, value);
        updateBatch.put(attributeId, value);
    }

    // Perform the update and remember the root pointer.
    long rootPointer = idx.update(updateBatch, TIMEOUT).join();
    context.containerMetadata.getStreamSegmentMetadata(SEGMENT_ID)
            .updateAttributes(Collections.singletonMap(Attributes.ATTRIBUTE_SEGMENT_ROOT_POINTER, rootPointer));

    // 2. Write some garbage data at the end of the segment. This simulates a partial (incomplete) update that did not
    // fully write the BTree pages to the end of the segment.
    String attributeSegmentName = NameUtils.getAttributeSegmentName(SEGMENT_NAME);
    byte[] partialUpdate = new byte[1234];
    context.storage.openWrite(attributeSegmentName)
            .thenCompose(handle -> context.storage.write(
                    handle,
                    context.storage.getStreamSegmentInfo(attributeSegmentName, TIMEOUT).join().getLength(),
                    new ByteArrayInputStream(partialUpdate),
                    partialUpdate.length,
                    TIMEOUT))
            .join();

    // 3. Reload the index and verify it still has the correct values. This also forces a cache cleanup so we read data
    // directly from Storage.
    context.index.cleanup(null);
    val storageRead = new AtomicBoolean();
    context.storage.readInterceptor = (name, offset, length, storage) -> CompletableFuture.runAsync(() -> storageRead.set(true));
    val idx2 = context.index.forSegment(SEGMENT_ID, TIMEOUT).join();
    checkIndex(idx2, expectedValues);
    Assert.assertTrue("Expecting storage reads after reload.", storageRead.get());

    // 4. Remove all values (and thus force an update - validates that conditional updates still work in this case).
    idx2.update(toDelete(expectedValues.keySet()), TIMEOUT).join();
    expectedValues.replaceAll((key, v) -> Attributes.NULL_ATTRIBUTE_VALUE);
    checkIndex(idx2, expectedValues);
}
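Distilled from the test above, a minimal sketch of the caller contract described in the Javadoc (idx, updateBatch and segmentMetadata are assumed to be in scope): every successful update is followed by persisting the returned root pointer into the segment's core attributes, so that a later recovery can ignore any bytes written past that pointer.
// Hypothetical caller; idx, updateBatch and segmentMetadata are assumed to be in scope.
long rootPointer = idx.update(updateBatch, TIMEOUT).join();
segmentMetadata.updateAttributes(
        Collections.singletonMap(Attributes.ATTRIBUTE_SEGMENT_ROOT_POINTER, rootPointer));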