Use of io.pravega.segmentstore.contracts.AttributeId in project pravega by pravega.
Class StreamSegmentContainerTests, method testConcurrentSegmentActivation.
/**
* Tests the ability of the StreamSegmentContainer to handle concurrent actions on a Segment that it does not know
* anything about, and to handle the resulting concurrent activation.
* Note: this is tested with a single segment. It could be tested with multiple segments, but different segments
* are mostly independent of each other, so we would not be gaining much by doing so.
*/
@Test
public void testConcurrentSegmentActivation() throws Exception {
    final AttributeId attributeAccumulate = AttributeId.randomUUID();
    final long expectedAttributeValue = APPENDS_PER_SEGMENT + ATTRIBUTE_UPDATES_PER_SEGMENT;
    final int appendLength = 10;
    @Cleanup TestContext context = createContext();
    context.container.startAsync().awaitRunning();
    // 1. Create the StreamSegments.
    String segmentName = createSegments(context).get(0);
    // 2. Add some appends.
    List<CompletableFuture<Void>> opFutures = Collections.synchronizedList(new ArrayList<>());
    AtomicLong expectedLength = new AtomicLong();
    @Cleanup("shutdown") ExecutorService testExecutor = newScheduledThreadPool(Math.min(20, APPENDS_PER_SEGMENT), "testConcurrentSegmentActivation");
    val submitFutures = new ArrayList<Future<?>>();
    for (int i = 0; i < APPENDS_PER_SEGMENT; i++) {
        final byte fillValue = (byte) i;
        submitFutures.add(testExecutor.submit(() -> {
            val attributeUpdates = AttributeUpdateCollection.from(new AttributeUpdate(attributeAccumulate, AttributeUpdateType.Accumulate, 1));
            byte[] appendData = new byte[appendLength];
            Arrays.fill(appendData, (byte) (fillValue + 1));
            opFutures.add(Futures.toVoid(context.container.append(segmentName, new ByteArraySegment(appendData), attributeUpdates, TIMEOUT)));
            expectedLength.addAndGet(appendData.length);
        }));
    }
    // 2.1 Update the attribute.
    for (int i = 0; i < ATTRIBUTE_UPDATES_PER_SEGMENT; i++) {
        submitFutures.add(testExecutor.submit(() -> {
            AttributeUpdateCollection attributeUpdates = AttributeUpdateCollection.from(new AttributeUpdate(attributeAccumulate, AttributeUpdateType.Accumulate, 1));
            opFutures.add(context.container.updateAttributes(segmentName, attributeUpdates, TIMEOUT));
        }));
    }
    // Wait for the submittal of tasks to complete.
    submitFutures.forEach(this::await);
    // Now wait for all the appends to finish.
    Futures.allOf(opFutures).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    // 3. getSegmentInfo: verify final state of the attribute.
    SegmentProperties sp = context.container.getStreamSegmentInfo(segmentName, TIMEOUT).join();
    Assert.assertEquals("Unexpected length for segment " + segmentName, expectedLength.get(), sp.getLength());
    Assert.assertFalse("Unexpected value for isDeleted for segment " + segmentName, sp.isDeleted());
Assert.assertFalse("Unexpected value for isSealed for segment " + segmentName, sp.isDeleted());
Assert.assertEquals("Unexpected Segment Type.", getSegmentType(segmentName), SegmentType.fromAttributes(sp.getAttributes()));
// Verify all attribute values.
Assert.assertEquals("Unexpected value for attribute " + attributeAccumulate + " for segment " + segmentName, expectedAttributeValue, (long) sp.getAttributes().getOrDefault(attributeAccumulate, Attributes.NULL_ATTRIBUTE_VALUE));
checkActiveSegments(context.container, 1);
// 4. Written data.
waitForOperationsInReadIndex(context.container);
byte[] actualData = new byte[(int) expectedLength.get()];
int offset = 0;
@Cleanup ReadResult readResult = context.container.read(segmentName, 0, actualData.length, TIMEOUT).join();
while (readResult.hasNext()) {
ReadResultEntry readEntry = readResult.next();
BufferView readEntryContents = readEntry.getContent().join();
        // Copy this entry's contents into the output buffer and advance by the entry's length.
        int entryLength = readEntryContents.getLength();
        AssertExtensions.assertLessThanOrEqual("Too much to read.", actualData.length, offset + entryLength);
        readEntryContents.copyTo(ByteBuffer.wrap(actualData, offset, entryLength));
        offset += entryLength;
    }
    Assert.assertEquals("Unexpected number of bytes read.", actualData.length, offset);
    Assert.assertTrue("Unexpected number of bytes read (multiple of appendLength).", actualData.length % appendLength == 0);
    boolean[] observedValues = new boolean[APPENDS_PER_SEGMENT + 1];
    for (int i = 0; i < actualData.length; i += appendLength) {
        byte value = actualData[i];
        Assert.assertFalse("Append with value " + value + " was written multiple times.", observedValues[value]);
        observedValues[value] = true;
        for (int j = 1; j < appendLength; j++) {
            Assert.assertEquals("Append was not written atomically at offset " + (i + j), value, actualData[i + j]);
        }
    }
    // Verify all the appends made it (we purposefully did not write 0, since that's the default fill value in an array).
    Assert.assertFalse("Not expecting 0 as a value.", observedValues[0]);
    for (int i = 1; i < observedValues.length; i++) {
        Assert.assertTrue("Append with value " + i + " was not written.", observedValues[i]);
    }
    context.container.stopAsync().awaitTerminated();
}
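For reference, a condensed sketch of the pattern this test relies on. This is illustrative only; the names container, segmentName and data below are stand-ins and not part of the test. Each of the N appends and each of the M standalone attribute updates carries one Accumulate(+1) update on the same AttributeId, and an Accumulate update adds its value to the attribute's current value, so the final value is expected to be N + M (i.e. APPENDS_PER_SEGMENT + ATTRIBUTE_UPDATES_PER_SEGMENT).

// Illustrative sketch: a fresh collection like this is built for every operation in the test.
AttributeId counter = AttributeId.randomUUID();
AttributeUpdateCollection plusOne = AttributeUpdateCollection.from(
        new AttributeUpdate(counter, AttributeUpdateType.Accumulate, 1));
// container.append(segmentName, data, plusOne, TIMEOUT);       // repeated N times
// container.updateAttributes(segmentName, plusOne, TIMEOUT);   // repeated M times
// Expected final value of 'counter' after all operations complete: N + M.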
Use of io.pravega.segmentstore.contracts.AttributeId in project pravega by pravega.
Class AttributeIndexTests, method testRecoveryAfterIncompleteUpdateWithRootPointer.
/**
* Tests the ability of the Attribute Index to recover correctly after a partial update has been written to Storage.
* This simulates how it should be used by a caller: after every update, the {@link Attributes#ATTRIBUTE_SEGMENT_ROOT_POINTER}
* attribute of the segment should be set to the return value from {@link AttributeIndex#update} in order to perform
* a correct recovery.
*/
@Test
public void testRecoveryAfterIncompleteUpdateWithRootPointer() {
    final int attributeCount = 1000;
    val attributes = IntStream.range(0, attributeCount).mapToObj(i -> AttributeId.uuid(i, i)).collect(Collectors.toList());
    @Cleanup val context = new TestContext(DEFAULT_CONFIG);
    // Root pointers are read from the Segment's Metadata if Storage does not support atomic writes. This test validates
    // that case: recovery from a partial write with the help of metadata-stored Root Pointers (if the Root Pointers
    // were not stored in the metadata, then the recovery would fail).
    context.storage.supportsAtomicWrites = false;
    populateSegments(context);
    // 1. Populate and verify first index.
    val idx = context.index.forSegment(SEGMENT_ID, TIMEOUT).join();
    val expectedValues = new HashMap<AttributeId, Long>();
    val updateBatch = new HashMap<AttributeId, Long>();
    AtomicLong nextValue = new AtomicLong(0);
    for (AttributeId attributeId : attributes) {
        long value = nextValue.getAndIncrement();
        expectedValues.put(attributeId, value);
        updateBatch.put(attributeId, value);
    }
    // Perform the update and remember the root pointer.
    long rootPointer = idx.update(updateBatch, TIMEOUT).join();
    context.containerMetadata.getStreamSegmentMetadata(SEGMENT_ID).updateAttributes(Collections.singletonMap(Attributes.ATTRIBUTE_SEGMENT_ROOT_POINTER, rootPointer));
    // 2. Write some garbage data at the end of the segment. This simulates a partial (incomplete) update that did not
    // fully write the BTree pages to the end of the segment.
    String attributeSegmentName = NameUtils.getAttributeSegmentName(SEGMENT_NAME);
    byte[] partialUpdate = new byte[1234];
    context.storage.openWrite(attributeSegmentName).thenCompose(handle -> context.storage.write(handle, context.storage.getStreamSegmentInfo(attributeSegmentName, TIMEOUT).join().getLength(), new ByteArrayInputStream(partialUpdate), partialUpdate.length, TIMEOUT)).join();
    // 3. Reload index and verify it still has the correct values. This also forces a cache cleanup so we read data
    // directly from Storage.
    context.index.cleanup(null);
    val storageRead = new AtomicBoolean();
    context.storage.readInterceptor = (name, offset, length, storage) -> CompletableFuture.runAsync(() -> storageRead.set(true));
    val idx2 = context.index.forSegment(SEGMENT_ID, TIMEOUT).join();
    checkIndex(idx2, expectedValues);
    Assert.assertTrue("Expecting storage reads after reload.", storageRead.get());
    // 4. Remove all values (and thus force an update - validates conditional updates still work in this case).
    idx2.update(toDelete(expectedValues.keySet()), TIMEOUT).join();
    expectedValues.replaceAll((key, v) -> Attributes.NULL_ATTRIBUTE_VALUE);
    checkIndex(idx2, expectedValues);
}
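The caller-side contract described in the Javadoc above can be summarized with a short sketch. This is illustrative only; index, segmentMetadata and updateBatch are stand-in names for the objects the test obtains from its TestContext.

// After every index update, persist the returned root pointer in the segment's metadata;
// a later recovery can then ignore partially written data located past that pointer.
long rootPointer = index.update(updateBatch, TIMEOUT).join();
segmentMetadata.updateAttributes(
        Collections.singletonMap(Attributes.ATTRIBUTE_SEGMENT_ROOT_POINTER, rootPointer));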
Use of io.pravega.segmentstore.contracts.AttributeId in project pravega by pravega.
Class MetadataStoreTestBase, method testGetStreamSegmentInfoWithConcurrency.
/**
* Tests GetStreamSegmentInfo when it is invoked in parallel with a Segment assignment.
* @throws Exception if the test failed.
*/
@Test
public void testGetStreamSegmentInfoWithConcurrency() throws Exception {
    final String segmentName = "Segment";
    final long segmentId = 123;
    final SegmentProperties storageInfo = StreamSegmentInformation.builder().name(segmentName).length(123).sealed(true).build();
    final long metadataLength = storageInfo.getLength() + 1;
    @Cleanup TestContext context = createTestContext();
    val initialSegmentInfo = StreamSegmentInformation.builder().name(segmentName).startOffset(0L).length(1L).attributes(toAttributes(createAttributeUpdates(ATTRIBUTE_COUNT))).build();
    context.getMetadataStore().updateSegmentInfo(toMetadata(segmentId, initialSegmentInfo), TIMEOUT).join();
    Map<AttributeId, Long> expectedAttributes = initialSegmentInfo.getAttributes();
    CompletableFuture<Void> addInvoked = new CompletableFuture<>();
    context.connector.setMapSegmentId((id, sp, pin, timeout) -> {
        addInvoked.join();
        UpdateableSegmentMetadata segmentMetadata = context.getMetadata().mapStreamSegmentId(segmentName, segmentId);
        segmentMetadata.setStorageLength(sp.getLength());
        segmentMetadata.setLength(metadataLength);
        segmentMetadata.updateAttributes(expectedAttributes);
        if (sp.isSealed()) {
            segmentMetadata.markSealed();
        }
        if (pin) {
            segmentMetadata.markPinned();
        }
        return CompletableFuture.completedFuture(segmentId);
    });
    // Second call is designed to hit when the first call still tries to assign the id, hence we test normal queueing.
    context.getMetadataStore().getOrAssignSegmentId(segmentName, TIMEOUT, id -> CompletableFuture.completedFuture(null));
    // Concurrently with the map, request a Segment Info.
    CompletableFuture<SegmentProperties> segmentInfoFuture = context.getMetadataStore().getSegmentInfo(segmentName, TIMEOUT);
    Assert.assertFalse("getSegmentInfo returned a completed future.", segmentInfoFuture.isDone());
    // Release the OperationLog add and verify the Segment Info has been served with information from the Metadata.
    addInvoked.complete(null);
    SegmentProperties segmentInfo = segmentInfoFuture.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    val expectedInfo = context.getMetadata().getStreamSegmentMetadata(segmentId);
    assertEquals("Unexpected Segment Info returned.", expectedInfo, segmentInfo);
    SegmentMetadataComparer.assertSameAttributes("Unexpected attributes returned.", expectedInfo.getAttributes(), segmentInfo);
    int storageGetCount = context.getStoreReadCount();
    Assert.assertEquals("Unexpected number of Storage.read() calls.", 1, storageGetCount);
}
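The concurrency in this test hinges on a simple gating pattern, sketched below. The names gate and dependentFuture are illustrative stand-ins for addInvoked and segmentInfoFuture: the segment-id assignment callback blocks on an incomplete CompletableFuture, which keeps the assignment in flight long enough to observe that the concurrent getSegmentInfo() call has not completed; completing the gate then lets both finish.

CompletableFuture<Void> gate = new CompletableFuture<>();
// Inside the intercepted assignment callback:
//     gate.join();                                  // blocks until the test releases it
// On the test thread:
//     Assert.assertFalse(dependentFuture.isDone()); // still pending while the assignment is blocked
//     gate.complete(null);                          // release; the assignment and its dependents complete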
Use of io.pravega.segmentstore.contracts.AttributeId in project pravega by pravega.
Class MetadataStoreTestBase, method testGetOrAssignStreamSegmentId.
/**
* Tests the ability of the MetadataStore to generate/return the Id of an existing StreamSegment, as well as
* to retrieve its existing attributes.
*/
@Test
public void testGetOrAssignStreamSegmentId() {
    final long baseSegmentId = 1000;
    final long minSegmentLength = 1;
    final int segmentCount = 50;
    Function<String, Long> getSegmentLength = segmentName -> minSegmentLength + MathHelpers.abs(segmentName.hashCode());
    Function<String, Long> getSegmentStartOffset = segmentName -> getSegmentLength.apply(segmentName) / 2;
    @Cleanup TestContext context = createTestContext();
    HashSet<String> segmentNames = new HashSet<>();
    HashSet<String> sealedSegments = new HashSet<>();
    for (int i = 0; i < segmentCount; i++) {
        String segmentName = getName(i);
        segmentNames.add(segmentName);
        val si = StreamSegmentInformation.builder().name(segmentName).length(getSegmentLength.apply(segmentName)).startOffset(getSegmentStartOffset.apply(segmentName)).sealed(i % 2 == 0).attributes(toAttributes(createAttributeUpdates(ATTRIBUTE_COUNT))).build();
        if (si.isSealed()) {
            sealedSegments.add(segmentName);
        }
        context.getMetadataStore().updateSegmentInfo(toMetadata(baseSegmentId + i, si), TIMEOUT).join();
    }
    Predicate<String> isSealed = sealedSegments::contains;
    for (String name : segmentNames) {
        long id = context.getMetadataStore().getOrAssignSegmentId(name, TIMEOUT).join();
        Assert.assertNotEquals("No id was assigned for StreamSegment " + name, ContainerMetadata.NO_STREAM_SEGMENT_ID, id);
        SegmentMetadata sm = context.getMetadata().getStreamSegmentMetadata(id);
        Assert.assertNotNull("No metadata was created for StreamSegment " + name, sm);
        long expectedLength = getSegmentLength.apply(name);
        boolean expectedSeal = isSealed.test(name);
        Assert.assertEquals("Metadata does not have the expected length for StreamSegment " + name, expectedLength, sm.getLength());
        Assert.assertEquals("Metadata does not have the expected value for isSealed for StreamSegment " + name, expectedSeal, sm.isSealed());
        val segmentState = context.getMetadataStore().getSegmentInfo(name, TIMEOUT).join();
        Map<AttributeId, Long> expectedAttributes = segmentState == null ? null : segmentState.getAttributes();
        SegmentMetadataComparer.assertSameAttributes("Unexpected attributes in metadata for StreamSegment " + name, expectedAttributes, sm);
        long expectedStartOffset = segmentState == null ? 0 : segmentState.getStartOffset();
        Assert.assertEquals("Unexpected StartOffset in metadata for " + name, expectedStartOffset, sm.getStartOffset());
    }
}
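In condensed form, the activation flow exercised for each segment is sketched below. The names metadataStore and containerMetadata are illustrative stand-ins for context.getMetadataStore() and context.getMetadata(): segment info pre-seeded via updateSegmentInfo is picked up the first time the segment is assigned an Id, so the in-memory metadata mirrors the stored state.

long id = metadataStore.getOrAssignSegmentId(name, TIMEOUT).join();
SegmentMetadata sm = containerMetadata.getStreamSegmentMetadata(id);
// sm.getLength(), sm.isSealed(), sm.getAttributes() and sm.getStartOffset() are expected
// to reflect the values previously stored via updateSegmentInfo(...).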
Use of io.pravega.segmentstore.contracts.AttributeId in project pravega by pravega.
Class MetadataStoreTestBase, method createAttributeUpdates.
private Collection<AttributeUpdate> createAttributeUpdates(int count) {
    Collection<AttributeUpdate> result = new ArrayList<>(count);
    for (int i = 0; i < count; i++) {
        boolean isCore = i % 2 == 0;
        AttributeId id = isCore ? AttributeId.uuid(Attributes.CORE_ATTRIBUTE_ID_PREFIX, i + 10000) : AttributeId.randomUUID();
        AttributeUpdateType ut = AttributeUpdateType.values()[i % AttributeUpdateType.values().length];
        result.add(new AttributeUpdate(id, ut, i, i));
    }
    return result;
}
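The helper alternates between two flavors of attribute id. Assuming the usual Pravega convention that a Core Attribute is a UUID-based id whose most significant bits equal Attributes.CORE_ATTRIBUTE_ID_PREFIX (and is therefore kept in the segment's metadata), while any other id is an Extended Attribute, the two branches produce ids like these:

AttributeId core = AttributeId.uuid(Attributes.CORE_ATTRIBUTE_ID_PREFIX, 10000); // core: recognized by its MSB prefix
AttributeId extended = AttributeId.randomUUID();                                  // extended: may be persisted in the attribute index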