Use of io.pravega.segmentstore.contracts.ReadResultEntry in project pravega by pravega.
In class ContainerReadIndexTests, method checkReadIndex:
private void checkReadIndex(String testId, HashMap<Long, ByteArrayOutputStream> segmentContents, TestContext context) throws Exception {
    for (long segmentId : segmentContents.keySet()) {
        long startOffset = context.metadata.getStreamSegmentMetadata(segmentId).getStartOffset();
        long segmentLength = context.metadata.getStreamSegmentMetadata(segmentId).getLength();
        byte[] expectedData = segmentContents.get(segmentId).toByteArray();
        if (startOffset > 0) {
            @Cleanup ReadResult truncatedResult = context.readIndex.read(segmentId, 0, 1, TIMEOUT);
            val first = truncatedResult.next();
            Assert.assertEquals("Read request for a truncated offset did not start with a Truncated ReadResultEntryType.", ReadResultEntryType.Truncated, first.getType());
            AssertExtensions.assertThrows("Truncated ReadResultEntryType did not throw when getContent() was invoked.", () -> {
                first.requestContent(TIMEOUT);
                return first.getContent();
            }, ex -> ex instanceof StreamSegmentTruncatedException);
        }
        long expectedCurrentOffset = startOffset;
        @Cleanup ReadResult readResult = context.readIndex.read(segmentId, expectedCurrentOffset, (int) (segmentLength - expectedCurrentOffset), TIMEOUT);
        Assert.assertTrue(testId + ": Empty read result for segment " + segmentId, readResult.hasNext());
        while (readResult.hasNext()) {
            ReadResultEntry readEntry = readResult.next();
            AssertExtensions.assertGreaterThan(testId + ": getRequestedReadLength should be a positive integer for segment " + segmentId, 0, readEntry.getRequestedReadLength());
            Assert.assertEquals(testId + ": Unexpected value from getStreamSegmentOffset for segment " + segmentId, expectedCurrentOffset, readEntry.getStreamSegmentOffset());
            // Since this is a non-sealed segment, we only expect Cache or Storage read result entries.
            Assert.assertTrue(testId + ": Unexpected type of ReadResultEntry for non-sealed segment " + segmentId, readEntry.getType() == ReadResultEntryType.Cache || readEntry.getType() == ReadResultEntryType.Storage);
            if (readEntry.getType() == ReadResultEntryType.Cache) {
                Assert.assertTrue(testId + ": getContent() did not return a completed future (ReadResultEntryType.Cache) for segment " + segmentId, readEntry.getContent().isDone() && !readEntry.getContent().isCompletedExceptionally());
            } else if (readEntry.getType() == ReadResultEntryType.Storage) {
                Assert.assertFalse(testId + ": getContent() returned an already-completed future (ReadResultEntryType.Storage) for segment " + segmentId, readEntry.getContent().isDone() && !readEntry.getContent().isCompletedExceptionally());
            }
            // Request content, in case it wasn't returned yet.
            readEntry.requestContent(TIMEOUT);
            ReadResultEntryContents readEntryContents = readEntry.getContent().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
            AssertExtensions.assertGreaterThan(testId + ": getContent() returned an empty result entry for segment " + segmentId, 0, readEntryContents.getLength());
            byte[] actualData = new byte[readEntryContents.getLength()];
            StreamHelpers.readAll(readEntryContents.getData(), actualData, 0, actualData.length);
            AssertExtensions.assertArrayEquals(testId + ": Unexpected data read from segment " + segmentId + " at offset " + expectedCurrentOffset, expectedData, (int) expectedCurrentOffset, actualData, 0, readEntryContents.getLength());
            expectedCurrentOffset += readEntryContents.getLength();
        }
        Assert.assertTrue(testId + ": ReadResult was not closed post-full-consumption for segment " + segmentId, readResult.isClosed());
    }
}
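The loop above is the general consumption pattern for ReadResultEntry: iterate the ReadResult, request content, await the future, then drain the stream. Stripped of the assertions, it reduces to the following sketch (readIndex, segmentId, offset, length and TIMEOUT are assumed to be in scope, as in the test above):

// Condensed sketch of the ReadResultEntry consumption loop exercised by checkReadIndex.
@Cleanup ReadResult result = readIndex.read(segmentId, offset, length, TIMEOUT);
while (result.hasNext()) {
    ReadResultEntry entry = result.next();
    entry.requestContent(TIMEOUT); // request content, in case it wasn't returned yet
    ReadResultEntryContents contents = entry.getContent().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    byte[] chunk = new byte[contents.getLength()];
    StreamHelpers.readAll(contents.getData(), chunk, 0, chunk.length);
    // ... process chunk ...
}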
Use of io.pravega.segmentstore.contracts.ReadResultEntry in project pravega by pravega.
In class ContainerReadIndexTests, method testConcurrentReadTransactionStorageMerge:
/**
 * Tests the following scenario, in which the Read Index serves a read from a portion of a parent segment into which
 * a transaction was just merged (fully in storage), and where the read request might otherwise result in either an
 * ObjectClosedException or a StreamSegmentNotExistsException:
 * * A Parent Segment has a Transaction with some data in it, and at least 1 byte of data not in cache.
 * * The Transaction is begin-merged in the parent (Tier 1 only).
 * * A Read Request is issued to the Parent for the range of data from the Transaction, which includes the 1 byte not in cache.
 * * The Transaction is fully merged (Tier 2).
 * * The Read Request is invoked and its content requested. This should correctly retrieve the data from the Parent
 * Segment in Storage, and not attempt to access the now-defunct Transaction segment.
 */
@Test
public void testConcurrentReadTransactionStorageMerge() throws Exception {
    CachePolicy cachePolicy = new CachePolicy(1, Duration.ZERO, Duration.ofMillis(1));
    @Cleanup TestContext context = new TestContext(DEFAULT_CONFIG, cachePolicy);
    // Create a parent segment and one transaction.
    long parentId = createSegment(0, context);
    UpdateableSegmentMetadata parentMetadata = context.metadata.getStreamSegmentMetadata(parentId);
    long transactionId = createTransaction(parentMetadata, 1, context);
    UpdateableSegmentMetadata transactionMetadata = context.metadata.getStreamSegmentMetadata(transactionId);
    createSegmentsInStorage(context);
    // Write something to the transaction, and make sure it also makes its way to Storage.
    byte[] writeData = getAppendData(transactionMetadata.getName(), transactionId, 0, 0);
    appendSingleWrite(transactionId, writeData, context);
    val transactionWriteHandle = context.storage.openWrite(transactionMetadata.getName()).join();
    context.storage.write(transactionWriteHandle, 0, new ByteArrayInputStream(writeData), writeData.length, TIMEOUT).join();
    transactionMetadata.setStorageLength(transactionMetadata.getLength());
    // Seal & begin-merge the transaction (do not seal in storage).
    transactionMetadata.markSealed();
    parentMetadata.setLength(transactionMetadata.getLength());
    context.readIndex.beginMerge(parentId, 0, transactionId);
    transactionMetadata.markMerged();
    // Clear the cache.
    context.cacheManager.applyCachePolicy();
    // Issue a read from the parent and fetch the first entry (there should only be one).
    ReadResult rr = context.readIndex.read(parentId, 0, writeData.length, TIMEOUT);
    Assert.assertTrue("Parent Segment read indicates no data available.", rr.hasNext());
    ReadResultEntry entry = rr.next();
    Assert.assertEquals("Unexpected offset for read result entry.", 0, entry.getStreamSegmentOffset());
    Assert.assertEquals("Served read result entry is not from storage.", ReadResultEntryType.Storage, entry.getType());
    // Merge the transaction in storage & complete-merge it.
    transactionMetadata.markSealed();
    transactionMetadata.markSealedInStorage();
    transactionMetadata.markDeleted();
    context.storage.seal(transactionWriteHandle, TIMEOUT).join();
    val parentWriteHandle = context.storage.openWrite(parentMetadata.getName()).join();
    context.storage.concat(parentWriteHandle, 0, transactionWriteHandle.getSegmentName(), TIMEOUT).join();
    parentMetadata.setStorageLength(parentMetadata.getLength());
    context.readIndex.completeMerge(parentId, transactionId);
    // Attempt to extract data from the read.
    entry.requestContent(TIMEOUT);
    ReadResultEntryContents contents = entry.getContent().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    byte[] readData = new byte[contents.getLength()];
    StreamHelpers.readAll(contents.getData(), readData, 0, readData.length);
    Assert.assertArrayEquals("Unexpected data read from parent segment.", writeData, readData);
}
Use of io.pravega.segmentstore.contracts.ReadResultEntry in project pravega by pravega.
In class StreamSegmentReadResultTests, method testNextTerminal:
private void testNextTerminal(BiFunction<Long, Integer, TestReadResultEntry> terminalEntryCreator) {
    AtomicReference<TestReadResultEntry> nextEntry = new AtomicReference<>();
    StreamSegmentReadResult.NextEntrySupplier nes = (offset, length) -> nextEntry.get();
    // We issue a read with length = MAX_RESULT_LENGTH, and return only half the items, 1 byte at a time.
    @Cleanup StreamSegmentReadResult r = new StreamSegmentReadResult(START_OFFSET, MAX_RESULT_LENGTH, nes, "");
    for (int i = 0; i < MAX_RESULT_LENGTH / 2; i++) {
        // Set up an item to be returned.
        final long expectedStartOffset = START_OFFSET + i;
        final int expectedReadLength = MAX_RESULT_LENGTH - i;
        nextEntry.set(TestReadResultEntry.cache(expectedStartOffset, expectedReadLength));
        r.next();
        nextEntry.get().complete(new ReadResultEntryContents(null, READ_ITEM_LENGTH));
    }
    // Verify we have not reached the end.
    AssertExtensions.assertLessThan("Unexpected state of the StreamSegmentReadResult when consuming half of the result.", r.getMaxResultLength(), r.getConsumedLength());
    Assert.assertTrue("hasNext() did not return true when more items are to be consumed.", r.hasNext());
    // Next time we call next(), return a terminal entry (such as End-of-StreamSegment).
    nextEntry.set(terminalEntryCreator.apply((long) START_OFFSET + MAX_RESULT_LENGTH / 2, MAX_RESULT_LENGTH / 2));
    ReadResultEntry resultEntry = r.next();
    Assert.assertEquals("Unexpected result from next() when returning the terminal item from the result.", nextEntry.get(), resultEntry);
    Assert.assertFalse("hasNext() did not return false when reaching a terminal state.", r.hasNext());
    resultEntry = r.next();
    Assert.assertNull("next() did not return null when it encountered a terminal state.", resultEntry);
}
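This helper is parameterized by a factory for the terminal entry, so one body can verify every terminal entry type. The enclosing test class presumably drives it along these lines (the TestReadResultEntry factory names endOfStreamSegment and truncated are assumptions for illustration, by analogy with TestReadResultEntry.cache above):

@Test
public void testNextEndOfStreamSegment() {
    // Hypothetical driver: terminal entry of type EndOfStreamSegment.
    testNextTerminal(TestReadResultEntry::endOfStreamSegment);
}

@Test
public void testNextTruncated() {
    // Hypothetical driver: terminal entry of type Truncated.
    testNextTerminal(TestReadResultEntry::truncated);
}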
Use of io.pravega.segmentstore.contracts.ReadResultEntry in project pravega by pravega.
In class ReadTest, method testReadDirectlyFromStore:
@Test
public void testReadDirectlyFromStore() throws InterruptedException, ExecutionException, IOException {
    String segmentName = "testReadFromStore";
    final int entries = 10;
    final byte[] data = new byte[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
    UUID clientId = UUID.randomUUID();
    StreamSegmentStore segmentStore = serviceBuilder.createStreamSegmentService();
    fillStoreForSegment(segmentName, clientId, data, entries, segmentStore);
    ReadResult result = segmentStore.read(segmentName, 0, entries * data.length, Duration.ZERO).get();
    int index = 0;
    while (result.hasNext()) {
        ReadResultEntry entry = result.next();
        ReadResultEntryType type = entry.getType();
        assertEquals(ReadResultEntryType.Cache, type);
        // Each ReadResultEntryContents may be of an arbitrary length - we should make no assumptions.
        ReadResultEntryContents contents = entry.getContent().get();
        byte next;
        while ((next = (byte) contents.getData().read()) != -1) {
            byte expected = data[index % data.length];
            assertEquals(expected, next);
            index++;
        }
    }
    assertEquals(entries * data.length, index);
}
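fillStoreForSegment is a helper defined elsewhere in ReadTest and is not shown in this excerpt. A minimal sketch of what it plausibly does, creating the segment and then appending data to it entries times (the exact createStreamSegment/append signatures, and the null attribute arguments, are assumptions):

// Hypothetical sketch of the fillStoreForSegment helper used above.
// clientId is unused in this simplified form.
private void fillStoreForSegment(String segmentName, UUID clientId, byte[] data, int entries, StreamSegmentStore segmentStore) {
    segmentStore.createStreamSegment(segmentName, null, Duration.ZERO).join();
    for (int i = 0; i < entries; i++) {
        segmentStore.append(segmentName, data, null, Duration.ZERO).join();
    }
}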
Use of io.pravega.segmentstore.contracts.ReadResultEntry in project pravega by pravega.
In class StreamSegmentContainerTests, method testConcurrentSegmentActivation:
/**
 * Tests the ability of the StreamSegmentContainer to handle concurrent actions on a Segment that it does not know
 * anything about, and to handle the resulting concurrency.
 * Note: this is tested with a single segment. It could be tested with multiple segments, but different segments
 * are mostly independent of each other, so we would not be gaining much by doing so.
 */
@Test
public void testConcurrentSegmentActivation() throws Exception {
    final UUID attributeAccumulate = UUID.randomUUID();
    final long expectedAttributeValue = APPENDS_PER_SEGMENT + ATTRIBUTE_UPDATES_PER_SEGMENT;
    final int appendLength = 10;
    @Cleanup TestContext context = new TestContext();
    context.container.startAsync().awaitRunning();
    // 1. Create the StreamSegments.
    String segmentName = createSegments(context).get(0);
    // 2. Add some appends.
    List<CompletableFuture<Void>> opFutures = Collections.synchronizedList(new ArrayList<>());
    AtomicLong expectedLength = new AtomicLong();
    @Cleanup("shutdown") ExecutorService testExecutor = newScheduledThreadPool(Math.min(20, APPENDS_PER_SEGMENT), "testConcurrentSegmentActivation");
    val submitFutures = new ArrayList<Future<?>>();
    for (int i = 0; i < APPENDS_PER_SEGMENT; i++) {
        final byte fillValue = (byte) i;
        submitFutures.add(testExecutor.submit(() -> {
            Collection<AttributeUpdate> attributeUpdates = Collections.singleton(new AttributeUpdate(attributeAccumulate, AttributeUpdateType.Accumulate, 1));
            byte[] appendData = new byte[appendLength];
            Arrays.fill(appendData, (byte) (fillValue + 1));
            opFutures.add(context.container.append(segmentName, appendData, attributeUpdates, TIMEOUT));
            expectedLength.addAndGet(appendData.length);
        }));
    }
    // 2.1. Update the attribute.
    for (int i = 0; i < ATTRIBUTE_UPDATES_PER_SEGMENT; i++) {
        submitFutures.add(testExecutor.submit(() -> {
            Collection<AttributeUpdate> attributeUpdates = new ArrayList<>();
            attributeUpdates.add(new AttributeUpdate(attributeAccumulate, AttributeUpdateType.Accumulate, 1));
            opFutures.add(context.container.updateAttributes(segmentName, attributeUpdates, TIMEOUT));
        }));
    }
    // Wait for the submission of tasks to complete.
    submitFutures.forEach(this::await);
    // Now wait for all the appends to finish.
    Futures.allOf(opFutures).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    // 3. getSegmentInfo: verify the final state of the attribute.
    SegmentProperties sp = context.container.getStreamSegmentInfo(segmentName, false, TIMEOUT).join();
    Assert.assertEquals("Unexpected length for segment " + segmentName, expectedLength.get(), sp.getLength());
    Assert.assertFalse("Unexpected value for isDeleted for segment " + segmentName, sp.isDeleted());
    Assert.assertFalse("Unexpected value for isSealed for segment " + segmentName, sp.isSealed());
    // Verify all attribute values.
    Assert.assertEquals("Unexpected value for attribute " + attributeAccumulate + " for segment " + segmentName, expectedAttributeValue, (long) sp.getAttributes().getOrDefault(attributeAccumulate, SegmentMetadata.NULL_ATTRIBUTE_VALUE));
    checkActiveSegments(context.container, 1);
    // 4. Verify the written data.
    waitForOperationsInReadIndex(context.container);
    byte[] actualData = new byte[(int) expectedLength.get()];
    int offset = 0;
    @Cleanup ReadResult readResult = context.container.read(segmentName, 0, actualData.length, TIMEOUT).join();
    while (readResult.hasNext()) {
        ReadResultEntry readEntry = readResult.next();
        ReadResultEntryContents readEntryContents = readEntry.getContent().join();
        AssertExtensions.assertLessThanOrEqual("Too much to read.", actualData.length, offset + readEntryContents.getLength());
        StreamHelpers.readAll(readEntryContents.getData(), actualData, offset, readEntryContents.getLength());
        offset += readEntryContents.getLength();
    }
    Assert.assertEquals("Unexpected number of bytes read.", actualData.length, offset);
    Assert.assertTrue("Unexpected number of bytes read (multiple of appendLength).", actualData.length % appendLength == 0);
    boolean[] observedValues = new boolean[APPENDS_PER_SEGMENT + 1];
    for (int i = 0; i < actualData.length; i += appendLength) {
        byte value = actualData[i];
        Assert.assertFalse("Append with value " + value + " was written multiple times.", observedValues[value]);
        observedValues[value] = true;
        for (int j = 1; j < appendLength; j++) {
            Assert.assertEquals("Append was not written atomically at offset " + (i + j), value, actualData[i + j]);
        }
    }
    // Verify all the appends made it (we purposefully did not write 0, since that's the default fill value in an array).
    Assert.assertFalse("Not expecting 0 as a value.", observedValues[0]);
    for (int i = 1; i < observedValues.length; i++) {
        Assert.assertTrue("Append with value " + i + " was not written.", observedValues[i]);
    }
    context.container.stopAsync().awaitTerminated();
}
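The await helper referenced via this::await is also not shown in this excerpt. A minimal sketch of such a helper, assuming it simply blocks on the Future and surfaces any failure as an unchecked exception:

// Hypothetical await helper: block on the submitted task's Future,
// rethrowing failures unchecked so forEach can accept it as a Consumer.
private void await(Future<?> f) {
    try {
        f.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    } catch (Exception ex) {
        throw new CompletionException(ex);
    }
}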