Use of io.pravega.common.util.ByteArraySegment in project pravega by pravega.
The class DurableLogTests, method testTailReads.
/**
 * Tests the ability to block reads if the read is at the tail and no more data is available (for now).
 */
@Test
public void testTailReads() throws Exception {
    final int operationCount = 10;
    final long segmentId = 1;
    final String segmentName = Long.toString(segmentId);

    // Set up a DurableLog and start it.
    @Cleanup
    ContainerSetup setup = new ContainerSetup(executorService());
    @Cleanup
    DurableLog durableLog = setup.createDurableLog();
    durableLog.startAsync().awaitRunning();

    // Create a segment, which will be used for testing later.
    UpdateableSegmentMetadata segmentMetadata = setup.metadata.mapStreamSegmentId(segmentName, segmentId);
    segmentMetadata.setLength(0);
    segmentMetadata.setStorageLength(0);

    // A MetadataCheckpointOperation gets auto-queued upon the first startup. Get it out of our way for this test.
    val checkpointRead = durableLog.read(1, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    Assert.assertTrue("Expected first read operation to be a MetadataCheckpointOperation.",
            checkpointRead.size() == 1 && checkpointRead.poll() instanceof MetadataCheckpointOperation);

    // Set up a read operation and make sure it is blocked (since there is no data).
    val readFuture = durableLog.read(operationCount, TIMEOUT);
    Assert.assertFalse("read() returned a completed future when there is no data available.", readFuture.isDone());

    // Add one operation and verify that the read was activated.
    OperationComparer operationComparer = new OperationComparer(true);
    Operation operation = new StreamSegmentAppendOperation(segmentId, new ByteArraySegment("TestData".getBytes()), null);
    durableLog.add(operation, OperationPriority.Normal, TIMEOUT).join();

    // The internal callback happens asynchronously, so wait a bit for this future to complete.
    val readResult = readFuture.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);

    // Verify that we actually have a non-empty read result.
    Assert.assertFalse(readResult.isEmpty());

    // Verify the read result.
    Operation readOp = readResult.poll();
    operationComparer.assertEquals("Unexpected result operation for read.", operation, readOp);

    // Verify that we don't have more than one read result.
    Assert.assertTrue(readResult.isEmpty());

    // Verify that such reads are cancelled when the DurableLog is closed.
    val cancelledRead = durableLog.read(operationCount, TIMEOUT);
    Assert.assertFalse("read() returned a completed future when there is no data available (afterSeqNo = MAX).", cancelledRead.isDone());
    durableLog.stopAsync().awaitTerminated();
    Assert.assertTrue("A tail read was not cancelled when the DurableLog was stopped.", cancelledRead.isCancelled());
}
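
For reference, read() completes with a queue of Operations (the size()/poll()/isEmpty() calls above suggest a java.util.Queue). A minimal sketch, reusing durableLog and TIMEOUT from the test and assuming that return type, of draining everything a single tail read delivers:

    // Sketch only: drain all operations delivered by one read() call.
    Queue<Operation> operations = durableLog.read(100, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    Operation op;
    while ((op = operations.poll()) != null) {
        System.out.println("SeqNo " + op.getSequenceNumber() + ": " + op);
    }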
Use of io.pravega.common.util.ByteArraySegment in project pravega by pravega.
The class TableUpdate, method generateKey.
static BufferView generateKey(UUID keyId, int keyLength) {
    assert keyLength >= 8 : "keyLength must be at least 8 bytes";
    // "Serialize" the KeyId by repeating its two longs until the key is filled.
    val result = new ByteArraySegment(new byte[keyLength]);
    int count = keyLength >> 4; // 16 bytes (two longs) per iteration.
    int offset = 0;
    for (int i = 0; i < count; i++) {
        result.setLong(offset, keyId.getMostSignificantBits());
        result.setLong(offset + 8, keyId.getLeastSignificantBits());
        offset += 16;
    }
    if (keyLength - offset >= 8) {
        result.setLong(offset, keyId.getMostSignificantBits());
    }
    return result;
}
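
To illustrate the resulting layout (values made up for the example): with keyLength = 24, bytes 0-7 hold the most significant half of the UUID, bytes 8-15 the least significant half, and bytes 16-23 the most significant half again; any trailing remainder shorter than 8 bytes stays zeroed.

    // Sketch: inspect the repeated layout of a generated key.
    UUID id = new UUID(1L, 2L);
    byte[] key = generateKey(id, 24).getCopy(); // getCopy() materializes the BufferView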
Use of io.pravega.common.util.ByteArraySegment in project pravega by pravega.
The class SegmentStoreReader, method readExact.
@Override
public CompletableFuture<ReadItem> readExact(String segmentName, Object address) {
    Exceptions.checkNotNullOrEmpty(segmentName, "segmentName");
    Preconditions.checkArgument(address instanceof Address, "Unexpected address type.");
    Address a = (Address) address;
    return this.store.read(segmentName, a.offset, a.length, this.testConfig.getTimeout())
            .thenApplyAsync(readResult -> {
                byte[] data = new byte[a.length];
                readResult.readRemaining(data, this.testConfig.getTimeout());
                return new SegmentStoreReadItem(new Event(new ByteArraySegment(data), 0), address);
            }, this.executor);
}
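
One caveat: readRemaining reports how many bytes it actually read, which the code above discards. A defensive variant (a sketch, not the project's code) would wrap only the filled prefix, using ByteArraySegment's (array, offset, length) constructor:

    byte[] data = new byte[a.length];
    int bytesRead = readResult.readRemaining(data, this.testConfig.getTimeout());
    // Wrap only the bytes that were actually read, in case the read came up short.
    return new SegmentStoreReadItem(new Event(new ByteArraySegment(data, 0, bytesRead), 0), address);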
Use of io.pravega.common.util.ByteArraySegment in project pravega by pravega.
The class SegmentAggregatorTests, method generateAppendAndUpdateMetadata.
private StorageOperation generateAppendAndUpdateMetadata(long segmentId, byte[] data, TestContext context) {
    UpdateableSegmentMetadata segmentMetadata = context.containerMetadata.getStreamSegmentMetadata(segmentId);
    long offset = segmentMetadata.getLength();
    segmentMetadata.setLength(offset + data.length);
    StreamSegmentAppendOperation op = new StreamSegmentAppendOperation(segmentId, new ByteArraySegment(data), null);
    op.setStreamSegmentOffset(offset);
    op.setSequenceNumber(context.containerMetadata.nextOperationSequenceNumber());
    context.dataSource.recordAppend(op);
    return new CachedStreamSegmentAppendOperation(op);
}
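
Note the return value: judging by its name, CachedStreamSegmentAppendOperation carries the append's metadata (offset, length, sequence number) while the payload itself is served from context.dataSource, which is why recordAppend is called first. A usage sketch with illustrative, test-local names:

    // Sketch: generate an append whose payload lives in the test's data source.
    byte[] payload = new byte[100];
    new Random(0).nextBytes(payload);
    StorageOperation appendOp = generateAppendAndUpdateMetadata(segmentId, payload, context);
    // appendOp's segment offset equals the segment's length before the append.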
Use of io.pravega.common.util.ByteArraySegment in project pravega by pravega.
The class ContainerReadIndexTests, method testReadDirect.
/**
 * Tests the readDirect() method on the ReadIndex.
 */
@Test
public void testReadDirect() throws Exception {
    final int randomAppendLength = 1024;
    @Cleanup
    TestContext context = new TestContext();
    ArrayList<Long> segmentIds = new ArrayList<>();
    final long segmentId = createSegment(0, context);
    final UpdateableSegmentMetadata segmentMetadata = context.metadata.getStreamSegmentMetadata(segmentId);
    segmentIds.add(segmentId);
    HashMap<Long, ArrayList<Long>> transactionsBySegment = createTransactions(segmentIds, 1, context);
    final long mergedTxId = transactionsBySegment.get(segmentId).get(0);

    // Add data to all segments.
    HashMap<Long, ByteArrayOutputStream> segmentContents = new HashMap<>();
    transactionsBySegment.values().forEach(segmentIds::addAll);
    appendData(segmentIds, segmentContents, context);

    // Mark everything so far (minus a few bytes) as being written to storage.
    segmentMetadata.setStorageLength(segmentMetadata.getLength() - 100);

    // Now begin merging the transaction, but leave the merge incomplete.
    final long mergedTxOffset = beginMergeTransaction(mergedTxId, segmentMetadata, segmentContents, context);

    // Add one more append after all of this.
    final long endOfMergedDataOffset = segmentMetadata.getLength();
    byte[] appendData = new byte[randomAppendLength];
    new Random(0).nextBytes(appendData);
    appendSingleWrite(segmentId, new ByteArraySegment(appendData), context);
    recordAppend(segmentId, new ByteArraySegment(appendData), segmentContents);

    // Verify we are not allowed to read from the range which has already been committed to Storage (invalid arguments).
    for (AtomicLong offset = new AtomicLong(0); offset.get() < segmentMetadata.getStorageLength(); offset.incrementAndGet()) {
        AssertExtensions.assertThrows(
                String.format("readDirect allowed reading from an illegal offset (%s).", offset),
                () -> context.readIndex.readDirect(segmentId, offset.get(), 1),
                ex -> ex instanceof IllegalArgumentException);
    }

    // Verify that any reads overlapping a merged transaction return null (that is, we cannot retrieve the requested data).
    for (long offset = mergedTxOffset - 1; offset < endOfMergedDataOffset; offset++) {
        val resultData = context.readIndex.readDirect(segmentId, offset, 2);
        Assert.assertNull("readDirect() returned data overlapping a partially merged transaction.", resultData);
    }

    // Verify that we can read from any other offset.
    final byte[] expectedData = segmentContents.get(segmentId).toByteArray();
    BiConsumer<Long, Long> verifyReadResult = (startOffset, endOffset) -> {
        int readLength = (int) (endOffset - startOffset);
        while (readLength > 0) {
            BufferView actualDataBuffer;
            try {
                actualDataBuffer = context.readIndex.readDirect(segmentId, startOffset, readLength);
            } catch (StreamSegmentNotExistsException ex) {
                throw new CompletionException(ex);
            }
            Assert.assertNotNull(
                    String.format("Unexpected result when data is readily available for Offset = %s, Length = %s.", startOffset, readLength),
                    actualDataBuffer);
            byte[] actualData = actualDataBuffer.getCopy();
            AssertExtensions.assertArrayEquals("Unexpected data read from the segment at offset " + startOffset,
                    expectedData, startOffset.intValue(), actualData, 0, actualData.length);

            // Set up the read for the next iteration (where we read 1 byte fewer than now).
            readLength--;
            if (readLength % 2 == 0) {
                // For every 2 bytes of decreased read length, increase the start offset by 1. This allows a greater
                // number of combinations to be tested.
                startOffset++;
            }
        }
    };

    // Verify that we can read the cached data just after the StorageLength but before the merged transaction.
    verifyReadResult.accept(segmentMetadata.getStorageLength(), mergedTxOffset);

    // Verify that we can read the cached data just after the merged transaction but before the end of the segment.
    verifyReadResult.accept(endOfMergedDataOffset, segmentMetadata.getLength());
}
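
For reference, the readDirect() contract exercised by this test reduces to a simple pattern. A minimal sketch (illustrative startOffset/readLength, same context as above; note that readDirect can also throw StreamSegmentNotExistsException):

    BufferView buffer = context.readIndex.readDirect(segmentId, startOffset, readLength);
    if (buffer == null) {
        // The range is not fully retrievable: it is already committed to Storage
        // or it overlaps a partially merged transaction.
    } else {
        byte[] bytes = buffer.getCopy(); // safe to materialize and compare
    }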