Example usage of io.pravega.segmentstore.storage.Storage in the pravega project — class StorageReadManagerTests, method testInvalidRequests:
/**
 * Tests the execute method with invalid Requests.
 * * StreamSegment does not exist
 * * Invalid read offset
 * * Too long of a read (offset+length is beyond the Segment's length)
 */
@Test
public void testInvalidRequests() {
    @Cleanup
    Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    byte[] writtenData = populateSegment(storage);
    @Cleanup
    StorageReadManager reader = new StorageReadManager(SEGMENT_METADATA, storage, executorService());

    // Case 1: the target StreamSegment is missing from Storage.
    AssertExtensions.assertThrows("Request was not failed when StreamSegment does not exist.",
            () -> {
                SegmentMetadata missingSegmentMetadata = new StreamSegmentMetadata("foo", 0, 0);
                @Cleanup
                StorageReadManager nonExistentReader = new StorageReadManager(missingSegmentMetadata, storage, executorService());
                sendRequest(nonExistentReader, 0, 1).join();
            },
            ex -> ex instanceof StreamSegmentNotExistsException);

    // Case 2: the read offset lies past the end of the segment.
    AssertExtensions.assertThrows("Request was not failed when bad offset was provided.",
            () -> sendRequest(reader, writtenData.length + 1, 1),
            ex -> ex instanceof ArrayIndexOutOfBoundsException);

    // Case 3: the offset is valid, but offset + length exceeds the segment's length.
    AssertExtensions.assertThrows("Request was not failed when bad offset + length was provided.",
            () -> sendRequest(reader, writtenData.length - 1, 2),
            ex -> ex instanceof ArrayIndexOutOfBoundsException);
}
Example usage of io.pravega.segmentstore.storage.Storage in the pravega project — class StorageReadManagerTests, method testValidRequests:
/**
 * Tests the execute method with valid Requests:
 * * All StreamSegments exist and have enough data.
 * * All read offsets are valid (but we may choose to read more than the length of the Segment).
 * * ReadRequests may overlap.
 */
@Test
public void testValidRequests() throws Exception {
    final int defaultReadLength = MIN_SEGMENT_LENGTH - 1;
    final int offsetIncrement = defaultReadLength / 3;
    @Cleanup
    Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    byte[] segmentData = populateSegment(storage);
    @Cleanup
    StorageReadManager reader = new StorageReadManager(SEGMENT_METADATA, storage, executorService());

    // Issue a series of overlapping read requests that together cover the whole segment.
    HashMap<StorageReadManager.Request, CompletableFuture<StorageReadManager.Result>> requestCompletions = new HashMap<>();
    for (int readOffset = 0; readOffset < segmentData.length; readOffset += offsetIncrement) {
        int readLength = Math.min(defaultReadLength, segmentData.length - readOffset);
        CompletableFuture<StorageReadManager.Result> requestCompletion = new CompletableFuture<>();
        StorageReadManager.Request request = new StorageReadManager.Request(
                readOffset, readLength, requestCompletion::complete, requestCompletion::completeExceptionally, TIMEOUT);
        reader.execute(request);
        requestCompletions.put(request, requestCompletion);
    }

    // Check that the read requests returned with the right data.
    for (val entry : requestCompletions.entrySet()) {
        StorageReadManager.Request request = entry.getKey();
        StorageReadManager.Result readData = entry.getValue().join();
        int expectedReadLength = Math.min(request.getLength(), (int) (segmentData.length - request.getOffset()));
        Assert.assertNotNull("No data returned for request " + request, readData);
        Assert.assertEquals("Unexpected read length for request " + request, expectedReadLength, readData.getData().getLength());
        AssertExtensions.assertStreamEquals("Unexpected read contents for request " + request,
                new ByteArrayInputStream(segmentData, (int) request.getOffset(), expectedReadLength),
                readData.getData().getReader(), expectedReadLength);
    }
}
Example usage of io.pravega.segmentstore.storage.Storage in the pravega project — class StreamSegmentStorageReaderTests, method populate:
/**
 * Creates the test segment in the given Storage and fills it with SEGMENT_LENGTH bytes of
 * deterministic pseudo-random data (fixed seed 0), written as a sequence of appends of
 * roughly SEGMENT_LENGTH / SEGMENT_APPEND_COUNT bytes each.
 *
 * @param s The Storage instance to populate.
 * @return The exact bytes that were written to the segment.
 */
private byte[] populate(Storage s) {
    byte[] data = new byte[SEGMENT_LENGTH];
    val rnd = new Random(0);
    rnd.nextBytes(data);
    val handle = s.create(SEGMENT_NAME, TIMEOUT).thenCompose(si -> s.openWrite(SEGMENT_NAME)).join();
    // Guard against appendSize == 0 (SEGMENT_APPEND_COUNT > SEGMENT_LENGTH), which would
    // otherwise write nothing (or loop forever with the condition below).
    final int appendSize = Math.max(1, data.length / SEGMENT_APPEND_COUNT);
    int offset = 0;
    // Bug fix: the original loop condition was 'i < appendSize' (iterating appendSize times
    // instead of until the data is exhausted). That issued pointless zero-length writes when
    // appendSize > SEGMENT_APPEND_COUNT and left the segment only partially written when
    // appendSize < SEGMENT_APPEND_COUNT. Loop until every byte has been written instead.
    while (offset < data.length) {
        int writeLength = Math.min(appendSize, data.length - offset);
        s.write(handle, offset, new ByteArrayInputStream(data, offset, writeLength), writeLength, TIMEOUT).join();
        offset += writeLength;
    }
    return data;
}
Example usage of io.pravega.segmentstore.storage.Storage in the pravega project — class IdempotentStorageTestBase, method testParallelWriteTwoHosts:
// endregion
// region synchronization unit tests
/**
 * This test case simulates two hosts writing at the same offset at the same time.
 * Each round issues the same write through two Storage instances; exactly one may fail with
 * BadOffsetException, and the final segment length must reflect a single copy of each append.
 */
@Test(timeout = 30000)
public void testParallelWriteTwoHosts() {
    String segmentName = "foo_write";
    int appendCount = 5;
    try (Storage s1 = createStorage();
         Storage s2 = createStorage()) {
        s1.initialize(DEFAULT_EPOCH);
        s1.create(segmentName, TIMEOUT).join();
        // NOTE(review): only s1 is initialized before s2.openWrite; preserved from the
        // original — confirm this is intentional for the Storage implementations under test.
        SegmentHandle writeHandle1 = s1.openWrite(segmentName).join();
        SegmentHandle writeHandle2 = s2.openWrite(segmentName).join();
        long offset = 0;
        byte[] writeData = String.format("Segment_%s_Append", segmentName).getBytes();
        for (int j = 0; j < appendCount; j++) {
            ByteArrayInputStream dataStream1 = new ByteArrayInputStream(writeData);
            ByteArrayInputStream dataStream2 = new ByteArrayInputStream(writeData);
            // Fix: use the parameterized CompletableFuture<Void> instead of the raw type.
            CompletableFuture<Void> f1 = s1.write(writeHandle1, offset, dataStream1, writeData.length, TIMEOUT);
            CompletableFuture<Void> f2 = s2.write(writeHandle2, offset, dataStream2, writeData.length, TIMEOUT);
            assertMayThrow("Write expected to complete OR throw BadOffsetException." + "threw an unexpected exception.",
                    () -> CompletableFuture.allOf(f1, f2),
                    ex -> ex instanceof BadOffsetException);
            // Make sure at least one operation is success.
            Assert.assertTrue("At least one of the two parallel writes should succeed.",
                    !f1.isCompletedExceptionally() || !f2.isCompletedExceptionally());
            offset += writeData.length;
        }
        // assertEquals instead of assertTrue(a == b): reports both values on failure.
        Assert.assertEquals("Writes at the same offset are expected to be idempotent.",
                offset, s1.getStreamSegmentInfo(segmentName, TIMEOUT).join().getLength());
        byte[] readBuffer = new byte[writeData.length];
        for (int j = 0; j < appendCount; j++) {
            // Fix: the original messages always reported offset 0 (the 'offset' variable was
            // reset before this loop and never advanced); report the actual read offset.
            long readOffset = (long) j * readBuffer.length;
            int bytesRead = s1.read(writeHandle1, readOffset, readBuffer, 0, readBuffer.length, TIMEOUT).join();
            Assert.assertEquals(String.format("Unexpected number of bytes read from offset %d.", readOffset), readBuffer.length, bytesRead);
            // Bug fix: the original compared readBuffer against itself, which always passed
            // and verified nothing. Compare against the data that was written.
            AssertExtensions.assertArrayEquals(String.format("Unexpected read result from offset %d.", readOffset),
                    writeData, 0, readBuffer, 0, bytesRead);
        }
        s1.delete(writeHandle1, TIMEOUT).join();
    }
}
Example usage of io.pravega.segmentstore.storage.Storage in the pravega project — class IdempotentStorageTestBase, method testPartialConcat:
/**
 * This test case simulates host crashing during concat and retrying the operation.
 * A second, identical source segment is concatenated at the same target offset; the
 * target's length must not change (the retry must be idempotent).
 */
@Test(timeout = 30000)
public void testPartialConcat() {
    String segmentName = "foo_write";
    String concatSegmentName = "foo_concat";
    String newConcatSegmentName = "foo_concat0";
    int offset = 0;
    try (Storage s1 = createStorage()) {
        s1.initialize(DEFAULT_EPOCH);
        s1.create(segmentName, TIMEOUT).join();
        s1.create(concatSegmentName, TIMEOUT).join();
        SegmentHandle writeHandle1 = s1.openWrite(segmentName).join();
        SegmentHandle writeHandle2 = s1.openWrite(concatSegmentName).join();
        byte[] writeData = String.format("Segment_%s_Append", segmentName).getBytes();
        ByteArrayInputStream dataStream1 = new ByteArrayInputStream(writeData);
        ByteArrayInputStream dataStream2 = new ByteArrayInputStream(writeData);
        s1.write(writeHandle1, offset, dataStream1, writeData.length, TIMEOUT).join();
        s1.write(writeHandle2, offset, dataStream2, writeData.length, TIMEOUT).join();
        s1.seal(writeHandle2, TIMEOUT).join();
        // This will append the segments and delete the concat segment.
        s1.concat(writeHandle1, writeData.length, concatSegmentName, TIMEOUT).join();
        long lengthBeforeRetry = s1.getStreamSegmentInfo(segmentName, TIMEOUT).join().getLength();
        // Create the segment again.
        s1.create(newConcatSegmentName, TIMEOUT).join();
        writeHandle2 = s1.openWrite(newConcatSegmentName).join();
        dataStream2 = new ByteArrayInputStream(writeData);
        s1.write(writeHandle2, offset, dataStream2, writeData.length, TIMEOUT).join();
        s1.seal(writeHandle2, TIMEOUT).join();
        // Concat at the same offset again
        s1.concat(writeHandle1, writeData.length, newConcatSegmentName, TIMEOUT).join();
        long lengthAfterRetry = s1.getStreamSegmentInfo(segmentName, TIMEOUT).join().getLength();
        // assertEquals instead of assertTrue(a == b): reports both values on failure.
        Assert.assertEquals(String.format("Concatenation of same segment at the same offset(%d) should result in " + "same segment size(%d), but is (%d)",
                writeData.length, lengthBeforeRetry, lengthAfterRetry), lengthBeforeRetry, lengthAfterRetry);
        // Verify the data
        byte[] readBuffer = new byte[writeData.length];
        for (int j = 0; j < 2; j++) {
            // Fix: the original messages always reported offset 0 ('offset' never advanced);
            // report the actual read offset for each chunk.
            long readOffset = (long) j * readBuffer.length;
            int bytesRead = s1.read(writeHandle1, readOffset, readBuffer, 0, readBuffer.length, TIMEOUT).join();
            Assert.assertEquals(String.format("Unexpected number of bytes read from offset %d.", readOffset), readBuffer.length, bytesRead);
            // Bug fix: the original compared readBuffer against itself, which always passed
            // and verified nothing. Both chunks must equal the data that was written.
            AssertExtensions.assertArrayEquals(String.format("Unexpected read result from offset %d.", readOffset),
                    writeData, 0, readBuffer, 0, bytesRead);
        }
        s1.delete(writeHandle1, TIMEOUT).join();
    }
}
Aggregations