Use of io.pravega.segmentstore.storage.Storage in project pravega by pravega.
From the class IdempotentStorageTestBase, method testWrite.
/**
 * Tests the write() method.
 *
 * @throws Exception if an unexpected error occurred.
 */
@Override
@Test(timeout = 30000)
public void testWrite() throws Exception {
    String segmentName = "foo_write";
    int appendCount = 100;
    try (Storage s = createStorage()) {
        s.initialize(DEFAULT_EPOCH);
        s.create(segmentName, TIMEOUT).join();

        // Invalid handle.
        val readOnlyHandle = s.openRead(segmentName).join();
        assertThrows("write() did not throw for read-only handle.", () -> s.write(readOnlyHandle, 0, new ByteArrayInputStream("h".getBytes()), 1, TIMEOUT), ex -> ex instanceof IllegalArgumentException);
        assertThrows("write() did not throw for handle pointing to inexistent segment.", () -> s.write(createInexistentSegmentHandle(s, false), 0, new ByteArrayInputStream("h".getBytes()), 1, TIMEOUT), ex -> ex instanceof StreamSegmentNotExistsException);

        val writeHandle = s.openWrite(segmentName).join();
        long offset = 0;
        for (int j = 0; j < appendCount; j++) {
            byte[] writeData = String.format("Segment_%s_Append_%d", segmentName, j).getBytes();
            ByteArrayInputStream dataStream = new ByteArrayInputStream(writeData);
            s.write(writeHandle, offset, dataStream, writeData.length, TIMEOUT).join();
            offset += writeData.length;
        }

        // Check bad offset.
        final long finalOffset = offset;
        assertThrows("write() did not throw bad offset write (larger).", () -> s.write(writeHandle, finalOffset + 1, new ByteArrayInputStream("h".getBytes()), 1, TIMEOUT), ex -> ex instanceof BadOffsetException);

        // Check post-delete write.
        s.delete(writeHandle, TIMEOUT).join();
        assertThrows("write() did not throw for a deleted StreamSegment.", () -> s.write(writeHandle, 0, new ByteArrayInputStream(new byte[1]), 1, TIMEOUT), ex -> ex instanceof StreamSegmentNotExistsException);
    }
}
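For orientation, the write lifecycle exercised above reduces to a handful of Storage calls. The sketch below is illustrative rather than taken from the Pravega sources: it uses only operations already shown in the test (create, openWrite, write, seal), while the class name, method name, import locations and the 30-second timeout are assumptions made for the example.

import io.pravega.segmentstore.storage.SegmentHandle;
import io.pravega.segmentstore.storage.Storage;

import java.io.ByteArrayInputStream;
import java.time.Duration;
import java.util.List;

public final class StorageWriteSketch {
    private static final Duration TIMEOUT = Duration.ofSeconds(30);

    private StorageWriteSketch() {
    }

    // Appends each payload to a freshly created segment at strictly increasing offsets, then seals it.
    public static void appendAll(Storage storage, String segmentName, List<byte[]> payloads) {
        storage.create(segmentName, TIMEOUT).join();
        SegmentHandle handle = storage.openWrite(segmentName).join();   // a read-only handle from openRead() would be rejected by write().
        long offset = 0;
        for (byte[] data : payloads) {
            storage.write(handle, offset, new ByteArrayInputStream(data), data.length, TIMEOUT).join();
            offset += data.length;                                      // each write must start exactly at the current segment length.
        }
        storage.seal(handle, TIMEOUT).join();                           // after sealing (or delete()), further writes fail.
    }
}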
Use of io.pravega.segmentstore.storage.Storage in project pravega by pravega.
From the class FileSystemStorageTest, method testWrite.
// region Write tests with metrics checks

/**
 * Tests the write() method.
 *
 * @throws Exception if an unexpected error occurred.
 */
@Override
@Test(timeout = 30000)
public void testWrite() throws Exception {
    String segmentName = "foo_write";
    int appendCount = 100;
    try (Storage s = createStorage()) {
        s.initialize(DEFAULT_EPOCH);
        s.create(segmentName, TIMEOUT).join();
        long expectedMetricsSize = FileSystemMetrics.WRITE_BYTES.get();
        long expectedMetricsSuccesses = FileSystemMetrics.WRITE_LATENCY.toOpStatsData().getNumSuccessfulEvents();

        // Invalid handle.
        val readOnlyHandle = s.openRead(segmentName).join();
        assertThrows("write() did not throw for read-only handle.", () -> s.write(readOnlyHandle, 0, new ByteArrayInputStream("h".getBytes()), 1, TIMEOUT), ex -> ex instanceof IllegalArgumentException);
        assertThrows("write() did not throw for handle pointing to inexistent segment.", () -> s.write(createInexistentSegmentHandle(s, false), 0, new ByteArrayInputStream("h".getBytes()), 1, TIMEOUT), ex -> ex instanceof StreamSegmentNotExistsException);
        Assert.assertEquals("WRITE_BYTES should not change in case of unsuccessful writes", expectedMetricsSize, FileSystemMetrics.WRITE_BYTES.get());
        Assert.assertEquals("WRITE_LATENCY should not increase the count of successful events in case of unsuccessful writes", expectedMetricsSuccesses, FileSystemMetrics.WRITE_LATENCY.toOpStatsData().getNumSuccessfulEvents());

        val writeHandle = s.openWrite(segmentName).join();
        long offset = 0;
        for (int j = 0; j < appendCount; j++) {
            byte[] writeData = String.format("Segment_%s_Append_%d", segmentName, j).getBytes();
            ByteArrayInputStream dataStream = new ByteArrayInputStream(writeData);
            s.write(writeHandle, offset, dataStream, writeData.length, TIMEOUT).join();
            expectedMetricsSize += writeData.length;
            expectedMetricsSuccesses += 1;
            Assert.assertEquals("WRITE_LATENCY should increase the count of successful events in case of successful writes", expectedMetricsSuccesses, FileSystemMetrics.WRITE_LATENCY.toOpStatsData().getNumSuccessfulEvents());
            Assert.assertEquals("WRITE_BYTES should increase by the size of successful writes", expectedMetricsSize, FileSystemMetrics.WRITE_BYTES.get());
            offset += writeData.length;
        }

        // Check bad offset.
        final long finalOffset = offset;
        assertThrows("write() did not throw bad offset write (larger).", () -> s.write(writeHandle, finalOffset + 1, new ByteArrayInputStream("h".getBytes()), 1, TIMEOUT), ex -> ex instanceof BadOffsetException);
        Assert.assertEquals("WRITE_BYTES should not change in case of unsuccessful writes", expectedMetricsSize, FileSystemMetrics.WRITE_BYTES.get());
        Assert.assertEquals("WRITE_LATENCY should not increase the count of successful events in case of unsuccessful writes", expectedMetricsSuccesses, FileSystemMetrics.WRITE_LATENCY.toOpStatsData().getNumSuccessfulEvents());

        // Check post-delete write.
        s.delete(writeHandle, TIMEOUT).join();
        assertThrows("write() did not throw for a deleted StreamSegment.", () -> s.write(writeHandle, 0, new ByteArrayInputStream(new byte[1]), 1, TIMEOUT), ex -> ex instanceof StreamSegmentNotExistsException);
        Assert.assertEquals("WRITE_BYTES should not change in case of unsuccessful writes", expectedMetricsSize, FileSystemMetrics.WRITE_BYTES.get());
        Assert.assertEquals("WRITE_LATENCY should not increase the count of successful events in case of unsuccessful writes", expectedMetricsSuccesses, FileSystemMetrics.WRITE_LATENCY.toOpStatsData().getNumSuccessfulEvents());
    }
}
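Every metric assertion above follows the same capture-then-compare pattern: read the counters, perform one write attempt, and assert the exact expected delta. A hedged sketch of that pattern as a reusable helper follows; the class and method names are invented, and the import location of FileSystemMetrics is assumed, but the metric accessors are exactly the ones used in the test.

import io.pravega.segmentstore.storage.impl.filesystem.FileSystemMetrics;

import org.junit.Assert;

public final class WriteMetricChecks {
    private WriteMetricChecks() {
    }

    // Runs one write attempt and asserts that WRITE_BYTES and the successful-event count of WRITE_LATENCY
    // moved by exactly the expected amounts (zero for attempts that are expected to fail).
    public static void assertWriteMetricDeltas(Runnable writeAttempt, long expectedByteDelta, long expectedSuccessDelta) {
        long bytesBefore = FileSystemMetrics.WRITE_BYTES.get();
        long successesBefore = FileSystemMetrics.WRITE_LATENCY.toOpStatsData().getNumSuccessfulEvents();

        writeAttempt.run();

        Assert.assertEquals("Unexpected WRITE_BYTES delta.", bytesBefore + expectedByteDelta, FileSystemMetrics.WRITE_BYTES.get());
        Assert.assertEquals("Unexpected WRITE_LATENCY success-count delta.", successesBefore + expectedSuccessDelta, FileSystemMetrics.WRITE_LATENCY.toOpStatsData().getNumSuccessfulEvents());
    }
}

With such a helper, a successful append in the loop above would be checked with deltas of writeData.length and 1, while the rejected writes (read-only handle, missing segment, bad offset, deleted segment) would be checked with deltas of zero.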
Use of io.pravega.segmentstore.storage.Storage in project pravega by pravega.
From the class RollingStorageTestBase, method testSuccessiveConcats.
/**
 * Tests a scenario that concatenates various segments successively into an initially empty segment while not
 * producing an excessive number of chunks. The initial concat will use a header merge since the segment has no chunks,
 * but successive concats should unseal the last chunk and concat to it using the native method.
 * <p>
 * NOTE: this could be moved down into RollingStorageTests.java; however, keeping it here ensures that unseal() is
 * exercised in all classes that derive from this one, which is all of the Storage implementations.
 *
 * @throws Exception If one occurred.
 */
@Test
public void testSuccessiveConcats() throws Exception {
    final String segmentName = "Segment";
    final int writeLength = 21;
    final int concatCount = 10;
    @Cleanup val s = createStorage();
    s.initialize(1);

    // Create Target Segment with infinite rolling. Do not write anything to it yet.
    val writeHandle = s.create(segmentName, SegmentRollingPolicy.NO_ROLLING, TIMEOUT).thenCompose(v -> s.openWrite(segmentName)).join();
    final Random rnd = new Random(0);
    byte[] writeBuffer = new byte[writeLength];
    val writeStream = new ByteArrayOutputStream();
    for (int i = 0; i < concatCount; i++) {
        // Create a source segment, write a little bit to it, then seal & merge it.
        String sourceSegment = segmentName + "_Source_" + i;
        val sourceHandle = s.create(sourceSegment, TIMEOUT).thenCompose(v -> s.openWrite(sourceSegment)).join();
        rnd.nextBytes(writeBuffer);
        s.write(sourceHandle, 0, new ByteArrayInputStream(writeBuffer), writeBuffer.length, TIMEOUT).join();
        s.seal(sourceHandle, TIMEOUT).join();
        s.concat(writeHandle, writeStream.size(), sourceSegment, TIMEOUT).join();
        writeStream.write(writeBuffer);
    }

    // Write directly to the target segment - this ensures that writes themselves won't create a new chunk if the
    // write can still fit into the last chunk.
    rnd.nextBytes(writeBuffer);
    s.write(writeHandle, writeStream.size(), new ByteArrayInputStream(writeBuffer), writeBuffer.length, TIMEOUT).join();
    writeStream.write(writeBuffer);

    // Get a read handle, which will also fetch the number of chunks for us.
    val readHandle = (RollingSegmentHandle) s.openRead(segmentName).join();
    Assert.assertEquals("Unexpected number of chunks created.", 1, readHandle.chunks().size());

    val writtenData = writeStream.toByteArray();
    byte[] readBuffer = new byte[writtenData.length];
    int bytesRead = s.read(readHandle, 0, readBuffer, 0, readBuffer.length, TIMEOUT).join();
    Assert.assertEquals("Unexpected number of bytes read.", readBuffer.length, bytesRead);
    Assert.assertArrayEquals("Unexpected data read back.", writtenData, readBuffer);
}
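The heart of the scenario is the seal-then-concat sequence: a source segment must be sealed before it can be merged, and the concat offset must equal the target's current length so that successive merges land back-to-back. A minimal, hedged restatement of that sequence is shown below; it uses only Storage operations that appear in the test above, while the class name, method name and parameters are illustrative.

import io.pravega.segmentstore.storage.SegmentHandle;
import io.pravega.segmentstore.storage.Storage;

import java.io.ByteArrayInputStream;
import java.time.Duration;

public final class ConcatSketch {
    private ConcatSketch() {
    }

    // Writes the payload into a short-lived source segment, seals it, and merges it into the target
    // at the target's current length. Returns the new target length, to be passed to the next call.
    public static long sealAndConcat(Storage storage, SegmentHandle targetHandle, String sourceName,
                                     byte[] payload, long targetLength, Duration timeout) {
        SegmentHandle sourceHandle = storage.create(sourceName, timeout)
                                            .thenCompose(v -> storage.openWrite(sourceName))
                                            .join();
        storage.write(sourceHandle, 0, new ByteArrayInputStream(payload), payload.length, timeout).join();
        storage.seal(sourceHandle, timeout).join();                              // concat requires a sealed source.
        storage.concat(targetHandle, targetLength, sourceName, timeout).join();  // merge at the current end of the target.
        return targetLength + payload.length;
    }
}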
Use of io.pravega.segmentstore.storage.Storage in project pravega by pravega.
From the class StreamSegmentContainerTests, method testFutureReads.
/**
 * Tests the ability to perform future (tail) reads. Scenarios tested include:
 * * Regular appends
 * * Segment sealing
 * * Transaction merging.
 */
@Test
public void testFutureReads() throws Exception {
    final int nonSealReadLimit = 100;
    @Cleanup TestContext context = new TestContext();
    context.container.startAsync().awaitRunning();

    // 1. Create the StreamSegments.
    ArrayList<String> segmentNames = createSegments(context);
    HashMap<String, ArrayList<String>> transactionsBySegment = createTransactions(segmentNames, context);
    activateAllSegments(segmentNames, context);
    transactionsBySegment.values().forEach(s -> activateAllSegments(s, context));

    // 2. Set up tail reads. The first half of the segments read up to Integer.MAX_VALUE bytes, while the rest read
    // only nonSealReadLimit bytes (those reads should stop upon reaching the limit).
    HashMap<String, ReadResult> readsBySegment = new HashMap<>();
    ArrayList<AsyncReadResultProcessor> readProcessors = new ArrayList<>();
    HashSet<String> segmentsToSeal = new HashSet<>();
    HashMap<String, ByteArrayOutputStream> readContents = new HashMap<>();
    HashMap<String, TestReadResultHandler> entryHandlers = new HashMap<>();
    for (int i = 0; i < segmentNames.size(); i++) {
        String segmentName = segmentNames.get(i);
        ByteArrayOutputStream readContentsStream = new ByteArrayOutputStream();
        readContents.put(segmentName, readContentsStream);
        ReadResult readResult;
        if (i < segmentNames.size() / 2) {
            // We're going to seal this one at some point.
            segmentsToSeal.add(segmentName);
            readResult = context.container.read(segmentName, 0, Integer.MAX_VALUE, TIMEOUT).join();
        } else {
            // Just a regular one, nothing special.
            readResult = context.container.read(segmentName, 0, nonSealReadLimit, TIMEOUT).join();
        }

        // The Read callback is only accumulating data in this test; we will then compare it against the real data.
        TestReadResultHandler entryHandler = new TestReadResultHandler(readContentsStream, TIMEOUT);
        entryHandlers.put(segmentName, entryHandler);
        readsBySegment.put(segmentName, readResult);
        readProcessors.add(AsyncReadResultProcessor.process(readResult, entryHandler, executorService()));
    }

    // 3. Add some appends.
    HashMap<String, Long> lengths = new HashMap<>();
    HashMap<String, ByteArrayOutputStream> segmentContents = new HashMap<>();
    appendToParentsAndTransactions(segmentNames, transactionsBySegment, lengths, segmentContents, context);

    // 4. Merge all the Transactions.
    mergeTransactions(transactionsBySegment, lengths, segmentContents, context);

    // 5. Add more appends (to the parent segments).
    ArrayList<CompletableFuture<Void>> operationFutures = new ArrayList<>();
    for (int i = 0; i < 5; i++) {
        for (String segmentName : segmentNames) {
            byte[] appendData = getAppendData(segmentName, APPENDS_PER_SEGMENT + i);
            operationFutures.add(context.container.append(segmentName, appendData, null, TIMEOUT));
            lengths.put(segmentName, lengths.getOrDefault(segmentName, 0L) + appendData.length);
            recordAppend(segmentName, appendData, segmentContents);
        }
    }
    segmentsToSeal.forEach(segmentName -> operationFutures.add(Futures.toVoid(context.container.sealStreamSegment(segmentName, TIMEOUT))));
    Futures.allOf(operationFutures).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);

    // Now wait for all the reads to complete, and verify their results against the expected output.
    Futures.allOf(entryHandlers.values().stream().map(h -> h.getCompleted()).collect(Collectors.toList())).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    readProcessors.forEach(AsyncReadResultProcessor::close);

    // Check to see if any errors got thrown (and caught) during the reading process.
    for (Map.Entry<String, TestReadResultHandler> e : entryHandlers.entrySet()) {
        Throwable err = e.getValue().getError().get();
        if (err != null) {
            // The next check (see below) will verify whether the segments were properly read.
            if (!(err instanceof StreamSegmentSealedException && segmentsToSeal.contains(e.getKey()))) {
                Assert.fail("Unexpected error happened while processing Segment " + e.getKey() + ": " + e.getValue().getError().get());
            }
        }
    }

    // Check that all the ReadResults are closed.
    for (Map.Entry<String, ReadResult> e : readsBySegment.entrySet()) {
        Assert.assertTrue("Read result is not closed for segment " + e.getKey(), e.getValue().isClosed());
    }

    // Compare, byte-by-byte, the outcome of the tail reads.
    Assert.assertEquals("Unexpected number of segments were read.", segmentContents.size(), readContents.size());
    for (String segmentName : segmentNames) {
        boolean isSealed = segmentsToSeal.contains(segmentName);
        byte[] expectedData = segmentContents.get(segmentName).toByteArray();
        byte[] actualData = readContents.get(segmentName).toByteArray();
        int expectedLength = isSealed ? (int) (long) lengths.get(segmentName) : nonSealReadLimit;
        Assert.assertEquals("Unexpected read length for segment " + segmentName, expectedLength, actualData.length);
        AssertExtensions.assertArrayEquals("Unexpected read contents for segment " + segmentName, expectedData, 0, actualData, 0, actualData.length);
    }

    // 6. Writer moving data to Storage.
    waitForSegmentsInStorage(segmentNames, context).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    checkStorage(segmentContents, lengths, context);
}
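Stripped of the bookkeeping, each tail read above is wired up in three steps: issue a read whose requested length exceeds the data currently in the segment, attach a handler that simply accumulates whatever arrives, and hand both to an AsyncReadResultProcessor that drives the read asynchronously. The sketch below restates only that wiring; it assumes it lives inside the same test class (so that TestReadResultHandler and the other helpers are visible), and the listed import locations are assumptions.

// Assumed imports (at the top of the test file):
import io.pravega.segmentstore.contracts.ReadResult;
import io.pravega.segmentstore.server.reading.AsyncReadResultProcessor;

import java.io.ByteArrayOutputStream;
import java.time.Duration;
import java.util.concurrent.ScheduledExecutorService;

// Hypothetical helper: starts asynchronous processing of an already-issued (tail) ReadResult and
// returns the stream into which the handler accumulates bytes as they become available.
private ByteArrayOutputStream startTailRead(ReadResult readResult, Duration timeout, ScheduledExecutorService executor) {
    ByteArrayOutputStream readContents = new ByteArrayOutputStream();
    TestReadResultHandler handler = new TestReadResultHandler(readContents, timeout);  // test helper: appends each entry's data to the stream.
    AsyncReadResultProcessor.process(readResult, handler, executor);                   // same call as in the test above.
    return readContents;
}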
Use of io.pravega.segmentstore.storage.Storage in project pravega by pravega.
From the class DurableLogTests, method testTruncateWithRecovery.
/**
 * Tests the truncate() method while performing recovery.
 */
@Test
public void testTruncateWithRecovery() {
    int streamSegmentCount = 50;
    int appendsPerStreamSegment = 20;

    // Set up a DurableLog and start it.
    AtomicReference<TestDurableDataLog> dataLog = new AtomicReference<>();
    AtomicReference<Boolean> truncationOccurred = new AtomicReference<>();
    @Cleanup TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()), dataLog::set);
    @Cleanup Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    UpdateableContainerMetadata metadata = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup InMemoryCacheFactory cacheFactory = new InMemoryCacheFactory();
    @Cleanup CacheManager cacheManager = new CacheManager(DEFAULT_READ_INDEX_CONFIG.getCachePolicy(), executorService());
    @Cleanup ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
    HashSet<Long> streamSegmentIds;
    List<OperationWithCompletion> completionFutures;
    List<Operation> originalOperations;

    // First DurableLog. We use this for generating data.
    try (DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();

        // Generate some test data (we need to do this after we started the DurableLog because, in the process of
        // recovery, it wipes away all existing metadata).
        streamSegmentIds = createStreamSegmentsWithOperations(streamSegmentCount, metadata, durableLog, storage);
        List<Operation> queuedOperations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);
        completionFutures = processOperations(queuedOperations, durableLog);
        OperationWithCompletion.allOf(completionFutures).join();

        // Get a list of all the operations, before any truncation.
        originalOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());

        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }

    // Truncate up to each MetadataCheckpointOperation and:
    // * If the DataLog was truncated:
    //   ** Shut down the DurableLog, re-start it (recovery) and verify the operations are as they should be.
    // At the end, verify that all operations and all entries in the DataLog were truncated.
    DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService());
    try {
        durableLog.startAsync().awaitRunning();
        dataLog.get().setTruncateCallback(seqNo -> truncationOccurred.set(true));
        for (int i = 0; i < originalOperations.size(); i++) {
            Operation currentOperation = originalOperations.get(i);
            if (!(currentOperation instanceof MetadataCheckpointOperation)) {
                // We can only truncate on MetadataCheckpointOperations.
                continue;
            }

            truncationOccurred.set(false);
            durableLog.truncate(currentOperation.getSequenceNumber(), TIMEOUT).join();
            if (truncationOccurred.get()) {
                // Close the current DurableLog and start a brand new one, forcing recovery.
                durableLog.close();
                durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService());
                durableLog.startAsync().awaitRunning();
                dataLog.get().setTruncateCallback(seqNo -> truncationOccurred.set(true));

                // Verify that all operations up to, and including, this one have been removed.
                Iterator<Operation> reader = durableLog.read(-1, 2, TIMEOUT).join();
                Assert.assertTrue("Not expecting an empty log after truncating an operation (a MetadataCheckpoint must always exist).", reader.hasNext());
                verifyFirstItemIsMetadataCheckpoint(reader);
                if (i < originalOperations.size() - 1) {
                    Operation firstOp = reader.next();
                    OperationComparer.DEFAULT.assertEquals(String.format("Unexpected first operation after truncating SeqNo %d.", currentOperation.getSequenceNumber()), originalOperations.get(i + 1), firstOp);
                }
            }
        }
    } finally {
        // This closes whatever instance this variable currently refers to, not necessarily the first one.
        durableLog.close();
    }
}
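The verification inside the loop reduces to two interactions with the DurableLog: truncate at a checkpoint's sequence number, then read from the head of the log and confirm that a MetadataCheckpointOperation is still the first item. A hedged restatement of just that step follows; the class and helper names are invented and the import locations are assumed, but the truncate() and read() calls mirror the ones used above.

import io.pravega.segmentstore.server.logs.DurableLog;
import io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation;
import io.pravega.segmentstore.server.logs.operations.Operation;

import org.junit.Assert;

import java.time.Duration;
import java.util.Iterator;

public final class TruncationChecks {
    private TruncationChecks() {
    }

    // Truncates the log up to (and including) the given checkpoint, then verifies that the log is not
    // empty and still begins with a MetadataCheckpointOperation.
    public static void truncateAndVerifyHead(DurableLog durableLog, Operation checkpoint, Duration timeout) {
        durableLog.truncate(checkpoint.getSequenceNumber(), timeout).join();
        Iterator<Operation> reader = durableLog.read(-1, 2, timeout).join();  // read (up to) the first two operations from the head.
        Assert.assertTrue("The log should never be empty after truncation.", reader.hasNext());
        Assert.assertTrue("The first operation should be a MetadataCheckpointOperation.", reader.next() instanceof MetadataCheckpointOperation);
    }
}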