Use of io.pravega.segmentstore.contracts.StreamSegmentSealedException in project pravega by pravega.
From class StorageTestBase, method testSeal().
/**
 * Tests the seal() method.
 *
 * @throws Exception if an unexpected error occurred.
 */
@Test
public void testSeal() throws Exception {
    final String context = "Seal";
    try (Storage s = createStorage()) {
        s.initialize(DEFAULT_EPOCH);

        // Check that seal() fails for a segment that does not exist.
        assertThrows("seal() did not throw for non-existent segment name.",
                () -> s.seal(createInexistentSegmentHandle(s, false), TIMEOUT),
                ex -> ex instanceof StreamSegmentNotExistsException);

        HashMap<String, ByteArrayOutputStream> appendData = populate(s, context);
        int deleteCount = 0;
        for (String segmentName : appendData.keySet()) {
            val readHandle = s.openRead(segmentName).join();
            assertThrows("seal() did not throw for read-only handle.",
                    () -> s.seal(readHandle, TIMEOUT),
                    ex -> ex instanceof IllegalArgumentException);

            val writeHandle = s.openWrite(segmentName).join();
            s.seal(writeHandle, TIMEOUT).join();

            // Seal is idempotent: resealing an already sealed segment should work.
            s.seal(writeHandle, TIMEOUT).join();
            assertThrows("write() did not throw for a sealed StreamSegment.",
                    () -> s.write(writeHandle, s.getStreamSegmentInfo(segmentName, TIMEOUT).join().getLength(),
                            new ByteArrayInputStream("g".getBytes()), 1, TIMEOUT),
                    ex -> ex instanceof StreamSegmentSealedException);

            // Check post-delete seal. Half of the segments use the existing handle, and half re-acquire it.
            // We want to re-acquire it because openWrite() will return a read-only handle for sealed segments.
            boolean reacquireHandle = (deleteCount++) % 2 == 0;
            SegmentHandle deleteHandle = writeHandle;
            if (reacquireHandle) {
                deleteHandle = s.openWrite(segmentName).join();
            }

            s.delete(deleteHandle, TIMEOUT).join();
            assertThrows("seal() did not throw for a deleted StreamSegment.",
                    () -> s.seal(writeHandle, TIMEOUT),
                    ex -> ex instanceof StreamSegmentNotExistsException);
        }
    }
}
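For orientation, here is a minimal caller-side sketch of the contract the test above exercises. It is illustrative only: the Storage instance storage, the segment name "mySegment", and the TIMEOUT constant are assumptions, not part of the test.

// Hypothetical caller-side usage; assumes 'storage' (an initialized Storage), an existing
// segment named "mySegment", and a TIMEOUT Duration are in scope.
SegmentHandle handle = storage.openWrite("mySegment").join();
storage.seal(handle, TIMEOUT).join();   // seal the segment
storage.seal(handle, TIMEOUT).join();   // idempotent: sealing an already sealed segment succeeds
long length = storage.getStreamSegmentInfo("mySegment", TIMEOUT).join().getLength();
try {
    storage.write(handle, length, new ByteArrayInputStream(new byte[]{0}), 1, TIMEOUT).join();
} catch (CompletionException ex) {
    // Once sealed, any further write is rejected with a StreamSegmentSealedException.
    assert ex.getCause() instanceof StreamSegmentSealedException;
}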
Use of io.pravega.segmentstore.contracts.StreamSegmentSealedException in project pravega by pravega.
From class StreamSegmentContainerTests, method testSegmentSeal().
/**
 * Tests the seal operation on StreamSegments. Also tests the behavior of reads (non-tailing) when encountering
 * the end of a sealed StreamSegment.
 */
@Test
public void testSegmentSeal() throws Exception {
    final int appendsPerSegment = 1;
    @Cleanup
    TestContext context = new TestContext();
    context.container.startAsync().awaitRunning();

    // 1. Create the StreamSegments.
    ArrayList<String> segmentNames = createSegments(context);
    HashMap<String, ByteArrayOutputStream> segmentContents = new HashMap<>();

    // 2. Add some appends.
    ArrayList<CompletableFuture<Void>> appendFutures = new ArrayList<>();
    HashMap<String, Long> lengths = new HashMap<>();
    for (String segmentName : segmentNames) {
        ByteArrayOutputStream segmentStream = new ByteArrayOutputStream();
        segmentContents.put(segmentName, segmentStream);
        for (int i = 0; i < appendsPerSegment; i++) {
            byte[] appendData = getAppendData(segmentName, i);
            appendFutures.add(context.container.append(segmentName, appendData, null, TIMEOUT));
            lengths.put(segmentName, lengths.getOrDefault(segmentName, 0L) + appendData.length);
            segmentStream.write(appendData);
        }
    }

    Futures.allOf(appendFutures).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);

    // 3. Seal the first half of the segments.
    ArrayList<CompletableFuture<Long>> sealFutures = new ArrayList<>();
    for (int i = 0; i < segmentNames.size() / 2; i++) {
        sealFutures.add(context.container.sealStreamSegment(segmentNames.get(i), TIMEOUT));
    }

    Futures.allOf(sealFutures).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);

    // Check that the segments were properly sealed.
    for (int i = 0; i < segmentNames.size(); i++) {
        String segmentName = segmentNames.get(i);
        boolean expectedSealed = i < segmentNames.size() / 2;
        SegmentProperties sp = context.container.getStreamSegmentInfo(segmentName, false, TIMEOUT).join();
        if (expectedSealed) {
            Assert.assertTrue("Segment is not sealed when it should be: " + segmentName, sp.isSealed());
            Assert.assertEquals("Unexpected result from seal() future for segment " + segmentName,
                    sp.getLength(), (long) sealFutures.get(i).join());
            AssertExtensions.assertThrows("Container allowed appending to a sealed segment " + segmentName,
                    context.container.append(segmentName, "foo".getBytes(), null, TIMEOUT)::join,
                    ex -> ex instanceof StreamSegmentSealedException);
        } else {
            Assert.assertFalse("Segment is sealed when it shouldn't be: " + segmentName, sp.isSealed());

            // Verify we can still append to these segments.
            byte[] appendData = "foo".getBytes();
            context.container.append(segmentName, appendData, null, TIMEOUT).join();
            segmentContents.get(segmentName).write(appendData);
            lengths.put(segmentName, lengths.getOrDefault(segmentName, 0L) + appendData.length);
        }
    }

    // 4. Reads (regular reads, not tail reads, and only for the sealed segments).
    waitForOperationsInReadIndex(context.container);
    for (int i = 0; i < segmentNames.size() / 2; i++) {
        String segmentName = segmentNames.get(i);
        long segmentLength = context.container.getStreamSegmentInfo(segmentName, false, TIMEOUT).join().getLength();

        // Read starting 1 byte from the end - make sure it won't hang at the end by turning into a future read.
        final int totalReadLength = 1;
        long expectedCurrentOffset = segmentLength - totalReadLength;
        @Cleanup
        ReadResult readResult = context.container.read(segmentName, expectedCurrentOffset, Integer.MAX_VALUE, TIMEOUT).join();

        int readLength = 0;
        while (readResult.hasNext()) {
            ReadResultEntry readEntry = readResult.next();
            if (readEntry.getStreamSegmentOffset() >= segmentLength) {
                Assert.assertEquals("Unexpected value for isEndOfStreamSegment when reaching the end of sealed segment " + segmentName,
                        ReadResultEntryType.EndOfStreamSegment, readEntry.getType());
                AssertExtensions.assertThrows("ReadResultEntry.getContent() returned a result when reaching the end of sealed segment " + segmentName,
                        readEntry::getContent, ex -> ex instanceof IllegalStateException);
            } else {
                Assert.assertNotEquals("Unexpected value for isEndOfStreamSegment before reaching the end of sealed segment " + segmentName,
                        ReadResultEntryType.EndOfStreamSegment, readEntry.getType());
                Assert.assertTrue("getContent() did not return a completed future for segment " + segmentName,
                        readEntry.getContent().isDone() && !readEntry.getContent().isCompletedExceptionally());
                ReadResultEntryContents readEntryContents = readEntry.getContent().join();
                expectedCurrentOffset += readEntryContents.getLength();
                readLength += readEntryContents.getLength();
            }
        }

        Assert.assertEquals("Unexpected number of bytes read.", totalReadLength, readLength);
        Assert.assertTrue("ReadResult was not closed when reaching the end of sealed segment " + segmentName, readResult.isClosed());
    }

    // 5. Writer moving data to Storage.
    waitForSegmentsInStorage(segmentNames, context).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    checkStorage(segmentContents, lengths, context);
    context.container.stopAsync().awaitTerminated();
}
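The read loop in step 4 above is the general pattern for draining a ReadResult on a sealed segment; a condensed, illustrative version follows. It assumes container, segmentName, and TIMEOUT are in scope, and (as in the test) that the data is already in the read index so getContent() is complete.

ReadResult result = container.read(segmentName, 0, Integer.MAX_VALUE, TIMEOUT).join();
while (result.hasNext()) {
    ReadResultEntry entry = result.next();
    if (entry.getType() == ReadResultEntryType.EndOfStreamSegment) {
        // A sealed segment ends with an EndOfStreamSegment entry instead of turning into a future (tail) read.
        break;
    }
    ReadResultEntryContents contents = entry.getContent().join();
    // ... consume contents.getLength() bytes ...
}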
Use of io.pravega.segmentstore.contracts.StreamSegmentSealedException in project pravega by pravega.
From class StreamSegmentContainerTests, method testFutureReads().
/**
 * Tests the ability to perform future (tail) reads. Scenarios tested include:
 * * Regular appends
 * * Segment sealing
 * * Transaction merging.
 */
@Test
public void testFutureReads() throws Exception {
    final int nonSealReadLimit = 100;
    @Cleanup
    TestContext context = new TestContext();
    context.container.startAsync().awaitRunning();

    // 1. Create the StreamSegments.
    ArrayList<String> segmentNames = createSegments(context);
    HashMap<String, ArrayList<String>> transactionsBySegment = createTransactions(segmentNames, context);
    activateAllSegments(segmentNames, context);
    transactionsBySegment.values().forEach(s -> activateAllSegments(s, context));

    // 2. Set up tail reads. The first half of the segments read up to Integer.MAX_VALUE bytes and will be
    // sealed later on (which terminates those reads); the rest read up to nonSealReadLimit bytes and
    // should stop upon reaching the limit.
    HashMap<String, ReadResult> readsBySegment = new HashMap<>();
    ArrayList<AsyncReadResultProcessor> readProcessors = new ArrayList<>();
    HashSet<String> segmentsToSeal = new HashSet<>();
    HashMap<String, ByteArrayOutputStream> readContents = new HashMap<>();
    HashMap<String, TestReadResultHandler> entryHandlers = new HashMap<>();
    for (int i = 0; i < segmentNames.size(); i++) {
        String segmentName = segmentNames.get(i);
        ByteArrayOutputStream readContentsStream = new ByteArrayOutputStream();
        readContents.put(segmentName, readContentsStream);
        ReadResult readResult;
        if (i < segmentNames.size() / 2) {
            // We're going to seal this one at some point.
            segmentsToSeal.add(segmentName);
            readResult = context.container.read(segmentName, 0, Integer.MAX_VALUE, TIMEOUT).join();
        } else {
            // Just a regular one, nothing special.
            readResult = context.container.read(segmentName, 0, nonSealReadLimit, TIMEOUT).join();
        }

        // The read callback only accumulates data in this test; we will then compare it against the real data.
        TestReadResultHandler entryHandler = new TestReadResultHandler(readContentsStream, TIMEOUT);
        entryHandlers.put(segmentName, entryHandler);
        readsBySegment.put(segmentName, readResult);
        readProcessors.add(AsyncReadResultProcessor.process(readResult, entryHandler, executorService()));
    }

    // 3. Add some appends.
    HashMap<String, Long> lengths = new HashMap<>();
    HashMap<String, ByteArrayOutputStream> segmentContents = new HashMap<>();
    appendToParentsAndTransactions(segmentNames, transactionsBySegment, lengths, segmentContents, context);

    // 4. Merge all the Transactions.
    mergeTransactions(transactionsBySegment, lengths, segmentContents, context);

    // 5. Add more appends (to the parent segments).
    ArrayList<CompletableFuture<Void>> operationFutures = new ArrayList<>();
    for (int i = 0; i < 5; i++) {
        for (String segmentName : segmentNames) {
            byte[] appendData = getAppendData(segmentName, APPENDS_PER_SEGMENT + i);
            operationFutures.add(context.container.append(segmentName, appendData, null, TIMEOUT));
            lengths.put(segmentName, lengths.getOrDefault(segmentName, 0L) + appendData.length);
            recordAppend(segmentName, appendData, segmentContents);
        }
    }

    segmentsToSeal.forEach(segmentName ->
            operationFutures.add(Futures.toVoid(context.container.sealStreamSegment(segmentName, TIMEOUT))));
    Futures.allOf(operationFutures).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);

    // Now wait for all the reads to complete, and verify their results against the expected output.
    Futures.allOf(entryHandlers.values().stream().map(h -> h.getCompleted()).collect(Collectors.toList()))
           .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    readProcessors.forEach(AsyncReadResultProcessor::close);

    // Check to see if any errors got thrown (and caught) during the reading process.
    for (Map.Entry<String, TestReadResultHandler> e : entryHandlers.entrySet()) {
        Throwable err = e.getValue().getError().get();
        if (err != null) {
            // A StreamSegmentSealedException is expected for the segments we sealed; the check below
            // verifies that those segments were still read in full.
            if (!(err instanceof StreamSegmentSealedException && segmentsToSeal.contains(e.getKey()))) {
                Assert.fail("Unexpected error happened while processing Segment " + e.getKey() + ": " + e.getValue().getError().get());
            }
        }
    }

    // Check that all the ReadResults are closed.
    for (Map.Entry<String, ReadResult> e : readsBySegment.entrySet()) {
        Assert.assertTrue("Read result is not closed for segment " + e.getKey(), e.getValue().isClosed());
    }

    // Compare, byte-by-byte, the outcome of the tail reads.
    Assert.assertEquals("Unexpected number of segments were read.", segmentContents.size(), readContents.size());
    for (String segmentName : segmentNames) {
        boolean isSealed = segmentsToSeal.contains(segmentName);
        byte[] expectedData = segmentContents.get(segmentName).toByteArray();
        byte[] actualData = readContents.get(segmentName).toByteArray();
        int expectedLength = isSealed ? (int) (long) lengths.get(segmentName) : nonSealReadLimit;
        Assert.assertEquals("Unexpected read length for segment " + segmentName, expectedLength, actualData.length);
        AssertExtensions.assertArrayEquals("Unexpected read contents for segment " + segmentName,
                expectedData, 0, actualData, 0, actualData.length);
    }

    // 6. Writer moving data to Storage.
    waitForSegmentsInStorage(segmentNames, context).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    checkStorage(segmentContents, lengths, context);
}
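As a rough sketch of the seal/tail-read interaction this test verifies: container, segmentName, TIMEOUT, an executor, and a read-result handler (such as the TestReadResultHandler above) are assumed to be in scope; this is illustrative, not a complete program.

ReadResult tailRead = container.read(segmentName, 0, Integer.MAX_VALUE, TIMEOUT).join();
AsyncReadResultProcessor processor = AsyncReadResultProcessor.process(tailRead, handler, executor);
// Sealing the segment terminates the pending tail read; the handler may observe a
// StreamSegmentSealedException, which callers should treat as a normal end-of-data signal.
// Close the processor once the handler reports completion.
container.sealStreamSegment(segmentName, TIMEOUT).join();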
Use of io.pravega.segmentstore.contracts.StreamSegmentSealedException in project pravega by pravega.
From class ContainerMetadataUpdateTransactionTests, method testPreProcessMergeTransaction().
// endregion

// region MergeTransactionOperation

/**
 * Tests the preProcess method with MergeTransactionOperations.
 * Scenarios:
 * * Recovery/non-recovery mode
 * * Target StreamSegment is sealed
 * * Target StreamSegment is a Transaction
 * * Transaction StreamSegment is already merged
 * * Transaction StreamSegment is not sealed
 */
@Test
public void testPreProcessMergeTransaction() throws Exception {
    UpdateableContainerMetadata metadata = createMetadata();

    // When everything is OK (recovery mode).
    MergeTransactionOperation recoveryMergeOp = createMerge();
    metadata.enterRecoveryMode();
    val txn1 = createUpdateTransaction(metadata);
    AssertExtensions.assertThrows("preProcess(Merge) handled an operation with no Transaction StreamSegment Length set.",
            () -> txn1.preProcessOperation(createMerge()),
            ex -> ex instanceof MetadataUpdateException);

    // In recovery mode, the updater does not set the length; it just validates that it has one.
    recoveryMergeOp.setLength(metadata.getStreamSegmentMetadata(SEALED_TRANSACTION_ID).getLength());
    txn1.preProcessOperation(recoveryMergeOp);
    AssertExtensions.assertLessThan("Unexpected Target StreamSegmentOffset after call to preProcess in recovery mode.",
            0, recoveryMergeOp.getStreamSegmentOffset());
    checkNoSequenceNumberAssigned(recoveryMergeOp, "call to preProcess in recovery mode");
    Assert.assertFalse("preProcess(Merge) seems to have changed the Updater internal state in recovery mode.",
            txn1.getStreamSegmentMetadata(SEALED_TRANSACTION_ID).isMerged());
    Assert.assertFalse("preProcess(Merge) seems to have changed the metadata in recovery mode.",
            metadata.getStreamSegmentMetadata(SEALED_TRANSACTION_ID).isMerged());

    // When everything is OK (non-recovery mode).
    MergeTransactionOperation mergeOp = createMerge();
    metadata.exitRecoveryMode();
    val txn2 = createUpdateTransaction(metadata);
    txn2.preProcessOperation(mergeOp);
    Assert.assertEquals("Unexpected Transaction StreamSegmentLength after call to preProcess in non-recovery mode.",
            SEALED_TRANSACTION_LENGTH, mergeOp.getLength());
    Assert.assertEquals("Unexpected Target StreamSegmentOffset after call to preProcess in non-recovery mode.",
            SEGMENT_LENGTH, mergeOp.getStreamSegmentOffset());
    checkNoSequenceNumberAssigned(mergeOp, "call to preProcess in non-recovery mode");
    Assert.assertFalse("preProcess(Merge) seems to have changed the Updater internal state in non-recovery mode.",
            txn2.getStreamSegmentMetadata(SEALED_TRANSACTION_ID).isMerged());
    Assert.assertFalse("preProcess(Merge) seems to have changed the metadata in non-recovery mode.",
            metadata.getStreamSegmentMetadata(SEALED_TRANSACTION_ID).isMerged());

    // When the Target StreamSegment is sealed.
    StreamSegmentSealOperation sealTargetOp = createSeal();
    txn2.preProcessOperation(sealTargetOp);
    txn2.acceptOperation(sealTargetOp);
    AssertExtensions.assertThrows("Unexpected behavior for preProcess(Merge) when Target StreamSegment is sealed.",
            () -> txn2.preProcessOperation(createMerge()),
            ex -> ex instanceof StreamSegmentSealedException);

    // Roll back the seal.
    txn2.clear();

    // When the Target StreamSegment is a Transaction.
    MergeTransactionOperation mergeToTransactionOp = new MergeTransactionOperation(NOTSEALED_TRANSACTION_ID, SEALED_TRANSACTION_ID);
    AssertExtensions.assertThrows("Unexpected behavior for preProcess(Merge) when Target StreamSegment is a Transaction.",
            () -> txn2.preProcessOperation(mergeToTransactionOp),
            ex -> ex instanceof MetadataUpdateException);

    // When the Transaction is not sealed.
    MergeTransactionOperation mergeNonSealed = new MergeTransactionOperation(NOTSEALED_TRANSACTION_ID, SEGMENT_ID);
    AssertExtensions.assertThrows("Unexpected behavior for preProcess(Merge) when Transaction StreamSegment is not sealed.",
            () -> txn2.preProcessOperation(mergeNonSealed),
            ex -> ex instanceof StreamSegmentNotSealedException);

    // When the Transaction is already merged.
    txn2.preProcessOperation(mergeOp);
    txn2.acceptOperation(mergeOp);
    AssertExtensions.assertThrows("Unexpected behavior for preProcess(Merge) when Transaction StreamSegment is already merged (in transaction).",
            () -> txn2.preProcessOperation(createMerge()),
            ex -> ex instanceof StreamSegmentMergedException);

    txn2.commit(metadata);
    AssertExtensions.assertThrows("Unexpected behavior for preProcess(Merge) when Transaction StreamSegment is already merged (in metadata).",
            () -> txn2.preProcessOperation(createMerge()),
            ex -> ex instanceof StreamSegmentMergedException);
}
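In summary, the preconditions exercised above can be restated as a small check. The helper below is hypothetical and only mirrors the outcomes the test asserts; it is not Pravega's actual implementation.

// Hypothetical helper: restates the validation outcomes asserted in the test.
boolean canMergeTransaction(SegmentMetadata target, SegmentMetadata transaction, boolean targetIsTransaction) {
    // Rejected with MetadataUpdateException if the target is itself a Transaction segment.
    // Rejected with StreamSegmentSealedException if the target is sealed.
    // Rejected with StreamSegmentNotSealedException if the transaction segment is not yet sealed.
    // Rejected with StreamSegmentMergedException if the transaction segment was already merged.
    return !targetIsTransaction && !target.isSealed() && transaction.isSealed() && !transaction.isMerged();
}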
Use of io.pravega.segmentstore.contracts.StreamSegmentSealedException in project pravega by pravega.
From class ExtendedS3Storage, method doWrite().
private Void doWrite(SegmentHandle handle, long offset, InputStream data, int length) throws StreamSegmentException {
    Preconditions.checkArgument(!handle.isReadOnly(), "handle must not be read-only.");
    long traceId = LoggerHelpers.traceEnter(log, "write", handle.getSegmentName(), offset, length);

    SegmentProperties si = doGetStreamSegmentInfo(handle.getSegmentName());
    if (si.isSealed()) {
        // Sealed segments accept no further writes.
        throw new StreamSegmentSealedException(handle.getSegmentName());
    }

    if (si.getLength() != offset) {
        // Writes must be contiguous: the given offset has to match the current segment length.
        throw new BadOffsetException(handle.getSegmentName(), si.getLength(), offset);
    }

    client.putObject(this.config.getBucket(), this.config.getRoot() + handle.getSegmentName(),
            Range.fromOffsetLength(offset, length), data);
    LoggerHelpers.traceLeave(log, "write", traceId);
    return null;
}
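A hedged usage sketch for the write contract enforced above, going through the async Storage interface. The names storage, handle, and TIMEOUT are assumed to be in scope, and the error handling is illustrative, not part of ExtendedS3Storage.

long currentLength = storage.getStreamSegmentInfo(handle.getSegmentName(), TIMEOUT).join().getLength();
byte[] payload = "data".getBytes();
try {
    // Writes must be contiguous: the offset must equal the current segment length.
    storage.write(handle, currentLength, new ByteArrayInputStream(payload), payload.length, TIMEOUT).join();
} catch (CompletionException ex) {
    Throwable cause = ex.getCause();
    if (cause instanceof StreamSegmentSealedException) {
        // The segment was sealed in the meantime; no further writes are possible.
    } else if (cause instanceof BadOffsetException) {
        // Another writer appended concurrently; re-read the length and retry if appropriate.
    }
}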