Use of io.pravega.segmentstore.contracts.StreamSegmentNotExistsException in project pravega by pravega.
From the class StreamSegmentStoreTestBase, method checkSegmentStatus.
/**
 * Verifies the status of every segment in {@code segmentLengths} against the given StreamSegmentStore.
 *
 * @param segmentLengths Expected segment lengths, keyed by segment name.
 * @param startOffsets   Expected start offsets, keyed by segment name; segments absent from this map
 *                       are expected to have a start offset of 0.
 * @param expectSealed   Whether every segment is expected to be sealed.
 * @param expectDeleted  Whether every segment is expected to be deleted (if so, no other checks apply).
 * @param store          The StreamSegmentStore to query.
 */
private void checkSegmentStatus(Map<String, Long> segmentLengths, Map<String, Long> startOffsets, boolean expectSealed, boolean expectDeleted, StreamSegmentStore store) {
    for (Map.Entry<String, Long> e : segmentLengths.entrySet()) {
        String segmentName = e.getKey();
        if (expectDeleted) {
            // A deleted segment must be reported as missing by getStreamSegmentInfo.
            AssertExtensions.assertThrows("Segment '" + segmentName + "' was not deleted.",
                    () -> store.getStreamSegmentInfo(segmentName, false, TIMEOUT),
                    ex -> ex instanceof StreamSegmentNotExistsException);
        } else {
            SegmentProperties sp = store.getStreamSegmentInfo(segmentName, false, TIMEOUT).join();
            long expectedStartOffset = startOffsets.getOrDefault(segmentName, 0L);
            long expectedLength = e.getValue();
            Assert.assertEquals("Unexpected Start Offset for segment " + segmentName, expectedStartOffset, sp.getStartOffset());
            Assert.assertEquals("Unexpected length for segment " + segmentName, expectedLength, sp.getLength());
            Assert.assertEquals("Unexpected value for isSealed for segment " + segmentName, expectSealed, sp.isSealed());
            Assert.assertFalse("Unexpected value for isDeleted for segment " + segmentName, sp.isDeleted());
        }
    }
}
Use of io.pravega.segmentstore.contracts.StreamSegmentNotExistsException in project pravega by pravega.
From the class SegmentAggregatorTests, method testReconcileMerge.
/**
 * Tests the ability of the SegmentAggregator to reconcile MergeTransactionOperations.
 * Scenario: the Storage concat actually succeeds, but the call reports a failure; the aggregator
 * must detect (via reconciliation) that the merge already happened and recover without data loss.
 */
@Test
public void testReconcileMerge() throws Exception {
    final int appendCount = 100;
    @Cleanup TestContext context = new TestContext(DEFAULT_CONFIG);
    // Create a parent segment and one transaction segment.
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT).join();
    SegmentAggregator transactionAggregator = context.transactionAggregators[0];
    context.storage.create(transactionAggregator.getMetadata().getName(), TIMEOUT).join();
    transactionAggregator.initialize(TIMEOUT).join();
    // Store written data by segment - so we can check it later.
    ByteArrayOutputStream transactionData = new ByteArrayOutputStream();
    // Add a bunch of data to the transaction.
    for (int appendId = 0; appendId < appendCount; appendId++) {
        StorageOperation appendOp = generateAppendAndUpdateMetadata(appendId, transactionAggregator.getMetadata().getId(), context);
        transactionAggregator.add(appendOp);
        getAppendData(appendOp, transactionData, context);
    }
    // Seal & flush everything in the transaction
    transactionAggregator.add(generateSealAndUpdateMetadata(transactionAggregator.getMetadata().getId(), context));
    while (transactionAggregator.mustFlush()) {
        transactionAggregator.flush(TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    }
    // The concat succeeds, but we throw some random error, indicating that it didn't.
    context.storage.setConcatInterceptor((targetSegment, offset, sourceSegment, storage) -> {
        storage.concat(writeHandle(targetSegment), offset, sourceSegment, TIMEOUT).join();
        throw new IntentionalException(String.format("T=%s,O=%d,S=%s", targetSegment, offset, sourceSegment));
    });
    // Attempt to concat. (Renamed from 'sealOp': this is a MergeTransaction operation, not a seal.)
    StorageOperation mergeOp = generateMergeTransactionAndUpdateMetadata(transactionAggregator.getMetadata().getId(), context);
    context.segmentAggregator.add(mergeOp);
    // First time: attempt to flush/seal, which must end in failure.
    AssertExtensions.assertThrows("IntentionalException did not propagate to flush() caller.", () -> context.segmentAggregator.flush(TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS), ex -> Exceptions.unwrap(ex) instanceof IntentionalException);
    // Second time: we are not yet in reconcilation mode, but we are about to detect that the Transaction segment
    // no longer exists
    AssertExtensions.assertThrows("IntentionalException did not propagate to flush() caller.", () -> context.segmentAggregator.flush(TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS), ex -> Exceptions.unwrap(ex) instanceof StreamSegmentNotExistsException);
    // Third time: we should be in reconciliation mode, and we should be able to recover from it.
    context.segmentAggregator.flush(TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    // Verify outcome: nothing left to flush, transaction aggregator closed, and all appended bytes landed in Storage.
    Assert.assertFalse("Unexpected value from mustFlush() after merger reconciliation.", context.segmentAggregator.mustFlush());
    Assert.assertTrue("Transaction Aggregator not closed.", transactionAggregator.isClosed());
    byte[] expectedData = transactionData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    long storageLength = context.storage.getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join().getLength();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageLength);
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0, actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}
Use of io.pravega.segmentstore.contracts.StreamSegmentNotExistsException in project pravega by pravega.
From the class ExtendedS3Storage, method doRead.
/**
 * Reads up to {@code length} bytes from the given segment at the given offset into {@code buffer}.
 *
 * @param handle       Handle identifying the segment to read from.
 * @param offset       Offset within the segment to start reading at.
 * @param buffer       Destination buffer.
 * @param bufferOffset Offset within the destination buffer to start writing at.
 * @param length       Number of bytes to read.
 * @return The number of bytes actually read.
 * @throws ArrayIndexOutOfBoundsException If any offset/length is negative or the read would overflow the buffer.
 * @throws StreamSegmentNotExistsException If the segment does not exist in Storage.
 */
private int doRead(SegmentHandle handle, long offset, byte[] buffer, int bufferOffset, int length) throws Exception {
    long traceId = LoggerHelpers.traceEnter(log, "read", handle.getSegmentName(), offset, bufferOffset, length);
    // Validate all bounds up front, including the case where the requested range overflows the buffer.
    if (offset < 0 || bufferOffset < 0 || length < 0 || buffer.length < bufferOffset + length) {
        throw new ArrayIndexOutOfBoundsException(String.format(
                "Offset (%s) must be non-negative, and bufferOffset (%s) and length (%s) must be valid indices into buffer of size %s.",
                offset, bufferOffset, length, buffer.length));
    }
    try (InputStream reader = client.readObjectStream(config.getBucket(), config.getRoot() + handle.getSegmentName(), Range.fromOffsetLength(offset, length))) {
        /*
         * TODO: This implementation assumes that if S3Client.readObjectStream returns null, then
         * the object does not exist and we throw StreamSegmentNotExistsException. The javadoc, however,
         * says that this call returns null in case of 304 and 412 responses. We need to
         * investigate what these responses mean precisely and react accordingly.
         *
         * See https://github.com/pravega/pravega/issues/1549
         */
        if (reader == null) {
            throw new StreamSegmentNotExistsException(handle.getSegmentName());
        }
        int bytesRead = StreamHelpers.readAll(reader, buffer, bufferOffset, length);
        LoggerHelpers.traceLeave(log, "read", traceId, bytesRead);
        return bytesRead;
    }
}
Use of io.pravega.segmentstore.contracts.StreamSegmentNotExistsException in project pravega by pravega.
From the class FileSystemStorage, method doOpenRead.
// endregion
// region private sync implementation
/**
 * Opens the given segment for read access.
 *
 * @param streamSegmentName Name of the segment to open.
 * @return A read-only handle for the segment.
 * @throws StreamSegmentNotExistsException If no file exists for the segment under the configured root.
 */
private SegmentHandle doOpenRead(String streamSegmentName) throws StreamSegmentNotExistsException {
    long traceId = LoggerHelpers.traceEnter(log, "openRead", streamSegmentName);
    Path segmentPath = Paths.get(config.getRoot(), streamSegmentName);
    if (Files.exists(segmentPath)) {
        LoggerHelpers.traceLeave(log, "openRead", traceId, streamSegmentName);
        return FileSystemSegmentHandle.readHandle(streamSegmentName);
    }
    // NOTE(review): there is an inherent check-then-act race with the file system here; callers
    // must still handle the file disappearing after this method returns.
    throw new StreamSegmentNotExistsException(streamSegmentName);
}
Use of io.pravega.segmentstore.contracts.StreamSegmentNotExistsException in project pravega by pravega.
From the class FileSystemStorage, method doOpenWrite.
/**
 * Opens the given segment for write access. If the backing file exists but is not writable,
 * a read-only handle is returned instead of failing.
 *
 * @param streamSegmentName Name of the segment to open.
 * @return A write handle if the file is writable; otherwise a read-only handle.
 * @throws StreamSegmentNotExistsException If no file exists for the segment under the configured root.
 */
private SegmentHandle doOpenWrite(String streamSegmentName) throws StreamSegmentNotExistsException {
    long traceId = LoggerHelpers.traceEnter(log, "openWrite", streamSegmentName);
    Path path = Paths.get(config.getRoot(), streamSegmentName);
    if (!Files.exists(path)) {
        throw new StreamSegmentNotExistsException(streamSegmentName);
    }
    // Fall back to a read-only handle for non-writable files instead of failing.
    SegmentHandle handle = Files.isWritable(path)
            ? FileSystemSegmentHandle.writeHandle(streamSegmentName)
            : FileSystemSegmentHandle.readHandle(streamSegmentName);
    // Include the segment name in the trace for consistency with doOpenRead.
    LoggerHelpers.traceLeave(log, "openWrite", traceId, streamSegmentName);
    return handle;
}
Aggregations