Use of io.pravega.segmentstore.contracts.StreamSegmentNotExistsException in project pravega by pravega.
Example from class ContainerMetadataUpdateTransactionTests, method testPreProcessAndAcceptWithInvalidSegmentId.
// endregion
// region Other tests
/**
 * Tests the behavior of preProcessOperation and acceptOperation when encountering an invalid StreamSegmentId, or
 * when encountering a StreamSegment Id for a deleted StreamSegment.
 */
@Test
public void testPreProcessAndAcceptWithInvalidSegmentId() throws Exception {
    UpdateableContainerMetadata containerMetadata = createBlankMetadata();
    val updateTransaction = createUpdateTransaction(containerMetadata);

    // Case 1: operations targeting a Segment Id that was never mapped in the metadata.
    ArrayList<StorageOperation> operations = new ArrayList<>();
    operations.add(createAppendNoOffset());
    operations.add(createSeal());
    operations.add(createMerge());
    for (StorageOperation operation : operations) {
        AssertExtensions.assertThrows(
                "Unexpected behavior from preProcessOperation when processing an operation for a non-existent Segment: " + operation,
                () -> updateTransaction.preProcessOperation(operation),
                ex -> ex instanceof MetadataUpdateException);
        AssertExtensions.assertThrows(
                "Unexpected behavior from acceptOperation when processing an operation for a non-existent Segment: " + operation,
                () -> updateTransaction.acceptOperation(operation),
                ex -> ex instanceof MetadataUpdateException);
    }

    // Case 2: the Segment Id is mapped, but the StreamSegment was previously marked as deleted.
    UpdateableSegmentMetadata segmentMetadata = containerMetadata.mapStreamSegmentId("foo", SEGMENT_ID);
    segmentMetadata.markDeleted();
    for (StorageOperation operation : operations) {
        AssertExtensions.assertThrows(
                "Unexpected behavior from preProcessOperation when processing an operation for deleted Segment: " + operation,
                () -> updateTransaction.preProcessOperation(operation),
                ex -> ex instanceof StreamSegmentNotExistsException);
    }
}
Use of io.pravega.segmentstore.contracts.StreamSegmentNotExistsException in project pravega by pravega.
Example from class StreamSegmentStoreTestBase, method checkStorage.
/**
 * Verifies that the data in Storage (accessed via a read-only SegmentStore) agrees with both the
 * primary SegmentStore's metadata and the locally tracked expected contents for every segment in
 * {@code segmentContents}. For each segment it checks: deletion status, seal status, and contents
 * (accounting for segments that may have been truncated in the primary store).
 *
 * @param segmentContents      Expected byte contents per segment name, as written by the test.
 * @param baseStore            The primary SegmentStore (source of metadata truth).
 * @param readOnlySegmentStore A read-only SegmentStore backed directly by Storage.
 * @throws Exception If any check fails or an unexpected error occurs.
 */
private static void checkStorage(HashMap<String, ByteArrayOutputStream> segmentContents, StreamSegmentStore baseStore, StreamSegmentStore readOnlySegmentStore) throws Exception {
for (Map.Entry<String, ByteArrayOutputStream> e : segmentContents.entrySet()) {
String segmentName = e.getKey();
byte[] expectedData = e.getValue().toByteArray();
// 1. Deletion status
SegmentProperties sp = null;
try {
sp = baseStore.getStreamSegmentInfo(segmentName, false, TIMEOUT).join();
} catch (Exception ex) {
// Only a NotExists (possibly wrapped in a CompletionException) is acceptable here: it means
// the segment was deleted in the primary store. Anything else is a real failure.
if (!(Exceptions.unwrap(ex) instanceof StreamSegmentNotExistsException)) {
throw ex;
}
}
if (sp == null) {
// Segment is gone from the primary store; it must also be gone from Storage.
AssertExtensions.assertThrows("Segment is marked as deleted in SegmentStore but was not deleted in Storage " + segmentName, () -> readOnlySegmentStore.getStreamSegmentInfo(segmentName, false, TIMEOUT), ex -> ex instanceof StreamSegmentNotExistsException);
// No need to do other checks.
continue;
}
// 2. Seal Status
SegmentProperties storageProps = readOnlySegmentStore.getStreamSegmentInfo(segmentName, false, TIMEOUT).join();
Assert.assertEquals("Segment seal status disagree between Store and Storage for segment " + segmentName, sp.isSealed(), storageProps.isSealed());
// 3. Contents.
// Re-fetch metadata here (rather than reusing sp) so the StartOffset reflects any truncation
// that happened between the two calls. NOTE(review): presumably intentional; confirm against test flow.
SegmentProperties metadataProps = baseStore.getStreamSegmentInfo(segmentName, false, TIMEOUT).join();
Assert.assertEquals("Unexpected Storage length for segment " + segmentName, expectedData.length, storageProps.getLength());
byte[] actualData = new byte[expectedData.length];
int actualLength = 0;
int expectedLength = actualData.length;
try {
// First attempt: read the whole segment from offset 0.
@Cleanup ReadResult readResult = readOnlySegmentStore.read(segmentName, 0, actualData.length, TIMEOUT).join();
actualLength = readResult.readRemaining(actualData, TIMEOUT);
} catch (Exception ex) {
ex = (Exception) Exceptions.unwrap(ex);
// A TruncatedException is acceptable only if the primary store's metadata says the segment
// was actually truncated (StartOffset > 0); otherwise rethrow.
if (!(ex instanceof StreamSegmentTruncatedException) || metadataProps.getStartOffset() == 0) {
// We encountered an unexpected Exception, or a Truncated Segment which was not expected to be truncated.
throw ex;
}
// Read from the truncated point, except if the whole segment got truncated.
expectedLength = (int) (storageProps.getLength() - metadataProps.getStartOffset());
if (metadataProps.getStartOffset() < storageProps.getLength()) {
@Cleanup ReadResult readResult = readOnlySegmentStore.read(segmentName, metadataProps.getStartOffset(), expectedLength, TIMEOUT).join();
actualLength = readResult.readRemaining(actualData, TIMEOUT);
}
}
Assert.assertEquals("Unexpected number of bytes read from Storage for segment " + segmentName, expectedLength, actualLength);
// Compare the tail of expectedData (the non-truncated part) against what was read from Storage.
AssertExtensions.assertArrayEquals("Unexpected data written to storage for segment " + segmentName, expectedData, expectedData.length - expectedLength, actualData, 0, expectedLength);
}
}
Use of io.pravega.segmentstore.contracts.StreamSegmentNotExistsException in project pravega by pravega.
Example from class ExtendedS3Storage, method doConcat.
/**
 * The concat is implemented using extended S3 implementation of multipart copy API. Please see here for
 * more detail on multipart copy:
 * http://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingLLJavaMPUapi.html
 *
 * The multipart copy is an atomic operation. We schedule two parts and commit them atomically using
 * completeMultiPartUpload call. Specifically, to concatenate, we are copying the target segment T and the
 * source segment S to T, so essentially we are doing T &lt;- T + S.
 *
 * @param targetHandle  Write handle for the target segment T (must not be read-only).
 * @param offset        Length of the existing target data; the first copied part covers [0, offset).
 * @param sourceSegment Name of the (sealed) source segment S to append to T.
 * @return null (Void), for use as a CompletableFuture-compatible callable.
 * @throws StreamSegmentNotExistsException If the target segment does not exist.
 */
private Void doConcat(SegmentHandle targetHandle, long offset, String sourceSegment) throws StreamSegmentNotExistsException {
    Preconditions.checkArgument(!targetHandle.isReadOnly(), "target handle must not be read-only.");
    long traceId = LoggerHelpers.traceEnter(log, "concat", targetHandle.getSegmentName(), offset, sourceSegment);
    SortedSet<MultipartPartETag> partEtags = new TreeSet<>();
    String targetPath = config.getRoot() + targetHandle.getSegmentName();

    // Validate BEFORE initiating the multipart upload. The previous ordering initiated the upload
    // first; if the target was missing (or the source unsealed), the method threw without ever
    // completing or aborting the upload, leaving an orphaned incomplete multipart upload behind
    // (these accrue storage until explicitly aborted or cleaned up by a bucket lifecycle rule).

    // check whether the target exists
    if (!doExists(targetHandle.getSegmentName())) {
        throw new StreamSegmentNotExistsException(targetHandle.getSegmentName());
    }
    // check whether the source is sealed
    SegmentProperties si = doGetStreamSegmentInfo(sourceSegment);
    Preconditions.checkState(si.isSealed(), "Cannot concat segment '%s' into '%s' because it is not sealed.", sourceSegment, targetHandle.getSegmentName());

    // Only now start the multipart upload; both validations have passed.
    String uploadId = client.initiateMultipartUpload(config.getBucket(), targetPath);

    // Copy the first part: the existing target content [0, offset).
    CopyPartRequest copyRequest = new CopyPartRequest(config.getBucket(), targetPath, config.getBucket(), targetPath, uploadId, 1).withSourceRange(Range.fromOffsetLength(0, offset));
    CopyPartResult copyResult = client.copyPart(copyRequest);
    partEtags.add(new MultipartPartETag(copyResult.getPartNumber(), copyResult.getETag()));

    // Copy the second part: the entire source segment.
    S3ObjectMetadata metadataResult = client.getObjectMetadata(config.getBucket(), config.getRoot() + sourceSegment);
    // in bytes
    long objectSize = metadataResult.getContentLength();
    copyRequest = new CopyPartRequest(config.getBucket(), config.getRoot() + sourceSegment, config.getBucket(), targetPath, uploadId, 2).withSourceRange(Range.fromOffsetLength(0, objectSize));
    copyResult = client.copyPart(copyRequest);
    partEtags.add(new MultipartPartETag(copyResult.getPartNumber(), copyResult.getETag()));

    // Close the upload: commits both parts atomically, then discard the consumed source object.
    client.completeMultipartUpload(new CompleteMultipartUploadRequest(config.getBucket(), targetPath, uploadId).withParts(partEtags));
    client.deleteObject(config.getBucket(), config.getRoot() + sourceSegment);
    LoggerHelpers.traceLeave(log, "concat", traceId);
    return null;
}
Use of io.pravega.segmentstore.contracts.StreamSegmentNotExistsException in project pravega by pravega.
Example from class StreamSegmentContainer, method mergeTransaction.
/**
 * Merges the given Transaction segment into its parent StreamSegment by appending a
 * MergeTransactionOperation to the DurableLog, then removes the Transaction's attribute state.
 *
 * @param transactionName The name of the Transaction segment to merge.
 * @param timeout         Timeout for the whole operation (shared across the internal steps).
 * @return A CompletableFuture that completes when the merge has been durably recorded and the
 * Transaction's state has been removed, or completes exceptionally on failure.
 */
@Override
public CompletableFuture<Void> mergeTransaction(String transactionName, Duration timeout) {
    ensureRunning();
    logRequest("mergeTransaction", transactionName);
    this.metrics.mergeTxn();
    TimeoutTimer timeoutTimer = new TimeoutTimer(timeout);
    return this.segmentMapper
            .getOrAssignStreamSegmentId(transactionName, timeoutTimer.getRemaining(), transactionId -> {
                // The mapper resolved an id, but the metadata may no longer hold an entry for it.
                SegmentMetadata txnMetadata = this.metadata.getStreamSegmentMetadata(transactionId);
                if (txnMetadata == null) {
                    throw new CompletionException(new StreamSegmentNotExistsException(transactionName));
                }
                Operation mergeOp = new MergeTransactionOperation(txnMetadata.getParentId(), txnMetadata.getId());
                return this.durableLog.add(mergeOp, timeoutTimer.getRemaining());
            })
            .thenComposeAsync(v -> this.stateStore.remove(transactionName, timeoutTimer.getRemaining()), this.executor);
}
Use of io.pravega.segmentstore.contracts.StreamSegmentNotExistsException in project pravega by pravega.
Example from class StreamSegmentMapper, method submitToOperationLog.
/**
 * Submits a StreamSegmentMapOperation to the OperationLog. Upon completion, this operation
 * will have mapped the given Segment to a new internal Segment Id if none was provided in the given SegmentInfo.
 * If the given SegmentInfo already has a SegmentId set, then all efforts will be made to map that Segment with the
 * requested Segment Id.
 *
 * @param segmentInfo The SegmentInfo for the StreamSegment to generate and persist.
 * @param parentStreamSegmentId If different from ContainerMetadata.NO_STREAM_SEGMENT_ID, the given streamSegmentInfo
 * will be mapped as a transaction. Otherwise, this will be registered as a standalone StreamSegment.
 * @param timeout Timeout for the operation.
 * @return A CompletableFuture that, when completed, will contain the internal SegmentId that was assigned (or the
 * one supplied via SegmentInfo, if any). If the operation failed, then this Future will complete with that exception.
 */
private CompletableFuture<Long> submitToOperationLog(SegmentInfo segmentInfo, long parentStreamSegmentId, Duration timeout) {
    SegmentProperties properties = segmentInfo.getProperties();

    // Guard 1: segment was deleted. Fail the assignment and the returned future.
    if (properties.isDeleted()) {
        failAssignment(properties.getName(), new StreamSegmentNotExistsException("StreamSegment does not exist."));
        return Futures.failedFuture(new StreamSegmentNotExistsException(properties.getName()));
    }

    // Guard 2: a concurrent caller already mapped this segment; reuse that id.
    long existingSegmentId = this.containerMetadata.getStreamSegmentId(properties.getName(), true);
    if (isValidStreamSegmentId(existingSegmentId)) {
        completeAssignment(properties.getName(), existingSegmentId);
        return CompletableFuture.completedFuture(existingSegmentId);
    }

    // Build the map operation: a Transaction (with parent) or a standalone StreamSegment.
    StreamSegmentMapOperation mapOp;
    if (isValidStreamSegmentId(parentStreamSegmentId)) {
        SegmentMetadata parentMetadata = this.containerMetadata.getStreamSegmentMetadata(parentStreamSegmentId);
        assert parentMetadata != null : "parentMetadata is null";
        mapOp = new StreamSegmentMapOperation(parentStreamSegmentId, properties);
    } else {
        mapOp = new StreamSegmentMapOperation(properties);
    }

    // Honor an explicitly requested Segment Id, if one was supplied.
    if (segmentInfo.getSegmentId() != ContainerMetadata.NO_STREAM_SEGMENT_ID) {
        mapOp.setStreamSegmentId(segmentInfo.getSegmentId());
    }
    return this.durableLog.add(mapOp, timeout)
            .thenApply(seqNo -> completeAssignment(properties.getName(), mapOp.getStreamSegmentId()));
}
End of aggregated usage examples.