Example usage of io.pravega.segmentstore.contracts.BadOffsetException in the pravega project, from the class FileSystemStorage, method doWrite:
/**
 * Writes the given data at the given offset of the specified segment's backing file.
 *
 * @param handle A writable {@link SegmentHandle} identifying the target segment.
 * @param offset The offset at which to begin writing; must not exceed the current file length.
 * @param data   An InputStream supplying the bytes to write. The stream is intentionally NOT closed
 *               so that it can be reused by the caller.
 * @param length The exact number of bytes to write.
 * @return null (the method is declared as returning {@code Void}).
 * @throws Exception IllegalArgumentException if the handle is read-only; StreamSegmentSealedException if the
 *                   file is not writable; BadOffsetException if {@code offset} is beyond the file's length;
 *                   IOException if no progress can be made while transferring data.
 */
private Void doWrite(SegmentHandle handle, long offset, InputStream data, int length) throws Exception {
    long traceId = LoggerHelpers.traceEnter(log, "write", handle.getSegmentName(), offset, length);
    Timer timer = new Timer();
    if (handle.isReadOnly()) {
        throw new IllegalArgumentException("Write called on a readonly handle of segment " + handle.getSegmentName());
    }
    Path path = Paths.get(config.getRoot(), handle.getSegmentName());
    // This means that writes to readonly files also succeed. We need to explicitly check permissions in this case.
    if (!isWritableFile(path)) {
        throw new StreamSegmentSealedException(handle.getSegmentName());
    }
    long fileSize = path.toFile().length();
    if (fileSize < offset) {
        throw new BadOffsetException(handle.getSegmentName(), fileSize, offset);
    }
    long totalBytesWritten = 0;
    try (FileChannel channel = FileChannel.open(path, StandardOpenOption.WRITE)) {
        // Wrap the input data into a ReadableByteChannel, but do not close it. Doing so will result in closing
        // the underlying InputStream, which is not desirable if it is to be reused.
        ReadableByteChannel sourceChannel = Channels.newChannel(data);
        while (length > 0) {
            long bytesWritten = channel.transferFrom(sourceChannel, offset, length);
            if (bytesWritten <= 0) {
                // Previously this was an assert. With assertions disabled (the production default), a
                // zero-byte transfer (e.g. a prematurely exhausted source stream, or a position past
                // EOF, where transferFrom returns 0) would cause this loop to spin forever. Fail fast
                // with an explicit exception instead.
                throw new java.io.IOException("Unable to make any progress transferring data to segment "
                        + handle.getSegmentName() + " at offset " + offset);
            }
            offset += bytesWritten;
            totalBytesWritten += bytesWritten;
            length -= bytesWritten;
        }
    }
    FileSystemMetrics.WRITE_LATENCY.reportSuccessEvent(timer.getElapsed());
    FileSystemMetrics.WRITE_BYTES.add(totalBytesWritten);
    LoggerHelpers.traceLeave(log, "write", traceId);
    return null;
}
Example usage of io.pravega.segmentstore.contracts.BadOffsetException in the pravega project, from the class ContainerMetadataUpdateTransactionTests, method testAcceptStreamSegmentTruncate:
/**
 * Verifies that acceptOperation correctly applies StreamSegmentTruncate operations, both within an
 * Update Transaction and after committing to the base metadata.
 */
@Test
public void testAcceptStreamSegmentTruncate() throws Exception {
    val containerMetadata = createMetadata();
    val appendOp = createAppendNoOffset();
    // Seal here as well, since the preProcessStreamSegmentTruncate test did not cover that.
    val sealOp = createSeal();
    final long truncateOffset = SEGMENT_LENGTH + appendOp.getLength() / 2;
    val truncateOp = createTruncate(truncateOffset);
    // Run all three operations, in order, through a single transaction. This verifies that, if these
    // operations were to happen concurrently, they would be applied to the metadata in the right sequence.
    val txn = createUpdateTransaction(containerMetadata);
    for (Operation toProcess : Arrays.asList(appendOp, sealOp, truncateOp)) {
        txn.preProcessOperation(toProcess);
        txn.acceptOperation(toProcess);
    }
    // Truncate operations with out-of-bounds offsets must be rejected.
    AssertExtensions.assertThrows("preProcessOperation accepted a truncate operation with wrong offset (smaller).", () -> txn.preProcessOperation(createTruncate(truncateOffset - 1)), ex -> ex instanceof BadOffsetException);
    AssertExtensions.assertThrows("preProcessOperation accepted a truncate operation with wrong offset (larger).", () -> txn.preProcessOperation(createTruncate(truncateOffset + appendOp.getLength())), ex -> ex instanceof BadOffsetException);
    // The Update Transaction reflects the truncate; the base metadata must remain untouched until commit.
    val segmentMetadata = containerMetadata.getStreamSegmentMetadata(SEGMENT_ID);
    Assert.assertEquals("Unexpected StartOffset in UpdateTransaction.", truncateOffset, txn.getStreamSegmentMetadata(SEGMENT_ID).getStartOffset());
    Assert.assertEquals("Unexpected StartOffset in Metadata pre-commit.", 0, segmentMetadata.getStartOffset());
    // After committing, the base metadata must reflect all three operations.
    txn.commit(containerMetadata);
    Assert.assertEquals("Unexpected StartOffset in Metadata post-commit.", truncateOffset, segmentMetadata.getStartOffset());
    Assert.assertEquals("Unexpected Length in Metadata post-commit.", appendOp.getStreamSegmentOffset() + appendOp.getLength(), segmentMetadata.getLength());
    Assert.assertTrue("Unexpected Sealed status in Metadata post-commit.", segmentMetadata.isSealed());
    // A lone truncate in a fresh transaction (this checks that committed metadata is consulted when needed).
    val secondTruncate = createTruncate(truncateOffset + 1);
    val txn2 = createUpdateTransaction(containerMetadata);
    txn2.preProcessOperation(secondTruncate);
    txn2.acceptOperation(secondTruncate);
    txn2.commit(containerMetadata);
    Assert.assertEquals("Unexpected StartOffset in Metadata post-commit (second).", secondTruncate.getStreamSegmentOffset(), segmentMetadata.getStartOffset());
    // Truncating at the very end of the segment is allowed.
    val fullTruncate = createTruncate(segmentMetadata.getLength());
    val txn3 = createUpdateTransaction(containerMetadata);
    txn3.preProcessOperation(fullTruncate);
    txn3.acceptOperation(fullTruncate);
    txn3.commit(containerMetadata);
    Assert.assertEquals("Unexpected StartOffset in Metadata when truncating entire segment.", segmentMetadata.getLength(), segmentMetadata.getStartOffset());
}
Example usage of io.pravega.segmentstore.contracts.BadOffsetException in the pravega project, from the class ContainerMetadataUpdateTransactionTests, method testPreProcessStreamSegmentTruncate:
// endregion
// region StreamSegmentTruncate
/**
 * Verifies the preProcess method for StreamSegmentTruncate operations.
 * Covered scenarios:
 * * Recovery Mode
 * * Non-recovery mode
 * * Invalid states or arguments (Segment not sealed, bad offsets, Transaction Segment).
 */
@Test
public void testPreProcessStreamSegmentTruncate() throws Exception {
    final UpdateableContainerMetadata containerMetadata = createMetadata();
    val updateTxn = createUpdateTransaction(containerMetadata);
    // Truncating beyond the last offset must fail.
    AssertExtensions.assertThrows("preProcess did not throw when offset is too large.", () -> updateTxn.preProcessOperation(createTruncate(SEGMENT_LENGTH + 1)), ex -> ex instanceof BadOffsetException);
    // Truncate the segment for real, then re-check both bounds against the new state.
    val firstTruncate = createTruncate(SEGMENT_LENGTH / 2);
    updateTxn.preProcessOperation(firstTruncate);
    updateTxn.acceptOperation(firstTruncate);
    updateTxn.commit(containerMetadata);
    AssertExtensions.assertThrows("preProcess did not throw when offset is too small (on truncated segment).", () -> updateTxn.preProcessOperation(createTruncate(firstTruncate.getStreamSegmentOffset() - 1)), ex -> ex instanceof BadOffsetException);
    AssertExtensions.assertThrows("preProcess did not throw when offset is too large (on truncated segment).", () -> updateTxn.preProcessOperation(createTruncate(SEGMENT_LENGTH + 1)), ex -> ex instanceof BadOffsetException);
    // Truncating a Transaction Segment is not permitted.
    AssertExtensions.assertThrows("preProcess did not throw for a Transaction Segment.", () -> updateTxn.preProcessOperation(new StreamSegmentTruncateOperation(SEALED_TRANSACTION_ID, SEALED_TRANSACTION_LENGTH / 2)), ex -> ex instanceof MetadataUpdateException);
    // Valid offsets must be accepted (the test passes if no exception is thrown).
    updateTxn.preProcessOperation(createTruncate(firstTruncate.getStreamSegmentOffset()));
    updateTxn.preProcessOperation(createTruncate(firstTruncate.getStreamSegmentOffset() + 1));
    updateTxn.preProcessOperation(createTruncate(SEGMENT_LENGTH));
}
Example usage of io.pravega.segmentstore.contracts.BadOffsetException in the pravega project, from the class ContainerMetadataUpdateTransactionTests, method testStreamSegmentAppendWithOffset:
/**
 * Verifies that the ContainerMetadataUpdateTransaction can process (and accept)
 * StreamSegmentAppendOperations that carry predefined offsets.
 */
@Test
public void testStreamSegmentAppendWithOffset() throws Exception {
    UpdateableContainerMetadata containerMetadata = createMetadata();
    val updateTxn = createUpdateTransaction(containerMetadata);
    // First append, positioned at the segment's current length (offset 0).
    long expectedOffset = containerMetadata.getStreamSegmentMetadata(SEGMENT_ID).getLength();
    StreamSegmentAppendOperation currentAppend = createAppendWithOffset(expectedOffset);
    updateTxn.preProcessOperation(currentAppend);
    Assert.assertEquals("Unexpected StreamSegmentOffset after call to preProcess in non-recovery mode.", expectedOffset, currentAppend.getStreamSegmentOffset());
    checkNoSequenceNumberAssigned(currentAppend, "call to preProcess in non-recovery mode");
    Assert.assertEquals("preProcess(Append) seems to have changed the Updater internal state.", expectedOffset, updateTxn.getStreamSegmentMetadata(SEGMENT_ID).getLength());
    Assert.assertEquals("preProcess(Append) seems to have changed the metadata.", expectedOffset, containerMetadata.getStreamSegmentMetadata(SEGMENT_ID).getLength());
    updateTxn.acceptOperation(currentAppend);
    // Second append, positioned directly after the first one.
    expectedOffset = currentAppend.getStreamSegmentOffset() + currentAppend.getLength();
    currentAppend = createAppendWithOffset(expectedOffset);
    updateTxn.preProcessOperation(currentAppend);
    Assert.assertEquals("Unexpected StreamSegmentOffset after call to preProcess in non-recovery mode.", expectedOffset, currentAppend.getStreamSegmentOffset());
    checkNoSequenceNumberAssigned(currentAppend, "call to preProcess in non-recovery mode");
    Assert.assertEquals("preProcess(Append) seems to have changed the Updater internal state.", expectedOffset, updateTxn.getStreamSegmentMetadata(SEGMENT_ID).getLength());
    Assert.assertEquals("preProcess(Append) seems to have changed the metadata.", SEGMENT_LENGTH, containerMetadata.getStreamSegmentMetadata(SEGMENT_ID).getLength());
    updateTxn.acceptOperation(currentAppend);
    // Third append, carrying a wrong (overlapping) offset: must be rejected in both phases.
    expectedOffset = currentAppend.getStreamSegmentOffset() + currentAppend.getLength() - 1;
    StreamSegmentAppendOperation wrongOffsetAppend = createAppendWithOffset(expectedOffset);
    AssertExtensions.assertThrows("preProcessOperations accepted an append with the wrong offset.", () -> updateTxn.preProcessOperation(wrongOffsetAppend), ex -> ex instanceof BadOffsetException);
    AssertExtensions.assertThrows("acceptOperation accepted an append that was rejected during preProcessing.", () -> updateTxn.acceptOperation(wrongOffsetAppend), ex -> ex instanceof MetadataUpdateException);
}
Example usage of io.pravega.segmentstore.contracts.BadOffsetException in the pravega project, from the class AppendProcessor, method handleAppendResult:
/**
 * Handles the completion (success or failure) of the current outstanding append: sends the appropriate
 * Reply to the client connection, then updates the processor's bookkeeping (outstandingAppend,
 * latestEventNumbers, waitingAppends) and kicks off the next write.
 * NOTE(review): assumes this is invoked exactly once per outstanding append — confirm against caller.
 *
 * @param append    The append whose result is being handled; must be the current outstandingAppend.
 * @param exception The failure cause, or null if the append succeeded.
 */
private void handleAppendResult(final Append append, Throwable exception) {
try {
// A (possibly wrapped) BadOffsetException marks a failed conditional append: the client gets a
// ConditionalCheckFailed reply instead of a generic error, and its pending state is preserved below.
boolean conditionalFailed = exception != null && (Exceptions.unwrap(exception) instanceof BadOffsetException);
long previousEventNumber;
synchronized (lock) {
// Snapshot the writer's last acked event number and verify this append really is the outstanding one.
previousEventNumber = latestEventNumbers.get(Pair.of(append.getSegment(), append.getWriterId()));
Preconditions.checkState(outstandingAppend == append, "Synchronization error in: %s while processing append: %s.", AppendProcessor.this.getClass().getName(), append);
}
if (exception != null) {
if (conditionalFailed) {
log.debug("Conditional append failed due to incorrect offset: {}, {}", append, exception.getMessage());
connection.send(new ConditionalCheckFailed(append.getWriterId(), append.getEventNumber()));
} else {
handleException(append.getWriterId(), append.getEventNumber(), append.getSegment(), "appending data", exception);
}
} else {
// Success path: record stats (if a recorder is configured), ack the client, and bump the metrics counters.
if (statsRecorder != null) {
statsRecorder.record(append.getSegment(), append.getDataLength(), append.getEventCount());
}
final DataAppended dataAppendedAck = new DataAppended(append.getWriterId(), append.getEventNumber(), previousEventNumber);
log.trace("Sending DataAppended : {}", dataAppendedAck);
connection.send(dataAppendedAck);
DYNAMIC_LOGGER.incCounterValue(nameFromSegment(SEGMENT_WRITE_BYTES, append.getSegment()), append.getDataLength());
DYNAMIC_LOGGER.incCounterValue(nameFromSegment(SEGMENT_WRITE_EVENTS, append.getSegment()), append.getEventCount());
}
/* Reply (DataAppended in case of success, else an error Reply based on exception) has been sent. Next,
 * - clear outstandingAppend to handle the next Append message.
 * - ensure latestEventNumbers and waitingAppends are updated.
 */
synchronized (lock) {
// Re-check the invariant before mutating shared state: no other thread may have replaced the
// outstanding append while the reply was being sent.
Preconditions.checkState(outstandingAppend == append, "Synchronization error in: %s while processing append: %s.", AppendProcessor.this.getClass().getName(), append);
outstandingAppend = null;
if (exception == null) {
latestEventNumbers.put(Pair.of(append.getSegment(), append.getWriterId()), append.getEventNumber());
} else {
if (!conditionalFailed) {
// Non-conditional failure: drop all pending appends and the event-number entry for this writer.
// Presumably the writer is expected to re-establish its state afterwards — TODO confirm.
waitingAppends.removeAll(append.getWriterId());
latestEventNumbers.remove(Pair.of(append.getSegment(), append.getWriterId()));
}
}
}
pauseOrResumeReading();
performNextWrite();
} catch (Throwable e) {
// Last-resort guard: any failure while handling the result itself is reported back to the client.
handleException(append.getWriterId(), append.getEventNumber(), append.getSegment(), "handling append result", e);
}
}
Aggregations