use of io.pravega.segmentstore.server.DataCorruptionException in project pravega by pravega.
the class SegmentAggregator method getFlushArgs.
/**
* Returns a FlushArgs which contains the data needing to be flushed to Storage.
*
* @return The aggregated object that can be used for flushing.
* @throws DataCorruptionException If unable to retrieve the required data from the Data Source.
*/
private FlushArgs getFlushArgs() throws DataCorruptionException {
StorageOperation first = this.operations.getFirst();
if (!(first instanceof AggregatedAppendOperation)) {
// Nothing to flush - first operation is not an AggregatedAppend.
return new FlushArgs(null, 0);
}
AggregatedAppendOperation appendOp = (AggregatedAppendOperation) first;
int length = (int) appendOp.getLength();
InputStream data = this.dataSource.getAppendData(appendOp.getStreamSegmentId(), appendOp.getStreamSegmentOffset(), length);
if (data == null) {
if (this.metadata.isDeleted()) {
// Segment was deleted - nothing more to do.
return new FlushArgs(null, 0);
}
throw new DataCorruptionException(String.format("Unable to retrieve CacheContents for '%s'.", appendOp));
}
appendOp.seal();
return new FlushArgs(data, length);
}
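For illustration, a hedged sketch of how a caller inside the same aggregator could consume this result is shown below. It assumes FlushArgs exposes getData() and getLength() accessors and uses a hypothetical writeToStorage helper; neither appears in the snippet above.
// Minimal caller sketch (getData()/getLength() are assumed accessors; writeToStorage is hypothetical).
private void flushIfNeeded() throws DataCorruptionException {
    FlushArgs args = getFlushArgs(); // May throw DataCorruptionException if cached data is missing.
    if (args.getData() == null) {
        // Either the first operation is not an AggregatedAppend or the segment was deleted.
        return;
    }
    // The AggregatedAppendOperation has been sealed; its data is ready to be written.
    writeToStorage(args.getData(), args.getLength());
}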
use of io.pravega.segmentstore.server.DataCorruptionException in project pravega by pravega.
the class ContainerRecoverCommand method execute.
@Override
public void execute() throws Exception {
ensureArgCount(1);
int containerId = getIntArg(0);
@Cleanup val context = createContext();
val readIndexConfig = getCommandArgs().getState().getConfigBuilder().build().getConfig(ReadIndexConfig::builder);
// We create a special "read-only" BK log that will not be doing fencing or otherwise interfere with an active
// container. As a result, due to the nature of BK, it is possible that it may not contain all the latest writes
since the Bookies may not have yet synchronized the LAC on the last (active) ledger.
@Cleanup val log = context.logFactory.createDebugLogWrapper(containerId);
val bkLog = log.asReadOnly();
val recoveryState = new RecoveryState();
val callbacks = new DebugRecoveryProcessor.OperationCallbacks(recoveryState::newOperation, op -> recoveryState.operationComplete(op, null), recoveryState::operationComplete);
@Cleanup val rp = DebugRecoveryProcessor.create(containerId, bkLog, context.containerConfig, readIndexConfig, getCommandArgs().getState().getExecutor(), callbacks);
try {
rp.performRecovery();
output("Recovery complete: %d DataFrame(s) containing %d Operation(s).", recoveryState.dataFrameCount, recoveryState.operationCount);
} catch (Exception ex) {
output("Recovery FAILED: %d DataFrame(s) containing %d Operation(s) were able to be recovered.", recoveryState.dataFrameCount, recoveryState.operationCount);
ex.printStackTrace(getOut());
Throwable cause = Exceptions.unwrap(ex);
if (cause instanceof DataCorruptionException) {
unwrapDataCorruptionException((DataCorruptionException) cause);
}
}
}
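The unwrapDataCorruptionException helper referenced above is not part of this snippet. A minimal sketch of what such a handler could do follows; it assumes only standard Throwable accessors and the command's output() method used earlier.
// Hypothetical handler sketch: report the cause chain of a DataCorruptionException.
private void unwrapDataCorruptionException(DataCorruptionException dce) {
    int level = 0;
    for (Throwable t = dce; t != null; t = t.getCause()) {
        output("Cause[%d]: %s: %s", level++, t.getClass().getSimpleName(), t.getMessage());
    }
}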
use of io.pravega.segmentstore.server.DataCorruptionException in project pravega by pravega.
the class ContainerRecoverCommand method performRecovery.
@VisibleForTesting
public void performRecovery(int containerId) throws Exception {
@Cleanup val context = createContext();
val readIndexConfig = getCommandArgs().getState().getConfigBuilder().build().getConfig(ReadIndexConfig::builder);
// We create a special "read-only" BK log that will not be doing fencing or otherwise interfere with an active
// container. As a result, due to the nature of BK, it is possible that it may not contain all the latest writes
since the Bookies may not have yet synchronized the LAC on the last (active) ledger.
@Cleanup val log = context.logFactory.createDebugLogWrapper(containerId);
val bkLog = log.asReadOnly();
val recoveryState = new RecoveryState();
val callbacks = new DebugRecoveryProcessor.OperationCallbacks(recoveryState::newOperation, op -> true, // We want to perform the actual recovery.
op -> recoveryState.operationComplete(op, null), recoveryState::operationComplete);
@Cleanup val rp = DebugRecoveryProcessor.create(containerId, bkLog, context.containerConfig, readIndexConfig, getCommandArgs().getState().getExecutor(), callbacks);
try {
rp.performRecovery();
output("Recovery complete: %d DataFrame(s) containing %d Operation(s).", recoveryState.dataFrameCount, recoveryState.operationCount);
} catch (Exception ex) {
output("Recovery FAILED: %d DataFrame(s) containing %d Operation(s) were able to be recovered.", recoveryState.dataFrameCount, recoveryState.operationCount);
ex.printStackTrace(getOut());
Throwable cause = Exceptions.unwrap(ex);
if (cause instanceof DataCorruptionException) {
unwrapDataCorruptionException((DataCorruptionException) cause);
}
if (throwWhenExceptionFound()) {
throw ex;
}
}
}
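Because performRecovery(int) rethrows the failure when throwWhenExceptionFound() returns true, it can be driven from tests. A hedged usage sketch follows; createRecoverCommand() is a hypothetical factory for a fully configured ContainerRecoverCommand.
// Test-style usage sketch: expect recovery of a corrupted container to surface a DataCorruptionException.
ContainerRecoverCommand command = createRecoverCommand(); // Hypothetical setup, not shown here.
AssertExtensions.assertThrows("Recovery of a corrupted container should fail.",
        () -> command.performRecovery(0),
        ex -> Exceptions.unwrap(ex) instanceof DataCorruptionException);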
use of io.pravega.segmentstore.server.DataCorruptionException in project pravega by pravega.
the class WriterTableProcessorTests method testAdd.
/**
* Tests the {@link WriterTableProcessor#add} method and other general (non-flush) methods.
*/
@Test
public void testAdd() throws Exception {
@Cleanup val context = new TestContext();
Assert.assertFalse("Unexpected value from isClosed.", context.processor.isClosed());
Assert.assertFalse("Unexpected value from mustFlush.", context.processor.mustFlush());
Assert.assertEquals("Unexpected LUSN when no data in.", Operation.NO_SEQUENCE_NUMBER, context.processor.getLowestUncommittedSequenceNumber());
// Mismatched segment ids.
AssertExtensions.assertThrows("add() worked with wrong segment id.", () -> context.processor.add(new StreamSegmentAppendOperation(SEGMENT_ID + 1, BufferView.empty(), null)), ex -> ex instanceof IllegalArgumentException);
// Pre-last indexed offset.
context.processor.add(generateRandomEntryAppend(0, context));
Assert.assertFalse("Unexpected value from mustFlush after ignored add().", context.processor.mustFlush());
Assert.assertEquals("Unexpected LUSN after ignored add().", Operation.NO_SEQUENCE_NUMBER, context.processor.getLowestUncommittedSequenceNumber());
// Post-last indexed offset (not allowing gaps)
AssertExtensions.assertThrows("add() allowed first append to be after the last indexed offset.", () -> context.processor.add(generateRandomEntryAppend(INITIAL_LAST_INDEXED_OFFSET + 1, context)), ex -> ex instanceof DataCorruptionException);
// Non-contiguous appends.
val validAppend = generateRandomEntryAppend(INITIAL_LAST_INDEXED_OFFSET, context);
context.processor.add(validAppend);
Assert.assertTrue("Unexpected value from mustFlush after valid add().", context.processor.mustFlush());
Assert.assertEquals("Unexpected LUSN after valid add().", validAppend.getSequenceNumber(), context.processor.getLowestUncommittedSequenceNumber());
AssertExtensions.assertThrows("add() allowed non-contiguous appends.", () -> context.processor.add(generateRandomEntryAppend(validAppend.getLastStreamSegmentOffset() + 1, context)), ex -> ex instanceof DataCorruptionException);
// Delete the segment.
context.metadata.markDeleted();
Assert.assertFalse("Unexpected value from mustFlush after deletion.", context.processor.mustFlush());
context.processor.add(generateRandomEntryAppend(validAppend.getLastStreamSegmentOffset(), context));
Assert.assertEquals("Unexpected LUSN after ignored append due to deletion.", validAppend.getSequenceNumber(), context.processor.getLowestUncommittedSequenceNumber());
// Close the processor and verify.
context.processor.close();
Assert.assertTrue("Unexpected value from isClosed after closing.", context.processor.isClosed());
AssertExtensions.assertThrows("add() worked after closing.", () -> context.processor.add(generateRandomEntryAppend(validAppend.getLastStreamSegmentOffset(), context)), ex -> ex instanceof ObjectClosedException);
}
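The DataCorruptionException cases above all come from one invariant: appends handed to the processor must be contiguous with what it has already seen. A standalone sketch of that rule is shown below; it is illustrative only and does not reproduce the WriterTableProcessor internals, which track these offsets in their own bookkeeping.
// Illustrative contiguity check (not the actual WriterTableProcessor code).
long validateAppendOffset(long appendOffset, long appendLength, long lastIndexedOffset, long nextExpectedOffset) throws DataCorruptionException {
    if (appendOffset + appendLength <= lastIndexedOffset) {
        return nextExpectedOffset; // Already indexed: the append is silently ignored.
    }
    if (appendOffset != nextExpectedOffset) {
        // A gap (or overlap) in the segment data cannot be recovered from by the processor.
        throw new DataCorruptionException(String.format("Non-contiguous append: expected offset %d, got %d.", nextExpectedOffset, appendOffset));
    }
    return appendOffset + appendLength; // New tail of the pending, unflushed data.
}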
use of io.pravega.segmentstore.server.DataCorruptionException in project pravega by pravega.
the class SegmentAggregatorTests method testAddWithBadInput.
/**
* Tests the add() method with invalid arguments.
*/
@Test
public void testAddWithBadInput() throws Exception {
final long badTransactionId = 12345;
final long badParentId = 56789;
final String badParentName = "Foo_Parent";
final String badTransactionName = "Foo_Transaction";
@Cleanup TestContext context = new TestContext(DEFAULT_CONFIG);
// We only need one Transaction for this test.
SegmentAggregator transactionAggregator = context.transactionAggregators[0];
SegmentMetadata transactionMetadata = transactionAggregator.getMetadata();
context.segmentAggregator.initialize(TIMEOUT).join();
transactionAggregator.initialize(TIMEOUT).join();
// Create 2 more segments that can be used to verify MergeSegmentOperation.
context.containerMetadata.mapStreamSegmentId(badParentName, badParentId);
UpdateableSegmentMetadata badTransactionMetadata = context.containerMetadata.mapStreamSegmentId(badTransactionName, badTransactionId);
badTransactionMetadata.setLength(0);
badTransactionMetadata.setStorageLength(0);
// 1. MergeSegmentOperation
// Verify that MergeSegmentOperation cannot be added to the Segment to be merged.
AssertExtensions.assertThrows("add() allowed a MergeSegmentOperation on the Transaction segment.", () -> transactionAggregator.add(generateSimpleMergeTransaction(transactionMetadata.getId(), context)), ex -> ex instanceof IllegalArgumentException);
// 2. StreamSegmentSealOperation.
// 2a. Verify we cannot add a StreamSegmentSealOperation if the segment is not sealed yet.
AssertExtensions.assertThrows("add() allowed a StreamSegmentSealOperation for a non-sealed segment.", () -> {
@Cleanup SegmentAggregator badTransactionAggregator = new SegmentAggregator(badTransactionMetadata, context.dataSource, context.storage, DEFAULT_CONFIG, context.timer, executorService());
badTransactionAggregator.initialize(TIMEOUT).join();
badTransactionAggregator.add(generateSimpleSeal(badTransactionId, context));
}, ex -> ex instanceof DataCorruptionException);
// 2b. Verify that nothing is allowed after Seal (after adding one append to and sealing the Transaction Segment).
StorageOperation transactionAppend1 = generateAppendAndUpdateMetadata(0, transactionMetadata.getId(), context);
transactionAggregator.add(transactionAppend1);
transactionAggregator.add(generateSealAndUpdateMetadata(transactionMetadata.getId(), context));
AssertExtensions.assertThrows("add() allowed operation after seal.", () -> transactionAggregator.add(generateSimpleAppend(transactionMetadata.getId(), context)), ex -> ex instanceof DataCorruptionException);
// 3. CachedStreamSegmentAppendOperation.
final StorageOperation parentAppend1 = generateAppendAndUpdateMetadata(0, SEGMENT_ID, context);
// 3a. Verify we cannot add StreamSegmentAppendOperations.
AssertExtensions.assertThrows("add() allowed a StreamSegmentAppendOperation.", () -> {
// We have the correct offset, but we did not increase the Length.
StreamSegmentAppendOperation badAppend = new StreamSegmentAppendOperation(parentAppend1.getStreamSegmentId(), parentAppend1.getStreamSegmentOffset(), new ByteArraySegment(new byte[(int) parentAppend1.getLength()]), null);
context.segmentAggregator.add(badAppend);
}, ex -> ex instanceof IllegalArgumentException);
// Add this one append to the parent (nothing unusual here); we'll use this for the next tests.
context.segmentAggregator.add(parentAppend1);
// 3b. Verify we cannot add anything beyond the DurableLogOffset (offset or offset+length).
val appendData = new ByteArraySegment("foo".getBytes());
AssertExtensions.assertThrows("add() allowed an operation beyond the DurableLogOffset (offset).", () -> {
// We have the correct offset, but we did not increase the Length.
StreamSegmentAppendOperation badAppend = new StreamSegmentAppendOperation(context.segmentAggregator.getMetadata().getId(), appendData, null);
badAppend.setStreamSegmentOffset(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength());
context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(badAppend));
}, ex -> ex instanceof DataCorruptionException);
((UpdateableSegmentMetadata) context.segmentAggregator.getMetadata()).setLength(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength() + 1);
AssertExtensions.assertThrows("add() allowed an operation beyond the DurableLogOffset (offset+length).", () -> {
// We have the correct offset, but the append exceeds the Length by 1 byte.
StreamSegmentAppendOperation badAppend = new StreamSegmentAppendOperation(context.segmentAggregator.getMetadata().getId(), appendData, null);
badAppend.setStreamSegmentOffset(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength());
context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(badAppend));
}, ex -> ex instanceof DataCorruptionException);
// 3c. Verify contiguity (offsets - we cannot have gaps in the data).
AssertExtensions.assertThrows("add() allowed an operation with wrong offset (too small).", () -> {
StreamSegmentAppendOperation badOffsetAppend = new StreamSegmentAppendOperation(context.segmentAggregator.getMetadata().getId(), appendData, null);
badOffsetAppend.setStreamSegmentOffset(0);
context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(badOffsetAppend));
}, ex -> ex instanceof DataCorruptionException);
AssertExtensions.assertThrows("add() allowed an operation with wrong offset (too large).", () -> {
StreamSegmentAppendOperation badOffsetAppend = new StreamSegmentAppendOperation(context.segmentAggregator.getMetadata().getId(), appendData, null);
badOffsetAppend.setStreamSegmentOffset(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength() + 1);
context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(badOffsetAppend));
}, ex -> ex instanceof DataCorruptionException);
AssertExtensions.assertThrows("add() allowed an operation with wrong offset (too large, but no pending operations).", () -> {
@Cleanup SegmentAggregator badTransactionAggregator = new SegmentAggregator(badTransactionMetadata, context.dataSource, context.storage, DEFAULT_CONFIG, context.timer, executorService());
badTransactionMetadata.setLength(100);
badTransactionAggregator.initialize(TIMEOUT).join();
StreamSegmentAppendOperation badOffsetAppend = new StreamSegmentAppendOperation(context.segmentAggregator.getMetadata().getId(), appendData, null);
badOffsetAppend.setStreamSegmentOffset(1);
context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(badOffsetAppend));
}, ex -> ex instanceof DataCorruptionException);
// 4. Verify Segment Id match.
AssertExtensions.assertThrows("add() allowed an Append operation with wrong Segment Id.", () -> {
StreamSegmentAppendOperation badIdAppend = new StreamSegmentAppendOperation(Integer.MAX_VALUE, appendData, null);
badIdAppend.setStreamSegmentOffset(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength());
context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(badIdAppend));
}, ex -> ex instanceof IllegalArgumentException);
AssertExtensions.assertThrows("add() allowed a StreamSegmentSealOperation with wrong SegmentId.", () -> {
StreamSegmentSealOperation badIdSeal = new StreamSegmentSealOperation(Integer.MAX_VALUE);
badIdSeal.setStreamSegmentOffset(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength());
context.segmentAggregator.add(badIdSeal);
}, ex -> ex instanceof IllegalArgumentException);
AssertExtensions.assertThrows("add() allowed a MergeSegmentOperation with wrong SegmentId.", () -> {
MergeSegmentOperation badIdMerge = new MergeSegmentOperation(Integer.MAX_VALUE, transactionMetadata.getId());
badIdMerge.setStreamSegmentOffset(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength());
badIdMerge.setLength(1);
context.segmentAggregator.add(badIdMerge);
}, ex -> ex instanceof IllegalArgumentException);
// 5. Truncations.
AssertExtensions.assertThrows("add() allowed a StreamSegmentTruncateOperation with a truncation offset beyond the one in the metadata.", () -> {
StreamSegmentTruncateOperation op = new StreamSegmentTruncateOperation(SEGMENT_ID, 10);
op.setSequenceNumber(context.containerMetadata.nextOperationSequenceNumber());
context.segmentAggregator.add(op);
}, ex -> ex instanceof DataCorruptionException);
}
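Taken together, the assertions above outline the precondition order for add(): a mismatched segment id (or a disallowed operation type) is a caller error, while seal violations, offset gaps, and data beyond what the DurableLog has acknowledged are treated as corruption. The standalone sketch below restates that order; the flags and offsets are passed in explicitly here, whereas the real SegmentAggregator derives them from its SegmentMetadata and pending-operation queue.
// Illustrative precondition sketch (not the actual SegmentAggregator code).
void checkAddPreconditions(StorageOperation op, long expectedSegmentId, boolean segmentSealed, long durableLogLength, long expectedOffset) throws DataCorruptionException {
    if (op.getStreamSegmentId() != expectedSegmentId) {
        // Wrong segment (or wrong operation type): a caller error, not corruption.
        throw new IllegalArgumentException("Operation refers to a different segment.");
    }
    if (segmentSealed) {
        // Nothing may be added once a StreamSegmentSealOperation has been accepted.
        throw new DataCorruptionException("Operation added after seal.");
    }
    long endOffset = op.getStreamSegmentOffset() + op.getLength();
    if (op.getStreamSegmentOffset() != expectedOffset || endOffset > durableLogLength) {
        // Gaps, overlaps, or data beyond the acknowledged DurableLog length indicate corruption.
        throw new DataCorruptionException(String.format("Invalid operation offset/length: offset=%d, length=%d.", op.getStreamSegmentOffset(), op.getLength()));
    }
}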