use of io.pravega.segmentstore.contracts.BadOffsetException in project pravega by pravega.
the class WriteOperationTests method testNormalWrite.
/**
 * Tests a normal write across many epochs.
 */
@Test
public void testNormalWrite() throws Exception {
    val rnd = new Random(0);
    @Cleanup val fs = new MockFileSystem();
    new CreateOperation(SEGMENT_NAME, newContext(0, fs)).call();
    int offset = 0;
    val writtenData = new ByteArrayOutputStream();
    List<FileDescriptor> files = null;
    for (int fileId = 0; fileId < FILE_COUNT; fileId++) {
        val context = newContext(fileId, fs);
        val handle = new OpenWriteOperation(SEGMENT_NAME, context).call();
        files = handle.getFiles();
        for (int writeId = 0; writeId < WRITES_PER_FILE; writeId++) {
            byte[] data = new byte[WRITE_SIZE];
            rnd.nextBytes(data);

            // Bad-offset write.
            AssertExtensions.assertThrows("WriteOperation allowed writing at wrong offset.",
                    new WriteOperation(handle, offset + 1, new ByteArrayInputStream(data), data.length, context)::run,
                    ex -> ex instanceof BadOffsetException);

            // Successful write.
            new WriteOperation(handle, offset, new ByteArrayInputStream(data), data.length, context).run();
            writtenData.write(data);
            offset += data.length;

            // Zero-length write (should succeed, but be a no-op).
            new WriteOperation(handle, offset, new ByteArrayInputStream(data), 0, context).run();
        }
    }

    // Check written data via file system reads. ReadOperationTests verifies the same using ReadOperations.
    byte[] expectedData = writtenData.toByteArray();
    int expectedDataOffset = 0;
    for (int i = 0; i < files.size(); i++) {
        FileDescriptor f = files.get(i);
        int len = (int) fs.getFileStatus(f.getPath()).getLen();
        int expectedLen = WRITE_SIZE * WRITES_PER_FILE;
        if (i < files.size() - 1) {
            // This is because OpenWrite combines previous files into one.
            expectedLen *= FILE_COUNT - 1;
        }

        Assert.assertEquals("Unexpected length for file " + f, expectedLen, len);
        @Cleanup val inputStream = fs.open(f.getPath(), WRITE_SIZE);
        byte[] fileReadBuffer = new byte[len];
        inputStream.readFully(0, fileReadBuffer);
        AssertExtensions.assertArrayEquals("Unexpected contents for file " + f, expectedData, expectedDataOffset, fileReadBuffer, 0, len);
        expectedDataOffset += len;
    }
}
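The test above exercises the offset-conditional append contract that BadOffsetException represents: a write is accepted only when the supplied offset equals the current length of the segment, so a misplaced or replayed write cannot silently corrupt data. Below is a minimal sketch of that contract using a hypothetical in-memory segment; none of these names are Pravega APIs.

import java.io.ByteArrayOutputStream;
import java.io.IOException;

// Illustrative only; not part of the Pravega codebase.
class InMemorySegment {
    private final ByteArrayOutputStream data = new ByteArrayOutputStream();

    // Appends 'bytes' only if 'offset' matches the current length; a zero-length
    // write at the correct offset is naturally a no-op, mirroring the test above.
    synchronized void write(long offset, byte[] bytes) throws IOException {
        if (offset != this.data.size()) {
            // Pravega's Storage implementations throw BadOffsetException at this point.
            throw new IllegalStateException(String.format(
                    "Bad offset: expected %d, given %d.", this.data.size(), offset));
        }
        this.data.write(bytes);
    }

    synchronized long getLength() {
        return this.data.size();
    }
}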
use of io.pravega.segmentstore.contracts.BadOffsetException in project pravega by pravega.
the class SegmentAggregator method flushPendingAppends.
/**
 * Flushes all Append Operations that can be flushed up to the maximum allowed flush size.
 *
 * @param timeout Timeout for the operation.
 * @return A CompletableFuture that, when completed, will contain the result from the flush operation.
 */
private CompletableFuture<FlushResult> flushPendingAppends(Duration timeout) {
    // Gather an InputStream made up of all the operations we can flush.
    FlushArgs flushArgs;
    try {
        flushArgs = getFlushArgs();
    } catch (DataCorruptionException ex) {
        return Futures.failedFuture(ex);
    }

    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "flushPendingAppends");
    if (flushArgs.getLength() == 0) {
        // Nothing to flush.
        FlushResult result = new FlushResult();
        LoggerHelpers.traceLeave(log, this.traceObjectId, "flushPendingAppends", traceId, result);
        return CompletableFuture.completedFuture(result);
    }

    // Flush them.
    InputStream inputStream = flushArgs.getStream();
    return this.storage
            .write(this.handle.get(), this.metadata.getStorageLength(), inputStream, flushArgs.getLength(), timeout)
            .thenApplyAsync(v -> {
                FlushResult result = updateStatePostFlush(flushArgs);
                LoggerHelpers.traceLeave(log, this.traceObjectId, "flushPendingAppends", traceId, result);
                return result;
            }, this.executor)
            .exceptionally(ex -> {
                if (Exceptions.unwrap(ex) instanceof BadOffsetException) {
                    // We attempted to write at an offset that already contained other data. This can happen for a
                    // number of reasons, but we do not have enough information here to determine why. We need to
                    // enter reconciliation mode, and hope for the best.
                    setState(AggregatorState.ReconciliationNeeded);
                }

                // Rethrow all exceptions.
                throw new CompletionException(ex);
            });
}
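The exceptionally block above follows a pattern worth noting (mergeWith below uses the same one): asynchronous failures arrive wrapped in CompletionException, so the code unwraps to the root cause before deciding how to react, and then rethrows so downstream stages still observe the failure. A hedged sketch of that pattern, with a local stand-in for io.pravega.common.Exceptions.unwrap and a hypothetical helper name:

import io.pravega.segmentstore.contracts.BadOffsetException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

// Sketch only: 'unwrap' approximates what Exceptions.unwrap does here.
final class ReconciliationHandler {
    private static Throwable unwrap(Throwable ex) {
        while (ex instanceof CompletionException && ex.getCause() != null) {
            ex = ex.getCause();
        }
        return ex;
    }

    // Reacts to offset conflicts (e.g. setState(AggregatorState.ReconciliationNeeded))
    // but never swallows the failure: the returned future still completes exceptionally.
    static <T> CompletableFuture<T> handleOffsetConflicts(CompletableFuture<T> future, Runnable enterReconciliation) {
        return future.exceptionally(ex -> {
            if (unwrap(ex) instanceof BadOffsetException) {
                enterReconciliation.run();
            }
            throw new CompletionException(ex);
        });
    }
}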
use of io.pravega.segmentstore.contracts.BadOffsetException in project pravega by pravega.
the class SegmentAggregator method mergeWith.
/**
 * Merges the Transaction StreamSegment with the given metadata into this one, at the current offset.
 *
 * @param transactionMetadata The metadata of the Transaction StreamSegment to merge.
 * @param mergeOp             The MergeTransactionOperation being processed.
 * @param timer               Timer for the operation.
 * @return A CompletableFuture that, when completed, will contain the number of bytes that were merged into this
 * StreamSegment. If failed, the Future will contain the exception that caused it.
 */
private CompletableFuture<FlushResult> mergeWith(UpdateableSegmentMetadata transactionMetadata, MergeTransactionOperation mergeOp, TimeoutTimer timer) {
    if (transactionMetadata.isDeleted()) {
        return Futures.failedFuture(new DataCorruptionException(String.format(
                "Attempted to merge with deleted Transaction segment '%s'.", transactionMetadata.getName())));
    }

    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "mergeWith",
            transactionMetadata.getId(), transactionMetadata.getName(), transactionMetadata.isSealedInStorage());
    FlushResult result = new FlushResult();
    if (!transactionMetadata.isSealedInStorage() || transactionMetadata.getLength() > transactionMetadata.getStorageLength()) {
        // Nothing to do. The given Transaction is not yet eligible for merger.
        LoggerHelpers.traceLeave(log, this.traceObjectId, "mergeWith", traceId, result);
        return CompletableFuture.completedFuture(result);
    }

    AtomicLong mergedLength = new AtomicLong();
    return this.storage
            .getStreamSegmentInfo(transactionMetadata.getName(), timer.getRemaining())
            .thenAcceptAsync(transProperties -> {
                // Check that the Storage agrees with our metadata (if not, we have a problem ...).
                if (transProperties.getLength() != transactionMetadata.getStorageLength()) {
                    throw new CompletionException(new DataCorruptionException(String.format(
                            "Transaction Segment '%s' cannot be merged into parent '%s' because its metadata disagrees with the Storage. Metadata.StorageLength=%d, Storage.StorageLength=%d",
                            transactionMetadata.getName(), this.metadata.getName(), transactionMetadata.getStorageLength(), transProperties.getLength())));
                }

                if (transProperties.getLength() != mergeOp.getLength()) {
                    throw new CompletionException(new DataCorruptionException(String.format(
                            "Transaction Segment '%s' cannot be merged into parent '%s' because the declared length in the operation disagrees with the Storage. Operation.Length=%d, Storage.StorageLength=%d",
                            transactionMetadata.getName(), this.metadata.getName(), mergeOp.getLength(), transProperties.getLength())));
                }

                mergedLength.set(transProperties.getLength());
            }, this.executor)
            .thenComposeAsync(v1 -> storage.concat(this.handle.get(), mergeOp.getStreamSegmentOffset(), transactionMetadata.getName(), timer.getRemaining()), this.executor)
            .thenComposeAsync(v2 -> storage.getStreamSegmentInfo(this.metadata.getName(), timer.getRemaining()), this.executor)
            .thenApplyAsync(segmentProperties -> {
                // We have processed a MergeTransactionOperation: pop the first operation off and decrement the counter.
                StorageOperation processedOperation = this.operations.removeFirst();
                assert processedOperation instanceof MergeTransactionOperation : "First outstanding operation was not a MergeTransactionOperation";
                assert ((MergeTransactionOperation) processedOperation).getTransactionSegmentId() == transactionMetadata.getId()
                        : "First outstanding operation was a MergeTransactionOperation for the wrong Transaction id.";
                int newCount = this.mergeTransactionCount.decrementAndGet();
                assert newCount >= 0 : "Negative value for mergeTransactionCount";

                // Post-merger validation. Verify we are still in agreement with the Storage.
                long expectedNewLength = this.metadata.getStorageLength() + mergedLength.get();
                if (segmentProperties.getLength() != expectedNewLength) {
                    throw new CompletionException(new DataCorruptionException(String.format(
                            "Transaction Segment '%s' was merged into parent '%s' but the parent segment has an unexpected StorageLength after the merger. Previous=%d, MergeLength=%d, Expected=%d, Actual=%d",
                            transactionMetadata.getName(), this.metadata.getName(), this.metadata.getStorageLength(), mergedLength.get(), expectedNewLength, segmentProperties.getLength())));
                }

                updateMetadata(segmentProperties);
                updateMetadataForTransactionPostMerger(transactionMetadata);
                this.lastFlush.set(this.timer.getElapsed());
                result.withMergedBytes(mergedLength.get());
                LoggerHelpers.traceLeave(log, this.traceObjectId, "mergeWith", traceId, result);
                return result;
            }, this.executor)
            .exceptionally(ex -> {
                Throwable realEx = Exceptions.unwrap(ex);
                if (realEx instanceof BadOffsetException || realEx instanceof StreamSegmentNotExistsException) {
                    // We either attempted to write at an offset that already contained other data, or the Transaction
                    // Segment no longer exists. This can happen for a number of reasons, but we do not have enough
                    // information here to determine why. We need to enter reconciliation mode, and hope for the best.
                    setState(AggregatorState.ReconciliationNeeded);
                }

                // Rethrow all exceptions.
                throw new CompletionException(ex);
            });
}
use of io.pravega.segmentstore.contracts.BadOffsetException in project pravega by pravega.
the class RollingStorage method updateHandle.
private void updateHandle(RollingSegmentHandle handle, byte[] data) throws StreamSegmentException {
    try {
        this.baseStorage.write(handle.getHeaderHandle(), handle.getHeaderLength(), new ByteArrayInputStream(data), data.length);
        handle.increaseHeaderLength(data.length);
        log.debug("Header for '{}' updated with {} bytes for a length of {}.", handle.getSegmentName(), data.length, handle.getHeaderLength());
    } catch (BadOffsetException ex) {
        // If we get a BadOffsetException when writing the Handle, it means it was modified externally.
        throw new StorageNotPrimaryException(handle.getSegmentName(), ex);
    }
}
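The mapping above works because the header file doubles as a fencing token: each writer appends at the header length it cached when it opened the segment, so once another instance has appended, the stale writer's next conditional write is rejected and can be reported as loss of ownership. A simplified sketch of that idea follows; the types are hypothetical, and the conditional append is modeled as a compare-and-set rather than a real file write.

import java.util.concurrent.atomic.AtomicLong;

// Illustrative only; not Pravega code.
class HeaderFenceSketch {
    static class Header {
        private final AtomicLong length = new AtomicLong();

        // Conditional append: succeeds only if the caller's cached length is current.
        boolean tryAppend(long expectedLength, int byteCount) {
            return this.length.compareAndSet(expectedLength, expectedLength + byteCount);
        }

        long getLength() {
            return this.length.get();
        }
    }

    public static void main(String[] args) {
        Header header = new Header();
        long writerA = header.getLength(); // writer A opens the segment
        long writerB = header.getLength(); // writer B opens it concurrently
        System.out.println(header.tryAppend(writerB, 20)); // true: B updates the header first
        System.out.println(header.tryAppend(writerA, 10)); // false: A is stale; in RollingStorage this is
                                                           // the BadOffsetException -> StorageNotPrimaryException case
    }
}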
use of io.pravega.segmentstore.contracts.BadOffsetException in project pravega by pravega.
the class StorageTestBase method testWrite.
/**
 * Tests the write() method.
 *
 * @throws Exception if an unexpected error occurred.
 */
@Test
public void testWrite() throws Exception {
    String segmentName = "foo_write";
    int appendCount = 100;
    try (Storage s = createStorage()) {
        s.initialize(DEFAULT_EPOCH);
        createSegment(segmentName, s);

        // Invalid handles.
        val readOnlyHandle = s.openRead(segmentName).join();
        assertThrows("write() did not throw for read-only handle.",
                () -> s.write(readOnlyHandle, 0, new ByteArrayInputStream("h".getBytes()), 1, TIMEOUT),
                ex -> ex instanceof IllegalArgumentException);
        assertThrows("write() did not throw for handle pointing to a non-existent segment.",
                () -> s.write(createInexistentSegmentHandle(s, false), 0, new ByteArrayInputStream("h".getBytes()), 1, TIMEOUT),
                ex -> ex instanceof StreamSegmentNotExistsException);

        val writeHandle = s.openWrite(segmentName).join();
        long offset = 0;
        for (int j = 0; j < appendCount; j++) {
            byte[] writeData = String.format(APPEND_FORMAT, segmentName, j).getBytes();

            // We intentionally add some garbage at the end of the dataStream to verify that write() takes into
            // account the value of the "length" argument.
            val dataStream = new SequenceInputStream(new ByteArrayInputStream(writeData), new ByteArrayInputStream(new byte[100]));
            s.write(writeHandle, offset, dataStream, writeData.length, TIMEOUT).join();
            offset += writeData.length;
        }

        // Check bad offsets.
        final long finalOffset = offset;
        assertThrows("write() did not throw for a bad (smaller) offset.",
                () -> s.write(writeHandle, finalOffset - 1, new ByteArrayInputStream("h".getBytes()), 1, TIMEOUT),
                ex -> ex instanceof BadOffsetException);
        assertThrows("write() did not throw for a bad (larger) offset.",
                () -> s.write(writeHandle, finalOffset + 1, new ByteArrayInputStream("h".getBytes()), 1, TIMEOUT),
                ex -> ex instanceof BadOffsetException);

        // Check post-delete write.
        s.delete(writeHandle, TIMEOUT).join();
        assertThrows("write() did not throw for a deleted StreamSegment.",
                () -> s.write(writeHandle, 0, new ByteArrayInputStream(new byte[1]), 1, TIMEOUT),
                ex -> ex instanceof StreamSegmentNotExistsException);
    }
}
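The garbage bytes appended via SequenceInputStream matter because write() must copy exactly the requested number of bytes and ignore anything left in the stream. A hedged sketch of the bounded copy an implementation might perform (the helper name is hypothetical, not a Pravega API):

import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

// Illustrative helper: copies exactly 'length' bytes, so any trailing garbage
// in 'source' (as in testWrite's SequenceInputStream) is never written.
final class BoundedCopy {
    static void copyExactly(InputStream source, OutputStream target, int length) throws IOException {
        byte[] buffer = new byte[8192];
        int remaining = length;
        while (remaining > 0) {
            int read = source.read(buffer, 0, Math.min(buffer.length, remaining));
            if (read < 0) {
                throw new EOFException(remaining + " bytes still expected when the stream ended.");
            }
            target.write(buffer, 0, read);
            remaining -= read;
        }
    }
}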