Usage of io.pravega.segmentstore.contracts.StreamSegmentTruncatedException in project pravega (by pravega).
Class RollingStorage, method read().
/**
 * Reads up to {@code length} bytes from the Segment into {@code buffer}, starting at Segment offset {@code offset}
 * and writing into {@code buffer} beginning at {@code bufferOffset}.
 *
 * The Segment is stored as a sequence of SegmentChunks; the read locates the chunk containing {@code offset} via
 * binary search and then reads from successive chunks until {@code length} bytes have been collected.
 *
 * @param handle       A read-enabled SegmentHandle for the Segment to read from.
 * @param offset       The offset within the Segment to begin reading at. Must be less than the Segment's length.
 * @param buffer       The destination buffer.
 * @param bufferOffset The offset within {@code buffer} to begin writing at.
 * @param length       The maximum number of bytes to read. {@code offset + length} must not exceed the Segment's length.
 * @return The number of bytes actually read.
 * @throws StreamSegmentException If the Segment (or one of its chunks) has been truncated or deleted, or another
 *                                storage-level error occurs.
 */
@Override
public int read(SegmentHandle handle, long offset, byte[] buffer, int bufferOffset, int length) throws StreamSegmentException {
    val h = asReadableHandle(handle);
    long traceId = LoggerHelpers.traceEnter(log, "read", handle, offset, length);
    ensureNotDeleted(h);
    Exceptions.checkArrayRange(bufferOffset, length, buffer.length, "bufferOffset", "length");
    if (offset < 0 || bufferOffset < 0 || length < 0 || buffer.length < bufferOffset + length) {
        throw new ArrayIndexOutOfBoundsException(String.format(
                "Offset (%s) must be non-negative, and bufferOffset (%s) and length (%s) must be valid indices into buffer of size %s.",
                offset, bufferOffset, length, buffer.length));
    }

    if (h.isReadOnly() && !h.isSealed() && offset + length > h.length()) {
        // We have a non-sealed read-only handle. It's possible that the SegmentChunks may have been modified since
        // the last time we refreshed it, and we received a request for a read beyond our last known offset. Reload
        // the handle before attempting the read.
        val newHandle = (RollingSegmentHandle) openRead(handle.getSegmentName());
        h.refresh(newHandle);
        log.debug("Handle refreshed: {}.", h);
    }

    Preconditions.checkArgument(offset < h.length(), "Offset %s is beyond the last offset %s of the segment.", offset, h.length());
    Preconditions.checkArgument(offset + length <= h.length(), "Offset %s + length %s is beyond the last offset %s of the segment.", offset, length, h.length());

    // Read in a loop, from each SegmentChunk, until we can't read anymore.
    // If at any point we encounter a StreamSegmentNotExistsException, fail immediately with StreamSegmentTruncatedException (+inner).
    val chunks = h.chunks();
    int currentIndex = CollectionHelpers.binarySearch(chunks, s -> offset < s.getStartOffset() ? -1 : (offset >= s.getLastOffset() ? 1 : 0));
    assert currentIndex >= 0 : "unable to locate first SegmentChunk index.";

    try {
        int bytesRead = 0;
        while (bytesRead < length && currentIndex < chunks.size()) {
            // Verify if this is a known truncated SegmentChunk; if so, bail out quickly.
            SegmentChunk current = chunks.get(currentIndex);
            checkTruncatedSegment(null, h, current);
            if (current.getLength() == 0) {
                // Empty SegmentChunk; don't bother trying to read from it.
                // BUGFIX: we must advance to the next chunk here. The loop condition depends only on bytesRead and
                // currentIndex, and a bare 'continue' changes neither, so hitting an empty chunk would spin forever.
                currentIndex++;
                continue;
            }

            long readOffset = offset + bytesRead - current.getStartOffset();
            int readLength = (int) Math.min(length - bytesRead, current.getLength() - readOffset);
            assert readOffset >= 0 && readLength >= 0 : "negative readOffset or readLength";

            // Read from the actual SegmentChunk into the given buffer.
            try {
                val sh = this.baseStorage.openRead(current.getName());
                int count = this.baseStorage.read(sh, readOffset, buffer, bufferOffset + bytesRead, readLength);
                bytesRead += count;
                if (readOffset + count >= current.getLength()) {
                    // Exhausted this chunk; move on to the next one.
                    currentIndex++;
                }
            } catch (StreamSegmentNotExistsException ex) {
                // The chunk vanished underneath us (concurrent truncation/deletion). Translate into the
                // appropriate truncation exception, preserving the original cause.
                log.debug("SegmentChunk '{}' does not exist anymore ({}).", current, h);
                checkTruncatedSegment(ex, h, current);
            }
        }

        LoggerHelpers.traceLeave(log, "read", traceId, handle, offset, bytesRead);
        return bytesRead;
    } catch (StreamSegmentTruncatedException ex) {
        // It's possible that the Segment has been truncated or deleted altogether using another handle. We need to
        // refresh the handle and throw the appropriate exception.
        val newHandle = (RollingSegmentHandle) openRead(handle.getSegmentName());
        h.refresh(newHandle);
        if (h.isDeleted()) {
            log.debug("Segment '{}' has been deleted. Cannot read anymore.", h);
            throw new StreamSegmentNotExistsException(handle.getSegmentName(), ex);
        } else {
            throw ex;
        }
    }
}
Usage of io.pravega.segmentstore.contracts.StreamSegmentTruncatedException in project pravega (by pravega).
Class RollingStorageTests, method testTruncate().
/**
 * Tests the ability to truncate Segments: data past the truncation point stays readable, truncated
 * SegmentChunks are physically removed from the base storage, and a truncated segment is rejected as
 * a concat() source.
 */
@Test
public void testTruncate() throws Exception {
    // Write small and large writes, alternatively.
    @Cleanup
    val baseStorage = new TestStorage();
    @Cleanup
    val s = new RollingStorage(baseStorage, DEFAULT_ROLLING_POLICY);
    s.initialize(1);
    s.create(SEGMENT_NAME);
    val writeHandle = (RollingSegmentHandle) s.openWrite(SEGMENT_NAME);

    // Open the read handle up front, before any data is written, which forces it to refresh later.
    val readHandle = s.openRead(SEGMENT_NAME);
    val dataStream = new ByteArrayOutputStream();
    populate(s, writeHandle, dataStream);
    byte[] expectedData = dataStream.toByteArray();

    int truncationOffset = 0;
    boolean reachedEnd = false;
    while (!reachedEnd) {
        s.truncate(writeHandle, truncationOffset);

        // Everything at or beyond the truncation point must still read back correctly.
        checkWrittenData(expectedData, truncationOffset, readHandle, s);

        // Each SegmentChunk should exist if and only if it still holds live data (or is the empty chunk
        // sitting exactly at the truncation point).
        for (SegmentChunk segmentChunk : writeHandle.chunks()) {
            boolean shouldExist = segmentChunk.getLastOffset() > truncationOffset
                    || (segmentChunk.getStartOffset() == segmentChunk.getLastOffset() && segmentChunk.getLastOffset() == truncationOffset);
            Assert.assertEquals("Unexpected SegmentChunk truncation status for " + segmentChunk + ", truncation offset = " + truncationOffset,
                    shouldExist, segmentChunk.exists());
            boolean inBaseStorage = baseStorage.exists(segmentChunk.getName());
            Assert.assertEquals("Expected SegmentChunk deletion status for " + segmentChunk + ", truncation offset = " + truncationOffset,
                    shouldExist, inBaseStorage);
            if (!shouldExist) {
                AssertExtensions.assertThrows("Not expecting a read from a truncated SegmentChunk to work.",
                        () -> s.read(readHandle, segmentChunk.getLastOffset() - 1, new byte[1], 0, 1),
                        ex -> ex instanceof StreamSegmentTruncatedException);
            }
        }

        // Advance the truncation point, making sure the final iteration lands exactly on the Segment's end.
        if (truncationOffset >= expectedData.length) {
            reachedEnd = true;
        } else {
            truncationOffset = (int) Math.min(expectedData.length, truncationOffset + DEFAULT_ROLLING_POLICY.getMaxLength() / 2);
        }
    }

    // Append more data and verify it is recorded properly after all the truncations.
    int appendOffset = expectedData.length;
    populate(s, writeHandle, dataStream);
    expectedData = dataStream.toByteArray();
    checkWrittenData(expectedData, appendOffset, readHandle, s);

    // A truncated segment must not be usable as a concat source.
    final String targetSegmentName = "TargetSegment";
    s.create(targetSegmentName);
    val targetSegmentHandle = s.openWrite(targetSegmentName);
    s.seal(writeHandle);
    AssertExtensions.assertThrows("concat() allowed using a truncated segment as a source.",
            () -> s.concat(targetSegmentHandle, 0, SEGMENT_NAME),
            ex -> ex instanceof IllegalStateException);
}
Usage of io.pravega.segmentstore.contracts.StreamSegmentTruncatedException in project pravega (by pravega).
Class PravegaRequestProcessor, method handleReadResult().
/**
 * Handles a readResult.
 * If there are cached entries that can be returned without blocking only these are returned.
 * Otherwise the call will request the data and setup a callback to return the data when it is available.
 * If no data is available but it was detected that the Segment had been truncated beyond the current offset,
 * an appropriate message is sent back over the connection.
 *
 * @param request The original ReadSegment request (supplies the segment name and starting offset).
 * @param result  The ReadResult obtained from the store for that request.
 */
private void handleReadResult(ReadSegment request, ReadResult result) {
    String segment = request.getSegment();
    ArrayList<ReadResultEntryContents> cachedEntries = new ArrayList<>();
    // Drain immediately-available (cached) entries; returns the first entry that could NOT be served
    // from cache (or null if everything was cached).
    ReadResultEntry nonCachedEntry = collectCachedEntries(request.getOffset(), result, cachedEntries);

    // Classify the first non-cached entry: Truncated means the requested offset is before the segment's
    // start; EndOfStreamSegment means the segment is sealed and fully read; Future presumably means the
    // read reached the tail and data is not yet available — TODO confirm against ReadResultEntryType docs.
    boolean truncated = nonCachedEntry != null && nonCachedEntry.getType() == Truncated;
    boolean endOfSegment = nonCachedEntry != null && nonCachedEntry.getType() == EndOfStreamSegment;
    boolean atTail = nonCachedEntry != null && nonCachedEntry.getType() == Future;

    if (!cachedEntries.isEmpty() || endOfSegment) {
        // We managed to collect some data. Send it.
        ByteBuffer data = copyData(cachedEntries);
        SegmentRead reply = new SegmentRead(segment, request.getOffset(), atTail, endOfSegment, data);
        connection.send(reply);
    } else if (truncated) {
        // We didn't collect any data, instead we determined that the current read offset was truncated.
        // Determine the current Start Offset and send that back.
        segmentStore.getStreamSegmentInfo(segment, false, TIMEOUT).thenAccept(info -> connection.send(new SegmentIsTruncated(nonCachedEntry.getStreamSegmentOffset(), segment, info.getStartOffset()))).exceptionally(e -> handleException(nonCachedEntry.getStreamSegmentOffset(), segment, "Read segment", e));
    } else {
        // No cached data, not truncated, not end-of-segment: we must wait for content asynchronously.
        Preconditions.checkState(nonCachedEntry != null, "No ReadResultEntries returned from read!?");
        nonCachedEntry.requestContent(TIMEOUT);
        nonCachedEntry.getContent().thenAccept(contents -> {
            ByteBuffer data = copyData(Collections.singletonList(contents));
            connection.send(new SegmentRead(segment, nonCachedEntry.getStreamSegmentOffset(), false, endOfSegment, data));
        }).exceptionally(e -> {
            if (Exceptions.unwrap(e) instanceof StreamSegmentTruncatedException) {
                // The Segment may have been truncated in Storage after we got this entry but before we managed
                // to make a read. In that case, send the appropriate error back.
                connection.send(new SegmentIsTruncated(nonCachedEntry.getStreamSegmentOffset(), segment, nonCachedEntry.getStreamSegmentOffset()));
            } else {
                handleException(nonCachedEntry.getStreamSegmentOffset(), segment, "Read segment", e);
            }
            return null;
            // The second exceptionally() below guards the first handler itself: if connection.send() or
            // handleException() throws inside the stage above, that failure is still reported rather than lost.
        }).exceptionally(e -> handleException(nonCachedEntry.getStreamSegmentOffset(), segment, "Read segment", e));
    }
}
Aggregations