use of io.pravega.segmentstore.storage.StorageNotPrimaryException in project pravega by pravega.
the class TruncateOperation method handleException.
private Void handleException(Void value, Throwable e) {
    if (null != e) {
        log.debug("{} truncate - exception op={}, segment={}, offset={}.", chunkedSegmentStorage.getLogPrefix(), System.identityHashCode(this), handle.getSegmentName(), offset);
        val ex = Exceptions.unwrap(e);
        if (ex instanceof StorageMetadataWritesFencedOutException) {
            throw new CompletionException(new StorageNotPrimaryException(handle.getSegmentName(), ex));
        }
        throw new CompletionException(ex);
    }
    return value;
}
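For context, here is a minimal stand-alone sketch, not taken from the Pravega source, of how a handler with this shape is typically attached to the tail of a CompletableFuture chain; the class and exception names are placeholders standing in for StorageMetadataWritesFencedOutException and StorageNotPrimaryException.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

public class HandleExceptionSketch {
    // Placeholder exception types for the purposes of this sketch.
    static class FencedOutException extends RuntimeException {
    }

    static class NotPrimaryException extends RuntimeException {
        NotPrimaryException(String segmentName, Throwable cause) {
            super(segmentName, cause);
        }
    }

    // Same shape as handleException above: unwrap, translate the fencing failure, rethrow everything else.
    static Void handleException(String segmentName, Void value, Throwable e) {
        if (e != null) {
            Throwable cause = (e instanceof CompletionException && e.getCause() != null) ? e.getCause() : e;
            if (cause instanceof FencedOutException) {
                throw new CompletionException(new NotPrimaryException(segmentName, cause));
            }
            throw new CompletionException(cause);
        }
        return value;
    }

    public static void main(String[] args) {
        // Simulate a truncate operation that fails because this instance was fenced out.
        CompletableFuture<Void> truncateOp = CompletableFuture.failedFuture(new FencedOutException());
        truncateOp.handle((v, ex) -> handleException("segment-0", v, ex))
                  .whenComplete((v, ex) -> System.out.println("completed with: " + ex));
    }
}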
use of io.pravega.segmentstore.storage.StorageNotPrimaryException in project pravega by pravega.
the class HDFSStorageTest method testZombieFencing.
// region Fencing tests
/**
* A special test case of fencing to verify the behavior of HDFSStorage in the presence of an instance that has
* been fenced out. This case verifies that any ongoing writes properly fail upon fencing. Specifically, we have a
* fenced-out instance that keeps writing and we verify that the write fails once the ownership changes.
* The HDFS behavior in this case is that ongoing writes which execute before the rename
* complete successfully.
*/
@Test(timeout = 60000)
public void testZombieFencing() throws Exception {
    final long epochCount = 30;
    final int writeSize = 1000;
    final String segmentName = "Segment";
    @Cleanup val writtenData = new ByteBufferOutputStream();
    final Random rnd = new Random(0);
    int currentEpoch = 1;

    // Create initial adapter.
    val currentStorage = new AtomicReference<Storage>();
    currentStorage.set(createStorage());
    currentStorage.get().initialize(currentEpoch);

    // Create the Segment and open it for the first time.
    val currentHandle = new AtomicReference<SegmentHandle>(currentStorage.get().create(segmentName, TIMEOUT)
            .thenCompose(v -> currentStorage.get().openWrite(segmentName))
            .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS));

    // Run a number of epochs.
    while (currentEpoch <= epochCount) {
        val oldStorage = currentStorage.get();
        val handle = currentHandle.get();
        val writeBuffer = new byte[writeSize];
        val appends = Futures.loop(() -> true, () -> {
            rnd.nextBytes(writeBuffer);
            return oldStorage.write(handle, writtenData.size(), new ByteArrayInputStream(writeBuffer), writeBuffer.length, TIMEOUT)
                    .thenRun(() -> writtenData.write(writeBuffer));
        }, executorService());

        // Create a new Storage adapter with a new epoch and open-write the Segment, remembering its handle.
        val newStorage = createStorage();
        try {
            newStorage.initialize(++currentEpoch);
            currentHandle.set(newStorage.openWrite(segmentName).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS));
        } catch (Exception ex) {
            newStorage.close();
            throw ex;
        }
        currentStorage.set(newStorage);

        try {
            appends.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
            Assert.fail("Continuous appends on older epoch Adapter did not fail.");
        } catch (Exception ex) {
            val cause = Exceptions.unwrap(ex);
            if (!(cause instanceof StorageNotPrimaryException || cause instanceof StreamSegmentSealedException || cause instanceof StreamSegmentNotExistsException)) {
                // We only expect the appends to fail because they were fenced out or the Segment was sealed.
                Assert.fail("Unexpected exception " + cause);
            }
        } finally {
            oldStorage.close();
        }
    }

    byte[] expectedData = writtenData.getData().getCopy();
    byte[] readData = new byte[expectedData.length];
    @Cleanup val readStorage = createStorage();
    readStorage.initialize(++currentEpoch);
    int bytesRead = readStorage.openRead(segmentName)
            .thenCompose(handle -> readStorage.read(handle, 0, readData, 0, readData.length, TIMEOUT))
            .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    Assert.assertEquals("Unexpected number of bytes read.", readData.length, bytesRead);
    Assert.assertArrayEquals("Unexpected data read back.", expectedData, readData);
}
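The Futures.loop call above keeps issuing writes until one of them fails. A rough stand-alone equivalent of that pattern, an illustrative assumption rather than Pravega's actual Futures.loop implementation, looks like this:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Supplier;

public class AsyncLoopSketch {
    // Each iteration schedules the next one, so the returned future only completes
    // (exceptionally) once an iteration fails - the same shape as the appends loop above.
    static CompletableFuture<Void> loopWhileSuccessful(Supplier<CompletableFuture<Void>> body, ExecutorService executor) {
        return body.get().thenComposeAsync(v -> loopWhileSuccessful(body, executor), executor);
    }

    public static void main(String[] args) {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        AtomicInteger attempts = new AtomicInteger();
        CompletableFuture<Void> appends = loopWhileSuccessful(() -> {
            if (attempts.incrementAndGet() > 5) {
                // Simulate the older-epoch instance being fenced out.
                return CompletableFuture.failedFuture(new IllegalStateException("fenced out"));
            }
            return CompletableFuture.completedFuture(null);
        }, executor);
        appends.handle((v, ex) -> {
            System.out.println("loop ended with: " + ex);
            return null;
        }).join();
        executor.shutdown();
    }
}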
use of io.pravega.segmentstore.storage.StorageNotPrimaryException in project pravega by pravega.
the class HDFSStorage method openWrite.
@Override
public SegmentHandle openWrite(String streamSegmentName) throws StreamSegmentException {
    ensureInitializedAndNotClosed();
    long traceId = LoggerHelpers.traceEnter(log, "openWrite", streamSegmentName);
    long fencedCount = 0;
    do {
        try {
            FileStatus fileStatus = findStatusForSegment(streamSegmentName, true);
            if (!isSealed(fileStatus.getPath())) {
                if (getEpochFromPath(fileStatus.getPath()) > this.epoch) {
                    throw new StorageNotPrimaryException(streamSegmentName);
                }
                Path targetPath = getFilePath(streamSegmentName, this.epoch);
                if (!targetPath.equals(fileStatus.getPath())) {
                    try {
                        this.fileSystem.rename(fileStatus.getPath(), targetPath);
                    } catch (FileNotFoundException e) {
                        // This happens when more than one host is trying to fence and only one of the hosts goes through.
                        // Retry the rename so that the host with the highest epoch gets access.
                        // In the worst case, the current owner of the segment will win this race after a number of attempts
                        // equal to the number of Segment Stores in the race. The upper bound for this number of attempts
                        // is the total number of Segment Store instances in the cluster.
                        // It is safe to retry for MAX_EPOCH times as we are sure that the loop will never run that long.
                        log.warn("Race in fencing. More than two hosts trying to own the segment. Retrying");
                        fencedCount++;
                        continue;
                    }
                }
            }
            // Ensure that the file exists.
            findStatusForSegment(streamSegmentName, true);
            return HDFSSegmentHandle.write(streamSegmentName);
        } catch (IOException e) {
            throw HDFSExceptionHelpers.convertException(streamSegmentName, e);
        }
        // Looping for the maximum possible number of attempts.
    } while (fencedCount <= this.epoch);
    LoggerHelpers.traceLeave(log, "openWrite", traceId, epoch);
    throw new StorageNotPrimaryException("Not able to fence out other writers.");
}
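The fencing in openWrite hinges on the owning epoch being encoded in the segment's file path (getFilePath / getEpochFromPath) and on a rename being used to claim ownership. The following is a hypothetical sketch of such a naming scheme, purely for illustration; the actual Pravega naming convention may differ.

import java.nio.file.Paths;

public class EpochPathSketch {
    // Assumed convention for this sketch: the owning epoch is appended to the segment name.
    static String filePathFor(String segmentName, long epoch) {
        return segmentName + "_" + epoch;
    }

    static long epochFromPath(String path) {
        String name = Paths.get(path).getFileName().toString();
        return Long.parseLong(name.substring(name.lastIndexOf('_') + 1));
    }

    public static void main(String[] args) {
        String owned = filePathFor("Segment", 3);
        System.out.println(owned);                // Segment_3
        System.out.println(epochFromPath(owned)); // 3
        // A higher-epoch instance "claims" the segment by renaming Segment_3 to Segment_4.
        // If that rename throws FileNotFoundException, another instance won the race and the rename
        // is retried, which is exactly the branch handled in openWrite above.
    }
}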
use of io.pravega.segmentstore.storage.StorageNotPrimaryException in project pravega by pravega.
the class HDFSStorage method write.
@Override
public void write(SegmentHandle handle, long offset, InputStream data, int length) throws StreamSegmentException {
    ensureInitializedAndNotClosed();
    long traceId = LoggerHelpers.traceEnter(log, "write", handle, offset, length);
    handle = asWritableHandle(handle);
    FileStatus status = null;
    try {
        status = findStatusForSegment(handle.getSegmentName(), true);
        if (isSealed(status.getPath())) {
            throw new StreamSegmentSealedException(handle.getSegmentName());
        }
        if (getEpochFromPath(status.getPath()) > this.epoch) {
            throw new StorageNotPrimaryException(handle.getSegmentName());
        }
    } catch (IOException e) {
        throw HDFSExceptionHelpers.convertException(handle.getSegmentName(), e);
    }
    Timer timer = new Timer();
    try (FSDataOutputStream stream = this.fileSystem.append(status.getPath())) {
        if (offset != status.getLen()) {
            // Validate the offset only after opening the file, so that a missing file surfaces as FileNotFoundException
            // before we throw BadOffsetException.
            throw new BadOffsetException(handle.getSegmentName(), status.getLen(), offset);
        } else if (stream.getPos() != offset) {
            // Looks like the filesystem changed from underneath us. This could be our bug, but it could be something else.
            log.warn("File change detected for '{}'. Expected length = {}, actual length = {}.", status, status.getLen(), stream.getPos());
            throw new BadOffsetException(handle.getSegmentName(), status.getLen(), offset);
        }
        if (length == 0) {
            // Note: IOUtils.copyBytes with length == 0 will enter an infinite loop, hence the need for this check.
            return;
        }
        // We need to be very careful with IOUtils.copyBytes. There are many overloads with very similar signatures.
        // There is a difference between (InputStream, OutputStream, int, boolean) and (InputStream, OutputStream, long, boolean),
        // in that the one with "int" uses the third arg as a buffer size, and the one with "long" uses it as the number
        // of bytes to copy.
        IOUtils.copyBytes(data, stream, (long) length, false);
        stream.flush();
    } catch (IOException ex) {
        throw HDFSExceptionHelpers.convertException(handle.getSegmentName(), ex);
    }
    HDFSMetrics.WRITE_LATENCY.reportSuccessEvent(timer.getElapsed());
    HDFSMetrics.WRITE_BYTES.add(length);
    LoggerHelpers.traceLeave(log, "write", traceId, handle, offset, length);
}
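The difference between the int and long overloads of IOUtils.copyBytes called out in the comment above can be demonstrated directly. A small sketch, with stream contents and sizes made up for illustration:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.IOUtils;

public class CopyBytesOverloads {
    public static void main(String[] args) throws IOException {
        byte[] source = new byte[4096];

        ByteArrayOutputStream copyAll = new ByteArrayOutputStream();
        // int overload: 1024 is a buffer size, so the entire stream (4096 bytes) is copied.
        IOUtils.copyBytes(new ByteArrayInputStream(source), copyAll, 1024, false);

        ByteArrayOutputStream copySome = new ByteArrayOutputStream();
        // long overload: 1024 is a byte count, so exactly 1024 bytes are copied.
        IOUtils.copyBytes(new ByteArrayInputStream(source), copySome, 1024L, false);

        System.out.println(copyAll.size());  // 4096
        System.out.println(copySome.size()); // 1024
    }
}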
use of io.pravega.segmentstore.storage.StorageNotPrimaryException in project pravega by pravega.
the class HDFSStorage method concat.
@Override
public void concat(SegmentHandle target, long offset, String sourceSegment) throws StreamSegmentException {
    ensureInitializedAndNotClosed();
    long traceId = LoggerHelpers.traceEnter(log, "concat", target, offset, sourceSegment);
    target = asWritableHandle(target);
    // Check for target offset and whether it is sealed.
    FileStatus fileStatus = null;
    try {
        fileStatus = findStatusForSegment(target.getSegmentName(), true);
        if (isSealed(fileStatus.getPath())) {
            throw new StreamSegmentSealedException(target.getSegmentName());
        } else if (getEpochFromPath(fileStatus.getPath()) > this.epoch) {
            throw new StorageNotPrimaryException(target.getSegmentName());
        } else if (fileStatus.getLen() != offset) {
            throw new BadOffsetException(target.getSegmentName(), fileStatus.getLen(), offset);
        }
    } catch (IOException ex) {
        throw HDFSExceptionHelpers.convertException(target.getSegmentName(), ex);
    }
    try {
        FileStatus sourceFile = findStatusForSegment(sourceSegment, true);
        Preconditions.checkState(isSealed(sourceFile.getPath()), "Cannot concat segment '%s' into '%s' because it is not sealed.", sourceSegment, target.getSegmentName());
        // Concat source file into target.
        this.fileSystem.concat(fileStatus.getPath(), new Path[] { sourceFile.getPath() });
    } catch (IOException ex) {
        throw HDFSExceptionHelpers.convertException(sourceSegment, ex);
    }
    LoggerHelpers.traceLeave(log, "concat", traceId, target, offset, sourceSegment);
}
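A usage sketch of the seal-then-concat ordering that concat enforces; the in-memory store below is a stand-in used purely for illustration and is not Pravega's Storage API.

import java.util.HashMap;
import java.util.Map;

public class ConcatUsageSketch {
    static class InMemoryStore {
        final Map<String, StringBuilder> data = new HashMap<>();
        final Map<String, Boolean> sealed = new HashMap<>();

        void create(String name) {
            data.put(name, new StringBuilder());
            sealed.put(name, false);
        }

        void write(String name, String payload) {
            data.get(name).append(payload);
        }

        void seal(String name) {
            sealed.put(name, true);
        }

        long length(String name) {
            return data.get(name).length();
        }

        void concat(String target, long offset, String source) {
            // Mirrors the checks above: the source must be sealed and the offset must match the target length.
            if (!sealed.get(source)) {
                throw new IllegalStateException("source segment must be sealed before concat");
            }
            if (offset != length(target)) {
                throw new IllegalArgumentException("bad offset");
            }
            data.get(target).append(data.get(source));
            data.remove(source);
        }
    }

    public static void main(String[] args) {
        InMemoryStore store = new InMemoryStore();
        store.create("target");
        store.create("source");
        store.write("target", "abc");
        store.write("source", "def");
        store.seal("source");                                     // required before concat
        store.concat("target", store.length("target"), "source");
        System.out.println(store.data.get("target"));             // abcdef
    }
}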