
Example 21 with StorageNotPrimaryException

Use of io.pravega.segmentstore.storage.StorageNotPrimaryException in project pravega by pravega.

From class TruncateOperation, method handleException.

private Void handleException(Void value, Throwable e) {
    if (null != e) {
        log.debug("{} truncate - exception op={}, segment={}, offset={}.", chunkedSegmentStorage.getLogPrefix(), System.identityHashCode(this), handle.getSegmentName(), offset);
        val ex = Exceptions.unwrap(e);
        // Translate a metadata-layer fencing failure into the Storage-layer contract:
        // being fenced out of the metadata means this instance is no longer the primary owner.
        if (ex instanceof StorageMetadataWritesFencedOutException) {
            throw new CompletionException(new StorageNotPrimaryException(handle.getSegmentName(), ex));
        }
        throw new CompletionException(ex);
    }
    return value;
}
Also used: lombok.val(lombok.val), CompletionException(java.util.concurrent.CompletionException), StorageNotPrimaryException(io.pravega.segmentstore.storage.StorageNotPrimaryException), StorageMetadataWritesFencedOutException(io.pravega.segmentstore.storage.metadata.StorageMetadataWritesFencedOutException)
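
The pattern above converts a metadata-layer fencing failure into the Storage-layer StorageNotPrimaryException before rethrowing, so callers on the CompletableFuture chain see it wrapped in a CompletionException and must unwrap it before reacting. A minimal caller-side sketch of that handling (the class name and the simulated failed future are hypothetical; only the exception types come from Pravega):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import io.pravega.segmentstore.storage.StorageNotPrimaryException;

public class NotPrimaryHandlingSketch {
    public static void main(String[] args) {
        // Simulate an async truncate that failed because this instance was fenced out.
        CompletableFuture<Void> truncate = CompletableFuture.failedFuture(
                new CompletionException(new StorageNotPrimaryException("Segment")));
        truncate.exceptionally(e -> {
            // Unwrap the async wrapper to get at the real cause.
            Throwable cause = (e instanceof CompletionException && e.getCause() != null) ? e.getCause() : e;
            if (cause instanceof StorageNotPrimaryException) {
                // Another Segment Store instance now owns the segment: stop writing and re-acquire ownership.
                System.out.println("Fenced out: " + cause.getMessage());
                return null;
            }
            throw new CompletionException(cause);
        }).join();
    }
}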

Example 22 with StorageNotPrimaryException

Use of io.pravega.segmentstore.storage.StorageNotPrimaryException in project pravega by pravega.

From class HDFSStorageTest, method testZombieFencing.

// region Fencing tests
/**
 * A special test case of fencing to verify the behavior of HDFSStorage in the presence of an instance that has
 * been fenced out. This case verifies that any ongoing writes properly fail upon fencing. Specifically, we have a
 * fenced-out instance that keeps writing and we verify that the write fails once the ownership changes.
 * The HDFS behavior is such in this case is that ongoing writes that execute before the rename
 * complete successfully.
 */
@Test(timeout = 60000)
public void testZombieFencing() throws Exception {
    final long epochCount = 30;
    final int writeSize = 1000;
    final String segmentName = "Segment";
    @Cleanup val writtenData = new ByteBufferOutputStream();
    final Random rnd = new Random(0);
    int currentEpoch = 1;
    // Create initial adapter.
    val currentStorage = new AtomicReference<Storage>();
    currentStorage.set(createStorage());
    currentStorage.get().initialize(currentEpoch);
    // Create the Segment and open it for the first time.
    val currentHandle = new AtomicReference<SegmentHandle>(currentStorage.get().create(segmentName, TIMEOUT).thenCompose(v -> currentStorage.get().openWrite(segmentName)).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS));
    // Run a number of epochs.
    while (currentEpoch <= epochCount) {
        val oldStorage = currentStorage.get();
        val handle = currentHandle.get();
        val writeBuffer = new byte[writeSize];
        val appends = Futures.loop(() -> true, () -> {
            rnd.nextBytes(writeBuffer);
            return oldStorage.write(handle, writtenData.size(), new ByteArrayInputStream(writeBuffer), writeBuffer.length, TIMEOUT).thenRun(() -> writtenData.write(writeBuffer));
        }, executorService());
        // Create a new Storage adapter with a new epoch and open-write the Segment, remembering its handle.
        val newStorage = createStorage();
        try {
            newStorage.initialize(++currentEpoch);
            currentHandle.set(newStorage.openWrite(segmentName).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS));
        } catch (Exception ex) {
            newStorage.close();
            throw ex;
        }
        currentStorage.set(newStorage);
        try {
            appends.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
            Assert.fail("Continuous appends on older epoch Adapter did not fail.");
        } catch (Exception ex) {
            val cause = Exceptions.unwrap(ex);
            if (!(cause instanceof StorageNotPrimaryException || cause instanceof StreamSegmentSealedException || cause instanceof StreamSegmentNotExistsException)) {
                // The appends may only fail because this instance was fenced out, the Segment was sealed,
                // or the Segment was not found under its old name (the new owner may have renamed it).
                Assert.fail("Unexpected exception " + cause);
            }
        } finally {
            oldStorage.close();
        }
    }
    byte[] expectedData = writtenData.getData().getCopy();
    byte[] readData = new byte[expectedData.length];
    @Cleanup val readStorage = createStorage();
    readStorage.initialize(++currentEpoch);
    int bytesRead = readStorage.openRead(segmentName).thenCompose(handle -> readStorage.read(handle, 0, readData, 0, readData.length, TIMEOUT)).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    Assert.assertEquals("Unexpected number of bytes read.", readData.length, bytesRead);
    Assert.assertArrayEquals("Unexpected data read back.", expectedData, readData);
}
Also used: lombok.val(lombok.val), Storage(io.pravega.segmentstore.storage.Storage), MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster), Progressable(org.apache.hadoop.util.Progressable), ByteArrayOutputStream(java.io.ByteArrayOutputStream), StreamSegmentNotExistsException(io.pravega.segmentstore.contracts.StreamSegmentNotExistsException), SneakyThrows(lombok.SneakyThrows), AssertExtensions(io.pravega.test.common.AssertExtensions), FileSystem(org.apache.hadoop.fs.FileSystem), AclException(org.apache.hadoop.hdfs.protocol.AclException), Exceptions(io.pravega.common.Exceptions), StorageNotPrimaryException(io.pravega.segmentstore.storage.StorageNotPrimaryException), AsyncStorageWrapper(io.pravega.segmentstore.storage.AsyncStorageWrapper), Cleanup(lombok.Cleanup), Random(java.util.Random), AtomicReference(java.util.concurrent.atomic.AtomicReference), FsAction(org.apache.hadoop.fs.permission.FsAction), SegmentProperties(io.pravega.segmentstore.contracts.SegmentProperties), FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream), StreamSegmentSealedException(io.pravega.segmentstore.contracts.StreamSegmentSealedException), SegmentHandle(io.pravega.segmentstore.storage.SegmentHandle), ByteArrayInputStream(java.io.ByteArrayInputStream), StorageTestBase(io.pravega.segmentstore.storage.StorageTestBase), Configuration(org.apache.hadoop.conf.Configuration), After(org.junit.After), Timeout(org.junit.rules.Timeout), Path(org.apache.hadoop.fs.Path), RollingStorageTestBase(io.pravega.segmentstore.storage.rolling.RollingStorageTestBase), Before(org.junit.Before), DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem), Files(java.nio.file.Files), IOException(java.io.IOException), Test(org.junit.Test), FileHelpers(io.pravega.common.io.FileHelpers), File(java.io.File), ByteBufferOutputStream(io.pravega.common.io.ByteBufferOutputStream), TimeUnit(java.util.concurrent.TimeUnit), Rule(org.junit.Rule), Assert(org.junit.Assert), Futures(io.pravega.common.concurrent.Futures), TemporaryFolder(org.junit.rules.TemporaryFolder)
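
The assertions in this test lean on Exceptions.unwrap to peel the ExecutionException/CompletionException layers off the asynchronous failure before inspecting the cause. Its effect is roughly the following (a simplified stand-in for illustration, not the actual io.pravega.common.Exceptions implementation):

import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutionException;

public class UnwrapSketch {
    // Strip the async wrapper layers so callers can inspect the real cause.
    static Throwable unwrap(Throwable t) {
        while ((t instanceof CompletionException || t instanceof ExecutionException)
                && t.getCause() != null) {
            t = t.getCause();
        }
        return t;
    }

    public static void main(String[] args) {
        Throwable wrapped = new ExecutionException(new CompletionException(new IllegalStateException("fenced")));
        System.out.println(unwrap(wrapped).getMessage()); // prints "fenced"
    }
}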

Example 23 with StorageNotPrimaryException

Use of io.pravega.segmentstore.storage.StorageNotPrimaryException in project pravega by pravega.

From class HDFSStorage, method openWrite.

@Override
public SegmentHandle openWrite(String streamSegmentName) throws StreamSegmentException {
    ensureInitializedAndNotClosed();
    long traceId = LoggerHelpers.traceEnter(log, "openWrite", streamSegmentName);
    long fencedCount = 0;
    do {
        try {
            FileStatus fileStatus = findStatusForSegment(streamSegmentName, true);
            if (!isSealed(fileStatus.getPath())) {
                if (getEpochFromPath(fileStatus.getPath()) > this.epoch) {
                    throw new StorageNotPrimaryException(streamSegmentName);
                }
                Path targetPath = getFilePath(streamSegmentName, this.epoch);
                if (!targetPath.equals(fileStatus.getPath())) {
                    try {
                        this.fileSystem.rename(fileStatus.getPath(), targetPath);
                    } catch (FileNotFoundException e) {
                        // This happens when more than one host is trying to fence and only one of the hosts goes through.
                        // Retry the rename so that the host with the highest epoch gets access.
                        // In the worst case, the current owner of the segment will win this race after a number of attempts
                        // equal to the number of Segment Stores in the race; the upper bound for this number of attempts
                        // is the total number of Segment Store instances in the cluster.
                        // It is therefore safe to retry up to MAX_EPOCH times, as we are sure the loop will never run that long.
                        log.warn("Race in fencing: more than one host is trying to own the segment. Retrying.");
                        fencedCount++;
                        continue;
                    }
                }
            }
            // Ensure that file exists
            findStatusForSegment(streamSegmentName, true);
            return HDFSSegmentHandle.write(streamSegmentName);
        } catch (IOException e) {
            throw HDFSExceptionHelpers.convertException(streamSegmentName, e);
        }
    // Loop for at most the maximum possible number of attempts.
    } while (fencedCount <= this.epoch);
    LoggerHelpers.traceLeave(log, "openWrite", traceId, epoch);
    throw new StorageNotPrimaryException("Unable to fence out other writers.");
}
Also used: Path(org.apache.hadoop.fs.Path), FileStatus(org.apache.hadoop.fs.FileStatus), FileNotFoundException(java.io.FileNotFoundException), IOException(java.io.IOException), StorageNotPrimaryException(io.pravega.segmentstore.storage.StorageNotPrimaryException)
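
openWrite fences out competing owners by encoding the owner's epoch in the segment's file name and renaming the file to claim ownership; any instance that later finds a higher epoch in the name knows it has been superseded. A minimal sketch of that idea (the "<segmentName>_<epoch>" name format and the helper names are assumptions for illustration, not Pravega's exact layout):

import org.apache.hadoop.fs.Path;

public class EpochFencingSketch {
    // Hypothetical file-name format: "<segmentName>_<epoch>".
    static long epochFromPath(Path p) {
        String name = p.getName();
        return Long.parseLong(name.substring(name.lastIndexOf('_') + 1));
    }

    // Throws if a newer owner has already claimed the segment; Pravega surfaces
    // this condition as StorageNotPrimaryException.
    static void ensurePrimary(Path segmentFile, long myEpoch) {
        long fileEpoch = epochFromPath(segmentFile);
        if (fileEpoch > myEpoch) {
            throw new IllegalStateException("Fenced out by epoch " + fileEpoch);
        }
    }

    public static void main(String[] args) {
        ensurePrimary(new Path("/segments/Segment_5"), 7); // ok: our epoch (7) is newer
        try {
            ensurePrimary(new Path("/segments/Segment_9"), 7);
        } catch (IllegalStateException e) {
            System.out.println(e.getMessage()); // Fenced out by epoch 9
        }
    }
}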

Example 24 with StorageNotPrimaryException

Use of io.pravega.segmentstore.storage.StorageNotPrimaryException in project pravega by pravega.

From class HDFSStorage, method write.

@Override
public void write(SegmentHandle handle, long offset, InputStream data, int length) throws StreamSegmentException {
    ensureInitializedAndNotClosed();
    long traceId = LoggerHelpers.traceEnter(log, "write", handle, offset, length);
    handle = asWritableHandle(handle);
    FileStatus status = null;
    try {
        status = findStatusForSegment(handle.getSegmentName(), true);
        if (isSealed(status.getPath())) {
            throw new StreamSegmentSealedException(handle.getSegmentName());
        }
        if (getEpochFromPath(status.getPath()) > this.epoch) {
            throw new StorageNotPrimaryException(handle.getSegmentName());
        }
    } catch (IOException e) {
        throw HDFSExceptionHelpers.convertException(handle.getSegmentName(), e);
    }
    Timer timer = new Timer();
    try (FSDataOutputStream stream = this.fileSystem.append(status.getPath())) {
        if (offset != status.getLen()) {
            // Validate the offset only after opening the file, so that a missing file surfaces as FileNotFoundException
            // before we throw BadOffsetException.
            throw new BadOffsetException(handle.getSegmentName(), status.getLen(), offset);
        } else if (stream.getPos() != offset) {
            // Looks like the filesystem changed from underneath us. This could be our bug, but it could be something else.
            log.warn("File changed detected for '{}'. Expected length = {}, actual length = {}.", status, status.getLen(), stream.getPos());
            throw new BadOffsetException(handle.getSegmentName(), status.getLen(), offset);
        }
        if (length == 0) {
            // Note: IOUtils.copyBytes with length == 0 will enter an infinite loop, hence the need for this check.
            return;
        }
        // We need to be very careful with IOUtils.copyBytes. There are many overloads with very similar signatures.
        // There is a difference between (InputStream, OutputStream, int, boolean) and (InputStream, OutputStream, long, boolean),
        // in that the one with "int" uses the third arg as a buffer size, and the one with "long" uses it as the number
        // of bytes to copy.
        IOUtils.copyBytes(data, stream, (long) length, false);
        stream.flush();
    } catch (IOException ex) {
        throw HDFSExceptionHelpers.convertException(handle.getSegmentName(), ex);
    }
    HDFSMetrics.WRITE_LATENCY.reportSuccessEvent(timer.getElapsed());
    HDFSMetrics.WRITE_BYTES.add(length);
    LoggerHelpers.traceLeave(log, "write", traceId, handle, offset, length);
}
Also used: FileStatus(org.apache.hadoop.fs.FileStatus), Timer(io.pravega.common.Timer), StreamSegmentSealedException(io.pravega.segmentstore.contracts.StreamSegmentSealedException), BadOffsetException(io.pravega.segmentstore.contracts.BadOffsetException), IOException(java.io.IOException), FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream), StorageNotPrimaryException(io.pravega.segmentstore.storage.StorageNotPrimaryException)
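
The comment about IOUtils.copyBytes deserves emphasis: the overload taking an int treats the third argument as a buffer size and copies the whole stream, while the overload taking a long treats it as the number of bytes to copy. A small standalone demonstration of the difference (the class name is hypothetical; the Hadoop API calls are real):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.IOUtils;

public class CopyBytesOverloadsSketch {
    public static void main(String[] args) throws IOException {
        byte[] src = new byte[8192];

        // int third argument = BUFFER SIZE: copies the entire stream, 4 KB at a time.
        ByteArrayOutputStream all = new ByteArrayOutputStream();
        IOUtils.copyBytes(new ByteArrayInputStream(src), all, 4096, false);
        System.out.println(all.size()); // 8192

        // long third argument = BYTE COUNT: copies exactly 4096 bytes, then stops.
        ByteArrayOutputStream some = new ByteArrayOutputStream();
        IOUtils.copyBytes(new ByteArrayInputStream(src), some, 4096L, false);
        System.out.println(some.size()); // 4096
    }
}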

Example 25 with StorageNotPrimaryException

Use of io.pravega.segmentstore.storage.StorageNotPrimaryException in project pravega by pravega.

From class HDFSStorage, method concat.

@Override
public void concat(SegmentHandle target, long offset, String sourceSegment) throws StreamSegmentException {
    ensureInitializedAndNotClosed();
    long traceId = LoggerHelpers.traceEnter(log, "concat", target, offset, sourceSegment);
    target = asWritableHandle(target);
    // Check the target's offset and whether it has been sealed.
    FileStatus fileStatus = null;
    try {
        fileStatus = findStatusForSegment(target.getSegmentName(), true);
        if (isSealed(fileStatus.getPath())) {
            throw new StreamSegmentSealedException(target.getSegmentName());
        } else if (getEpoch(fileStatus) > this.epoch) {
            throw new StorageNotPrimaryException(target.getSegmentName());
        } else if (fileStatus.getLen() != offset) {
            throw new BadOffsetException(target.getSegmentName(), fileStatus.getLen(), offset);
        }
    } catch (IOException ex) {
        throw HDFSExceptionHelpers.convertException(target.getSegmentName(), ex);
    }
    try {
        FileStatus sourceFile = findStatusForSegment(sourceSegment, true);
        Preconditions.checkState(isSealed(sourceFile.getPath()), "Cannot concat segment '%s' into '%s' because it is not sealed.", sourceSegment, target.getSegmentName());
        // Concat source file into target.
        this.fileSystem.concat(fileStatus.getPath(), new Path[] { sourceFile.getPath() });
    } catch (IOException ex) {
        throw HDFSExceptionHelpers.convertException(sourceSegment, ex);
    }
    LoggerHelpers.traceLeave(log, "concat", traceId, target, offset, sourceSegment);
}
Also used: FileStatus(org.apache.hadoop.fs.FileStatus), StreamSegmentSealedException(io.pravega.segmentstore.contracts.StreamSegmentSealedException), BadOffsetException(io.pravega.segmentstore.contracts.BadOffsetException), IOException(java.io.IOException), StorageNotPrimaryException(io.pravega.segmentstore.storage.StorageNotPrimaryException)
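
Under the hood this relies on FileSystem.concat, which only some filesystems implement (HDFS's DistributedFileSystem does; most others throw UnsupportedOperationException) and which appends the source files' blocks to the target and removes the sources. A minimal usage sketch (the paths are hypothetical, and fs.defaultFS is assumed to point at an HDFS cluster):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ConcatSketch {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path target = new Path("/segments/target");
        Path source = new Path("/segments/source_sealed");
        // Atomically moves the source's blocks onto the end of the target;
        // the source file ceases to exist afterwards.
        fs.concat(target, new Path[]{ source });
    }
}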

Aggregations

StorageNotPrimaryException (io.pravega.segmentstore.storage.StorageNotPrimaryException): 29 uses
lombok.val (lombok.val): 22 uses
Cleanup (lombok.Cleanup): 15 uses
Test (org.junit.Test): 15 uses
ByteArrayInputStream (java.io.ByteArrayInputStream): 10 uses
IOException (java.io.IOException): 6 uses
Path (org.apache.hadoop.fs.Path): 6 uses
FileStatus (org.apache.hadoop.fs.FileStatus): 5 uses
BadOffsetException (io.pravega.segmentstore.contracts.BadOffsetException): 4 uses
SegmentHandle (io.pravega.segmentstore.storage.SegmentHandle): 4 uses
StreamSegmentSealedException (io.pravega.segmentstore.contracts.StreamSegmentSealedException): 3 uses
StorageMetadataWritesFencedOutException (io.pravega.segmentstore.storage.metadata.StorageMetadataWritesFencedOutException): 3 uses
CompletionException (java.util.concurrent.CompletionException): 3 uses
StorageFullException (io.pravega.segmentstore.storage.StorageFullException): 2 uses
FileNotFoundException (java.io.FileNotFoundException): 2 uses
ArrayList (java.util.ArrayList): 2 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 2 uses
Exceptions (io.pravega.common.Exceptions): 1 use
Timer (io.pravega.common.Timer): 1 use
Futures (io.pravega.common.concurrent.Futures): 1 use