
Example 26 with SegmentHandle

use of io.pravega.segmentstore.storage.SegmentHandle in project pravega by pravega.

The class ChunkedSegmentStorage, method openRead.

@Override
public CompletableFuture<SegmentHandle> openRead(String streamSegmentName) {
    checkInitialized();
    return executeParallel(() -> {
        val traceId = LoggerHelpers.traceEnter(log, "openRead", streamSegmentName);
        val timer = new Timer();
        // Validate preconditions and return handle.
        Preconditions.checkNotNull(streamSegmentName, "streamSegmentName");
        log.debug("{} openRead - started segment={}.", logPrefix, streamSegmentName);
        return tryWith(metadataStore.beginTransaction(false, streamSegmentName), txn -> txn.get(streamSegmentName).thenComposeAsync(storageMetadata -> {
            val segmentMetadata = (SegmentMetadata) storageMetadata;
            checkSegmentExists(streamSegmentName, segmentMetadata);
            segmentMetadata.checkInvariants();
            // If this segment was created by an older segment store instance, claim ownership and adjust the length.
            final CompletableFuture<Void> f;
            if (segmentMetadata.getOwnerEpoch() < this.epoch) {
                log.debug("{} openRead - Segment needs ownership change. segment={}.", logPrefix, segmentMetadata.getName());
                // After a fail-over, the length recorded in metadata may lag behind the segment's actual length in storage.
                // This can happen with lazy commits that had not yet been committed at the time of the fail-over.
                f = executeSerialized(() -> claimOwnership(txn, segmentMetadata), streamSegmentName);
            } else {
                f = CompletableFuture.completedFuture(null);
            }
            return f.thenApplyAsync(v -> {
                val retValue = SegmentStorageHandle.readHandle(streamSegmentName);
                log.debug("{} openRead - finished segment={} latency={}.", logPrefix, streamSegmentName, timer.getElapsedMillis());
                LoggerHelpers.traceLeave(log, "openRead", traceId, retValue);
                return retValue;
            }, executor);
        }, executor), executor).handleAsync((v, ex) -> {
            if (null != ex) {
                log.debug("{} openRead - exception segment={} latency={}.", logPrefix, streamSegmentName, timer.getElapsedMillis(), ex);
                handleException(streamSegmentName, ex);
            }
            return v;
        }, executor);
    }, streamSegmentName);
}
Also used : lombok.val(lombok.val) Arrays(java.util.Arrays) Storage(io.pravega.segmentstore.storage.Storage) ScheduledFuture(java.util.concurrent.ScheduledFuture) StreamSegmentInformation(io.pravega.segmentstore.contracts.StreamSegmentInformation) StreamSegmentNotExistsException(io.pravega.segmentstore.contracts.StreamSegmentNotExistsException) StorageNotPrimaryException(io.pravega.segmentstore.storage.StorageNotPrimaryException) SLTS_STORAGE_USED_PERCENTAGE(io.pravega.shared.MetricsNames.SLTS_STORAGE_USED_PERCENTAGE) StorageFullException(io.pravega.segmentstore.storage.StorageFullException) ImmutableDate(io.pravega.common.util.ImmutableDate) SegmentProperties(io.pravega.segmentstore.contracts.SegmentProperties) StreamSegmentSealedException(io.pravega.segmentstore.contracts.StreamSegmentSealedException) SegmentHandle(io.pravega.segmentstore.storage.SegmentHandle) STORAGE_METADATA_SIZE(io.pravega.shared.MetricsNames.STORAGE_METADATA_SIZE) Duration(java.time.Duration) MetadataTransaction(io.pravega.segmentstore.storage.metadata.MetadataTransaction) ChunkMetadata(io.pravega.segmentstore.storage.metadata.ChunkMetadata) StorageMetadataWritesFencedOutException(io.pravega.segmentstore.storage.metadata.StorageMetadataWritesFencedOutException) CompletionException(java.util.concurrent.CompletionException) GuardedBy(javax.annotation.concurrent.GuardedBy) StatusFlags(io.pravega.segmentstore.storage.metadata.StatusFlags) Slf4j(lombok.extern.slf4j.Slf4j) List(java.util.List) StreamSegmentExistsException(io.pravega.segmentstore.contracts.StreamSegmentExistsException) ConcurrentModificationException(java.util.ConcurrentModificationException) Futures(io.pravega.common.concurrent.Futures) Getter(lombok.Getter) SegmentRollingPolicy(io.pravega.segmentstore.storage.SegmentRollingPolicy) SLTS_STORAGE_USED_BYTES(io.pravega.shared.MetricsNames.SLTS_STORAGE_USED_BYTES) Exceptions(io.pravega.common.Exceptions) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Callable(java.util.concurrent.Callable) CompletableFuture(java.util.concurrent.CompletableFuture) Function(java.util.function.Function) SLTS_DELETE_LATENCY(io.pravega.segmentstore.storage.chunklayer.ChunkStorageMetrics.SLTS_DELETE_LATENCY) HashSet(java.util.HashSet) SegmentMetadata(io.pravega.segmentstore.storage.metadata.SegmentMetadata) MultiKeySequentialProcessor(io.pravega.common.concurrent.MultiKeySequentialProcessor) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) SLTS_CREATE_LATENCY(io.pravega.segmentstore.storage.chunklayer.ChunkStorageMetrics.SLTS_CREATE_LATENCY) SLTS_DELETE_COUNT(io.pravega.segmentstore.storage.chunklayer.ChunkStorageMetrics.SLTS_DELETE_COUNT) LoggerHelpers(io.pravega.common.LoggerHelpers) NameUtils(io.pravega.shared.NameUtils) Iterator(java.util.Iterator) SLTS_CREATE_COUNT(io.pravega.segmentstore.storage.chunklayer.ChunkStorageMetrics.SLTS_CREATE_COUNT) Executor(java.util.concurrent.Executor) STORAGE_METADATA_NUM_CHUNKS(io.pravega.shared.MetricsNames.STORAGE_METADATA_NUM_CHUNKS) INTERNAL_SCOPE_PREFIX(io.pravega.shared.NameUtils.INTERNAL_SCOPE_PREFIX) lombok.val(lombok.val) Timer(io.pravega.common.Timer) Beta(com.google.common.annotations.Beta) TimeUnit(java.util.concurrent.TimeUnit) AtomicLong(java.util.concurrent.atomic.AtomicLong) ChunkMetadataStore(io.pravega.segmentstore.storage.metadata.ChunkMetadataStore) ReadIndexBlockMetadata(io.pravega.segmentstore.storage.metadata.ReadIndexBlockMetadata) Preconditions(com.google.common.base.Preconditions) InputStream(java.io.InputStream) 
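
For orientation, here is a caller-side sketch of the read path exercised above. It is illustrative only and not part of the Pravega sources: storage stands for any initialized Storage implementation and timeout for a reasonable Duration. openRead hands back a read-only handle, which is then passed to read, mirroring the pattern used in the tests further down this page.

// Illustrative sketch (assumed names): read the first bytes of an existing segment.
static int readHead(Storage storage, String segmentName, byte[] buffer, Duration timeout) throws Exception {
    // openRead returns a read-only SegmentHandle; read copies up to buffer.length bytes starting at offset 0.
    return storage.openRead(segmentName)
            .thenCompose(readHandle -> storage.read(readHandle, 0, buffer, 0, buffer.length, timeout))
            .get(timeout.toMillis(), TimeUnit.MILLISECONDS);
}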

Example 27 with SegmentHandle

use of io.pravega.segmentstore.storage.SegmentHandle in project pravega by pravega.

The class ChunkedSegmentStorage, method openWrite.

@Override
public CompletableFuture<SegmentHandle> openWrite(String streamSegmentName) {
    checkInitialized();
    return executeSerialized(() -> {
        val traceId = LoggerHelpers.traceEnter(log, "openWrite", streamSegmentName);
        val timer = new Timer();
        Preconditions.checkNotNull(streamSegmentName, "streamSegmentName");
        log.debug("{} openWrite - started segment={}.", logPrefix, streamSegmentName);
        return tryWith(metadataStore.beginTransaction(false, streamSegmentName), txn -> txn.get(streamSegmentName).thenComposeAsync(storageMetadata -> {
            val segmentMetadata = (SegmentMetadata) storageMetadata;
            checkSegmentExists(streamSegmentName, segmentMetadata);
            segmentMetadata.checkInvariants();
            // If this segment was created by an older segment store instance, claim ownership and start a fresh chunk.
            final CompletableFuture<Void> f;
            if (segmentMetadata.getOwnerEpoch() < this.epoch) {
                log.debug("{} openWrite - Segment needs ownership change - segment={}.", logPrefix, segmentMetadata.getName());
                f = claimOwnership(txn, segmentMetadata);
            } else {
                f = CompletableFuture.completedFuture(null);
            }
            return f.thenApplyAsync(v -> {
                // If the segment has since been claimed by a newer instance, abort.
                checkOwnership(streamSegmentName, segmentMetadata);
                // This instance is the owner, return a handle.
                val retValue = SegmentStorageHandle.writeHandle(streamSegmentName);
                log.debug("{} openWrite - finished segment={} latency={}.", logPrefix, streamSegmentName, timer.getElapsedMillis());
                LoggerHelpers.traceLeave(log, "openWrite", traceId, retValue);
                return retValue;
            }, executor);
        }, executor), executor).handleAsync((v, ex) -> {
            if (null != ex) {
                log.debug("{} openWrite - exception segment={} latency={}.", logPrefix, streamSegmentName, timer.getElapsedMillis(), ex);
                handleException(streamSegmentName, ex);
            }
            return v;
        }, executor);
    }, streamSegmentName);
}
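
A matching caller-side sketch for the write path (again illustrative, with the same assumed storage and timeout placeholders): openWrite claims ownership for the current epoch and returns a writable handle, which is then used to append data at a known offset.

// Illustrative sketch (assumed names): append bytes to a segment at a known offset.
static void appendAt(Storage storage, String segmentName, long offset, byte[] data, Duration timeout) throws Exception {
    // openWrite performs the ownership/fencing checks shown above before handing out the handle.
    SegmentHandle writeHandle = storage.openWrite(segmentName).get(timeout.toMillis(), TimeUnit.MILLISECONDS);
    storage.write(writeHandle, offset, new ByteArrayInputStream(data), data.length, timeout)
            .get(timeout.toMillis(), TimeUnit.MILLISECONDS);
}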

Example 28 with SegmentHandle

use of io.pravega.segmentstore.storage.SegmentHandle in project pravega by pravega.

The class HDFSStorageTest, method testZombieFencing.

// region Fencing tests
/**
 * A special test case of fencing to verify the behavior of HDFSStorage in the presence of an instance that has
 * been fenced out. This case verifies that any ongoing writes properly fail upon fencing. Specifically, we have a
 * fenced-out instance that keeps writing and we verify that the write fails once the ownership changes.
 * The HDFS behavior in this case is that ongoing writes that execute before the rename
 * complete successfully.
 */
@Test(timeout = 60000)
public void testZombieFencing() throws Exception {
    final long epochCount = 30;
    final int writeSize = 1000;
    final String segmentName = "Segment";
    @Cleanup val writtenData = new ByteBufferOutputStream();
    final Random rnd = new Random(0);
    int currentEpoch = 1;
    // Create initial adapter.
    val currentStorage = new AtomicReference<Storage>();
    currentStorage.set(createStorage());
    currentStorage.get().initialize(currentEpoch);
    // Create the Segment and open it for the first time.
    val currentHandle = new AtomicReference<SegmentHandle>(currentStorage.get().create(segmentName, TIMEOUT).thenCompose(v -> currentStorage.get().openWrite(segmentName)).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS));
    // Run a number of epochs.
    while (currentEpoch <= epochCount) {
        val oldStorage = currentStorage.get();
        val handle = currentHandle.get();
        val writeBuffer = new byte[writeSize];
        val appends = Futures.loop(() -> true, () -> {
            rnd.nextBytes(writeBuffer);
            return oldStorage.write(handle, writtenData.size(), new ByteArrayInputStream(writeBuffer), writeBuffer.length, TIMEOUT).thenRun(() -> writtenData.write(writeBuffer));
        }, executorService());
        // Create a new Storage adapter with a new epoch and open-write the Segment, remembering its handle.
        val newStorage = createStorage();
        try {
            newStorage.initialize(++currentEpoch);
            currentHandle.set(newStorage.openWrite(segmentName).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS));
        } catch (Exception ex) {
            newStorage.close();
            throw ex;
        }
        currentStorage.set(newStorage);
        try {
            appends.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
            Assert.fail("Continuous appends on older epoch Adapter did not fail.");
        } catch (Exception ex) {
            val cause = Exceptions.unwrap(ex);
            if (!(cause instanceof StorageNotPrimaryException || cause instanceof StreamSegmentSealedException || cause instanceof StreamSegmentNotExistsException)) {
                // The appends are only expected to fail because they were fenced out, the Segment was sealed, or the Segment no longer exists.
                Assert.fail("Unexpected exception " + cause);
            }
        } finally {
            oldStorage.close();
        }
    }
    byte[] expectedData = writtenData.getData().getCopy();
    byte[] readData = new byte[expectedData.length];
    @Cleanup val readStorage = createStorage();
    readStorage.initialize(++currentEpoch);
    int bytesRead = readStorage.openRead(segmentName).thenCompose(handle -> readStorage.read(handle, 0, readData, 0, readData.length, TIMEOUT)).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    Assert.assertEquals("Unexpected number of bytes read.", readData.length, bytesRead);
    Assert.assertArrayEquals("Unexpected data read back.", expectedData, readData);
}
Also used : lombok.val(lombok.val) Storage(io.pravega.segmentstore.storage.Storage) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Progressable(org.apache.hadoop.util.Progressable) ByteArrayOutputStream(java.io.ByteArrayOutputStream) StreamSegmentNotExistsException(io.pravega.segmentstore.contracts.StreamSegmentNotExistsException) SneakyThrows(lombok.SneakyThrows) AssertExtensions(io.pravega.test.common.AssertExtensions) FileSystem(org.apache.hadoop.fs.FileSystem) AclException(org.apache.hadoop.hdfs.protocol.AclException) Exceptions(io.pravega.common.Exceptions) StorageNotPrimaryException(io.pravega.segmentstore.storage.StorageNotPrimaryException) AsyncStorageWrapper(io.pravega.segmentstore.storage.AsyncStorageWrapper) Cleanup(lombok.Cleanup) Random(java.util.Random) AtomicReference(java.util.concurrent.atomic.AtomicReference) FsAction(org.apache.hadoop.fs.permission.FsAction) SegmentProperties(io.pravega.segmentstore.contracts.SegmentProperties) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) StreamSegmentSealedException(io.pravega.segmentstore.contracts.StreamSegmentSealedException) SegmentHandle(io.pravega.segmentstore.storage.SegmentHandle) ByteArrayInputStream(java.io.ByteArrayInputStream) StorageTestBase(io.pravega.segmentstore.storage.StorageTestBase) Configuration(org.apache.hadoop.conf.Configuration) After(org.junit.After) Timeout(org.junit.rules.Timeout) Path(org.apache.hadoop.fs.Path) RollingStorageTestBase(io.pravega.segmentstore.storage.rolling.RollingStorageTestBase) Before(org.junit.Before) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Files(java.nio.file.Files) lombok.val(lombok.val) IOException(java.io.IOException) Test(org.junit.Test) FileHelpers(io.pravega.common.io.FileHelpers) File(java.io.File) ByteBufferOutputStream(io.pravega.common.io.ByteBufferOutputStream) TimeUnit(java.util.concurrent.TimeUnit) Rule(org.junit.Rule) Assert(org.junit.Assert) Futures(io.pravega.common.concurrent.Futures) TemporaryFolder(org.junit.rules.TemporaryFolder)
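
Stripped of the epoch loop and the background append task, the fencing behavior this test relies on can be reduced to the sketch below. It is written against the same test class, so createStorage() and TIMEOUT are assumed; it is an illustration, not an excerpt. A handle opened under an older epoch keeps working only until a higher-epoch instance calls openWrite; after that, writes through the stale handle are expected to fail with StorageNotPrimaryException (or a sealed/not-exists variant, depending on timing).

// Illustrative reduction of the zombie-fencing scenario above; not an exact excerpt from the test.
@Cleanup val storage1 = createStorage();
storage1.initialize(1);
storage1.create("segment", TIMEOUT).join();
SegmentHandle staleHandle = storage1.openWrite("segment").join();
@Cleanup val storage2 = createStorage();
storage2.initialize(2);
// storage2 now owns the segment; storage1's handle is fenced out.
storage2.openWrite("segment").join();
try {
    storage1.write(staleHandle, 0, new ByteArrayInputStream(new byte[1]), 1, TIMEOUT).join();
    Assert.fail("Write through the fenced-out handle should have failed.");
} catch (Exception ex) {
    val cause = Exceptions.unwrap(ex);
    Assert.assertTrue("Unexpected exception " + cause,
            cause instanceof StorageNotPrimaryException
                    || cause instanceof StreamSegmentSealedException
                    || cause instanceof StreamSegmentNotExistsException);
}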

Example 29 with SegmentHandle

use of io.pravega.segmentstore.storage.SegmentHandle in project pravega by pravega.

The class HDFSStorageTest, method testFencing.

/**
 * Tests fencing abilities. We create two different Storage objects with different owner ids.
 * Part 1: Creation:
 * * We create the Segment on Storage1:
 * ** We verify that Storage1 can execute all operations.
 * ** We verify that Storage2 can execute only read-only operations.
 * * We open the Segment on Storage2:
 * ** We verify that Storage1 can execute only read-only operations.
 * ** We verify that Storage2 can execute all operations.
 */
@Test
@Override
public void testFencing() {
    final long epoch1 = 1;
    final long epoch2 = 2;
    final String segmentName = "segment";
    try (val storage1 = createStorage();
        val storage2 = createStorage()) {
        storage1.initialize(epoch1);
        storage2.initialize(epoch2);
        // Create segment in Storage1 (thus Storage1 owns it for now).
        storage1.create(segmentName, TIMEOUT).join();
        // Storage1 should be able to execute all operations.
        SegmentHandle handle1 = storage1.openWrite(segmentName).join();
        verifyWriteOperationsSucceed(handle1, storage1);
        verifyReadOnlyOperationsSucceed(handle1, storage1);
        // Open the segment in Storage2 (thus Storage2 owns it for now).
        SegmentHandle handle2 = storage2.openWrite(segmentName).join();
        // Storage1 should be able to execute only read-only operations.
        verifyWriteOperationsFail(handle1, storage1);
        verifyConcatOperationsFail(handle1, storage1);
        verifyReadOnlyOperationsSucceed(handle1, storage1);
        // Storage2 should be able to execute all operations.
        verifyReadOnlyOperationsSucceed(handle2, storage2);
        verifyWriteOperationsSucceed(handle2, storage2);
        // Seal and Delete (these should be run last, otherwise we can't run our test).
        verifyFinalWriteOperationsFail(handle1, storage1);
        verifyFinalWriteOperationsSucceed(handle2, storage2);
    }
}
Also used : lombok.val(lombok.val) SegmentHandle(io.pravega.segmentstore.storage.SegmentHandle) Test(org.junit.Test)
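
Since every example on this page ultimately revolves around the handle returned by openRead and openWrite, the small sketch below shows the property those handles carry (assuming the SegmentHandle accessors getSegmentName() and isReadOnly(); treat the exact names as an assumption if your version differs). Here storage is any initialized Storage instance and segmentName an existing segment.

// Illustrative sketch: the handle from openRead is read-only, the handle from openWrite is not.
SegmentHandle readHandle = storage.openRead(segmentName).join();
SegmentHandle writeHandle = storage.openWrite(segmentName).join();
Assert.assertEquals(segmentName, readHandle.getSegmentName());
Assert.assertTrue("openRead is expected to return a read-only handle.", readHandle.isReadOnly());
Assert.assertFalse("openWrite is expected to return a writable handle.", writeHandle.isReadOnly());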

Example 30 with SegmentHandle

use of io.pravega.segmentstore.storage.SegmentHandle in project pravega by pravega.

The class HDFSStorageTest, method testNormalRead.

/**
 * Tests a read scenario with no issues or failures.
 */
@Test
public void testNormalRead() throws Exception {
    // Write data.
    String segmentName = "foo_open";
    val rnd = new Random(0);
    try (Storage s = createStorage()) {
        s.initialize(DEFAULT_EPOCH);
        createSegment(segmentName, s);
        SegmentHandle handle = s.openWrite(segmentName).join();
        long expectedLength = 0;
        ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
        for (int i = 0; i < WRITE_COUNT; i++) {
            byte[] data = new byte[i + 1];
            rnd.nextBytes(data);
            s.write(handle, expectedLength, new ByteArrayInputStream(data), data.length, null).join();
            writtenData.write(data);
            expectedLength += data.length;
        }
        // Check the written data via read operations, starting at every offset from 0 to expectedLength / 2.
        byte[] expectedData = writtenData.toByteArray();
        val readHandle = s.openRead(segmentName).join();
        for (int startOffset = 0; startOffset < expectedLength / 2; startOffset++) {
            int readLength = (int) (expectedLength - 2 * startOffset);
            byte[] actualData = new byte[readLength];
            int readBytes = s.read(readHandle, startOffset, actualData, 0, actualData.length, null).join();
            Assert.assertEquals("Unexpected number of bytes read with start offset " + startOffset, actualData.length, readBytes);
            AssertExtensions.assertArrayEquals("Unexpected data read back with start offset " + startOffset, expectedData, startOffset, actualData, 0, readLength);
        }
    }
}
Also used : lombok.val(lombok.val) Storage(io.pravega.segmentstore.storage.Storage) Random(java.util.Random) ByteArrayInputStream(java.io.ByteArrayInputStream) ByteArrayOutputStream(java.io.ByteArrayOutputStream) SegmentHandle(io.pravega.segmentstore.storage.SegmentHandle) Test(org.junit.Test)
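
The test above reads the whole segment back in single calls of varying length. As a small variation, the sketch below reads a segment sequentially in fixed-size chunks using the same openRead/read API (illustrative only; the caller is assumed to already know the segment length, and the null timeout mirrors the test above).

// Illustrative sketch: sequential chunked read over the same API used in testNormalRead.
static byte[] readInChunks(Storage s, String segmentName, int segmentLength, int chunkSize) {
    SegmentHandle readHandle = s.openRead(segmentName).join();
    byte[] result = new byte[segmentLength];
    int offset = 0;
    while (offset < segmentLength) {
        int toRead = Math.min(chunkSize, segmentLength - offset);
        // read() returns the number of bytes actually read; advance by that count.
        int bytesRead = s.read(readHandle, offset, result, offset, toRead, null).join();
        if (bytesRead <= 0) {
            break;
        }
        offset += bytesRead;
    }
    return result;
}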

Aggregations

SegmentHandle (io.pravega.segmentstore.storage.SegmentHandle): 43
lombok.val (lombok.val): 27
Test (org.junit.Test): 26
Storage (io.pravega.segmentstore.storage.Storage): 20
ByteArrayInputStream (java.io.ByteArrayInputStream): 20
StreamSegmentNotExistsException (io.pravega.segmentstore.contracts.StreamSegmentNotExistsException): 14
SegmentProperties (io.pravega.segmentstore.contracts.SegmentProperties): 13
StreamSegmentSealedException (io.pravega.segmentstore.contracts.StreamSegmentSealedException): 12
StorageNotPrimaryException (io.pravega.segmentstore.storage.StorageNotPrimaryException): 12
Exceptions (io.pravega.common.Exceptions): 10
Cleanup (lombok.Cleanup): 10
CompletableFuture (java.util.concurrent.CompletableFuture): 9
Futures (io.pravega.common.concurrent.Futures): 8
StreamSegmentExistsException (io.pravega.segmentstore.contracts.StreamSegmentExistsException): 8
NameUtils (io.pravega.shared.NameUtils): 8
List (java.util.List): 8
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 8
Preconditions (com.google.common.base.Preconditions): 7
StreamSegmentInformation (io.pravega.segmentstore.contracts.StreamSegmentInformation): 7
SegmentRollingPolicy (io.pravega.segmentstore.storage.SegmentRollingPolicy): 7
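
All of the aggregated usages above share the same small contract. For reference, a simplified sketch of the SegmentHandle interface as these examples exercise it (the real interface lives in io.pravega.segmentstore.storage; the two accessors below are the ones assumed in the sketches on this page):

// Simplified sketch of the contract behind io.pravega.segmentstore.storage.SegmentHandle.
public interface SegmentHandle {
    // Name of the segment this handle refers to.
    String getSegmentName();

    // True for handles produced by openRead (e.g. SegmentStorageHandle.readHandle),
    // false for handles produced by openWrite (e.g. SegmentStorageHandle.writeHandle).
    boolean isReadOnly();
}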