Example 31 with SegmentProperties

use of io.pravega.segmentstore.contracts.SegmentProperties in project pravega by pravega.

the class SegmentAggregatorTests method testMerge.

/**
 * Tests the flush() method with Append and MergeTransactionOperations.
 * Overall strategy:
 * 1. Create one Parent Segment and N Transaction Segments.
 * 2. Populate all Transaction Segments with data.
 * 3. Seal the first N/2 Transaction Segments.
 * 4. Add some Appends, interspersed with Merge Transaction Ops to the Parent (for all Transactions).
 * 5. Call flush() repeatedly on all Segments, until nothing is flushed anymore. Verify only the first N/2 Transactions were merged.
 * 6. Seal the remaining N/2 Transaction Segments.
 * 7. Call flush() repeatedly on all Segments, until nothing is flushed anymore. Verify all Transactions were merged.
 * 8. Verify the Parent Segment has all the data (from itself and its Transactions), in the correct order.
 */
@Test
@SuppressWarnings("checkstyle:CyclomaticComplexity")
public void testMerge() throws Exception {
    // This is the number of appends per Segment/Transaction - there will be a lot of appends here.
    final int appendCount = 100;
    final WriterConfig config = WriterConfig.builder()
            .with(WriterConfig.FLUSH_THRESHOLD_BYTES, appendCount * 50) // Extra-high length threshold.
            .with(WriterConfig.FLUSH_THRESHOLD_MILLIS, 1000L)
            .with(WriterConfig.MAX_FLUSH_SIZE_BYTES, 10000)
            .with(WriterConfig.MIN_READ_TIMEOUT_MILLIS, 10L)
            .build();
    @Cleanup TestContext context = new TestContext(config);
    // Create and initialize all segments.
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT).join();
    for (SegmentAggregator a : context.transactionAggregators) {
        context.storage.create(a.getMetadata().getName(), TIMEOUT).join();
        a.initialize(TIMEOUT).join();
    }
    // Store written data by segment - so we can check it later.
    HashMap<Long, ByteArrayOutputStream> dataBySegment = new HashMap<>();
    val actualMergeOpAck = new ArrayList<Map.Entry<Long, Long>>();
    context.dataSource.setCompleteMergeCallback((target, source) -> actualMergeOpAck.add(new AbstractMap.SimpleImmutableEntry<Long, Long>(target, source)));
    // Add a few appends to each Transaction aggregator and to the parent aggregator.
    // Seal the first half of the Transaction aggregators (thus, those Transactions will be fully flushed).
    HashSet<Long> sealedTransactionIds = new HashSet<>();
    for (int i = 0; i < context.transactionAggregators.length; i++) {
        SegmentAggregator transactionAggregator = context.transactionAggregators[i];
        long transactionId = transactionAggregator.getMetadata().getId();
        ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
        dataBySegment.put(transactionId, writtenData);
        for (int appendId = 0; appendId < appendCount; appendId++) {
            StorageOperation appendOp = generateAppendAndUpdateMetadata(appendId, transactionId, context);
            transactionAggregator.add(appendOp);
            getAppendData(appendOp, writtenData, context);
        }
        if (i < context.transactionAggregators.length / 2) {
            // We only seal the first half.
            transactionAggregator.add(generateSealAndUpdateMetadata(transactionId, context));
            sealedTransactionIds.add(transactionId);
        }
    }
    // Add MergeTransactionOperations to the parent aggregator, making sure we have both the following cases:
    // * Two or more consecutive MergeTransactionOperations both for Transactions that are sealed and for those that are not.
    // * MergeTransactionOperations with appends interspersed between them (in the parent), both for sealed Transactions and non-sealed Transactions.
    long parentSegmentId = context.segmentAggregator.getMetadata().getId();
    @Cleanup ByteArrayOutputStream parentData = new ByteArrayOutputStream();
    for (int transIndex = 0; transIndex < context.transactionAggregators.length; transIndex++) {
        // This helps ensure that we have both interspersed appends, and consecutive MergeTransactionOperations in the parent.
        if (transIndex % 2 == 1) {
            StorageOperation appendOp = generateAppendAndUpdateMetadata(transIndex, parentSegmentId, context);
            context.segmentAggregator.add(appendOp);
            getAppendData(appendOp, parentData, context);
        }
        // Merge this Transaction into the parent & record its data in the final parent data array.
        long transactionId = context.transactionAggregators[transIndex].getMetadata().getId();
        context.segmentAggregator.add(generateMergeTransactionAndUpdateMetadata(transactionId, context));
        ByteArrayOutputStream transactionData = dataBySegment.get(transactionId);
        parentData.write(transactionData.toByteArray());
        transactionData.close();
    }
    // Flush all the Aggregators as long as at least one of them reports that it can flush and actually flushed something.
    flushAllSegments(context);
    // Now check to see that only those Transactions that were sealed were merged.
    for (SegmentAggregator transactionAggregator : context.transactionAggregators) {
        SegmentMetadata transactionMetadata = transactionAggregator.getMetadata();
        boolean expectedMerged = sealedTransactionIds.contains(transactionMetadata.getId());
        if (expectedMerged) {
            Assert.assertTrue("Transaction to be merged was not marked as deleted in metadata.", transactionMetadata.isDeleted());
            Assert.assertFalse("Transaction to be merged still exists in storage.", context.storage.exists(transactionMetadata.getName(), TIMEOUT).join());
        } else {
            Assert.assertFalse("Transaction not to be merged was marked as deleted in metadata.", transactionMetadata.isDeleted());
            SegmentProperties sp = context.storage.getStreamSegmentInfo(transactionMetadata.getName(), TIMEOUT).join();
            Assert.assertFalse("Transaction not to be merged is sealed in storage.", sp.isSealed());
        }
    }
    // Then seal the rest of the Transactions and re-run the flush on the parent a few times.
    for (SegmentAggregator a : context.transactionAggregators) {
        long transactionId = a.getMetadata().getId();
        if (!sealedTransactionIds.contains(transactionId)) {
            // This Transaction was not sealed (and merged) previously. Do it now.
            a.add(generateSealAndUpdateMetadata(transactionId, context));
            sealedTransactionIds.add(transactionId);
        }
    }
    // Flush all the Aggregators as long as at least one of them reports that it can flush and actually flushed something.
    flushAllSegments(context);
    // Verify that all Transactions are now fully merged.
    for (SegmentAggregator transactionAggregator : context.transactionAggregators) {
        SegmentMetadata transactionMetadata = transactionAggregator.getMetadata();
        Assert.assertTrue("Merged Transaction was not marked as deleted in metadata.", transactionMetadata.isDeleted());
        Assert.assertFalse("Merged Transaction still exists in storage.", context.storage.exists(transactionMetadata.getName(), TIMEOUT).join());
    }
    // Verify that, in the end, the contents of the parent Segment are as expected.
    verifySegmentData(parentData.toByteArray(), context);
    // Verify calls to completeMerge.
    val expectedMergeOpSources = Arrays.stream(context.transactionAggregators).map(a -> a.getMetadata().getId()).collect(Collectors.toSet());
    Assert.assertEquals("Unexpected number of calls to completeMerge.", expectedMergeOpSources.size(), actualMergeOpAck.size());
    val actualMergeOpSources = actualMergeOpAck.stream().map(Map.Entry::getValue).collect(Collectors.toSet());
    AssertExtensions.assertContainsSameElements("Unexpected sources for invocation to completeMerge.", expectedMergeOpSources, actualMergeOpSources);
    for (Map.Entry<Long, Long> e : actualMergeOpAck) {
        Assert.assertEquals("Unexpected target for invocation to completeMerge.", context.segmentAggregator.getMetadata().getId(), (long) e.getKey());
    }
}
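
The flushAllSegments helper called above is not part of this excerpt. A minimal sketch of what it could look like, inferred from the calls in the test (mustFlush(), flush() and getFlushedBytes() all appear above; getMergedBytes() on FlushResult is an assumption):

private void flushAllSegments(TestContext context) throws Exception {
    // Keep making passes over all aggregators until a full pass moves no bytes.
    boolean anythingFlushed = true;
    while (anythingFlushed) {
        anythingFlushed = false;
        for (SegmentAggregator transactionAggregator : context.transactionAggregators) {
            if (transactionAggregator.mustFlush()) {
                FlushResult result = transactionAggregator.flush(TIMEOUT).join();
                anythingFlushed |= result.getFlushedBytes() > 0;
            }
        }
        if (context.segmentAggregator.mustFlush()) {
            FlushResult result = context.segmentAggregator.flush(TIMEOUT).join();
            // Count merged bytes as progress too, since a merge may move data
            // without adding flushed bytes (assumption about FlushResult).
            anythingFlushed |= result.getFlushedBytes() + result.getMergedBytes() > 0;
        }
    }
}
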
Also used : lombok.val(lombok.val) Arrays(java.util.Arrays) StreamSegmentNotExistsException(io.pravega.segmentstore.contracts.StreamSegmentNotExistsException) AssertExtensions(io.pravega.test.common.AssertExtensions) RequiredArgsConstructor(lombok.RequiredArgsConstructor) Cleanup(lombok.Cleanup) Random(java.util.Random) UpdateableSegmentMetadata(io.pravega.segmentstore.server.UpdateableSegmentMetadata) SegmentProperties(io.pravega.segmentstore.contracts.SegmentProperties) SegmentHandle(io.pravega.segmentstore.storage.SegmentHandle) ByteArrayInputStream(java.io.ByteArrayInputStream) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Duration(java.time.Duration) Map(java.util.Map) Operation(io.pravega.segmentstore.server.logs.operations.Operation) StreamSegmentTruncateOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentTruncateOperation) InMemoryStorage(io.pravega.segmentstore.storage.mocks.InMemoryStorage) Collectors(java.util.stream.Collectors) ErrorInjector(io.pravega.test.common.ErrorInjector) IOUtils(org.apache.commons.io.IOUtils) ThreadPooledTestSuite(io.pravega.test.common.ThreadPooledTestSuite) BadOffsetException(io.pravega.segmentstore.contracts.BadOffsetException) TestStorage(io.pravega.segmentstore.server.TestStorage) MetadataBuilder(io.pravega.segmentstore.server.MetadataBuilder) ByteArrayOutputStream(java.io.ByteArrayOutputStream) Exceptions(io.pravega.common.Exceptions) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) AtomicReference(java.util.concurrent.atomic.AtomicReference) Supplier(java.util.function.Supplier) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) UpdateableContainerMetadata(io.pravega.segmentstore.server.UpdateableContainerMetadata) SegmentMetadata(io.pravega.segmentstore.server.SegmentMetadata) Timeout(org.junit.rules.Timeout) OutputStream(java.io.OutputStream) ManualTimer(io.pravega.segmentstore.server.ManualTimer) FixedByteArrayOutputStream(io.pravega.common.io.FixedByteArrayOutputStream) IntentionalException(io.pravega.test.common.IntentionalException) IOException(java.io.IOException) Test(org.junit.Test) TimeUnit(java.util.concurrent.TimeUnit) AtomicLong(java.util.concurrent.atomic.AtomicLong) AbstractMap(java.util.AbstractMap) Rule(org.junit.Rule) CachedStreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) TreeMap(java.util.TreeMap) StreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation) DataCorruptionException(io.pravega.segmentstore.server.DataCorruptionException) Assert(org.junit.Assert) MergeTransactionOperation(io.pravega.segmentstore.server.logs.operations.MergeTransactionOperation) StreamSegmentSealOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentSealOperation) InputStream(java.io.InputStream)

Example 32 with SegmentProperties

use of io.pravega.segmentstore.contracts.SegmentProperties in project pravega by pravega.

the class SegmentAggregatorTests method testTruncateAndSeal.

/**
 * Tests the flush() method with StreamSegmentTruncateOperations after the segment has been Sealed.
 */
@Test
public void testTruncateAndSeal() throws Exception {
    // Add some data and intersperse with truncates.
    final int appendCount = 1000;
    final int truncateEvery = 20;
    final WriterConfig config = WriterConfig.builder()
            .with(WriterConfig.FLUSH_THRESHOLD_BYTES, appendCount * 50) // Extra-high length threshold.
            .with(WriterConfig.FLUSH_THRESHOLD_MILLIS, 1000L)
            .with(WriterConfig.MAX_FLUSH_SIZE_BYTES, 10000)
            .with(WriterConfig.MIN_READ_TIMEOUT_MILLIS, 10L)
            .build();
    @Cleanup TestContext context = new TestContext(config);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT).join();
    @Cleanup ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
    // Accumulate some Appends
    AtomicLong outstandingSize = new AtomicLong();
    SequenceNumberCalculator sequenceNumbers = new SequenceNumberCalculator(context, outstandingSize);
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        outstandingSize.addAndGet(appendOp.getLength());
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
        sequenceNumbers.record(appendOp);
        if (i % truncateEvery == 1) {
            StorageOperation truncateOp = generateTruncateAndUpdateMetadata(SEGMENT_ID, context);
            context.segmentAggregator.add(truncateOp);
            sequenceNumbers.record(truncateOp);
        }
    }
    // Generate and add a Seal Operation.
    StorageOperation sealOp = generateSealAndUpdateMetadata(SEGMENT_ID, context);
    context.segmentAggregator.add(sealOp);
    // Add another truncate op, after the Seal.
    StorageOperation lastTruncateOp = generateTruncateAndUpdateMetadata(SEGMENT_ID, context);
    context.segmentAggregator.add(lastTruncateOp);
    FlushResult flushResult = context.segmentAggregator.flush(TIMEOUT).join();
    Assert.assertEquals("Expected the entire Aggregator to be flushed.", outstandingSize.get(), flushResult.getFlushedBytes());
    Assert.assertFalse("Unexpected value returned by mustFlush() after flushing.", context.segmentAggregator.mustFlush());
    Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() after flushing.", Operation.NO_SEQUENCE_NUMBER, context.segmentAggregator.getLowestUncommittedSequenceNumber());
    // Verify data.
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    SegmentProperties storageInfo = context.storage.getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageInfo.getLength());
    Assert.assertTrue("Unexpected sealed status in Storage.", storageInfo.isSealed());
    Assert.assertEquals("Unexpected truncation offset in Storage.", lastTruncateOp.getStreamSegmentOffset(), context.storage.getTruncationOffset(context.segmentAggregator.getMetadata().getName()));
    context.storage.read(InMemoryStorage.newHandle(context.segmentAggregator.getMetadata().getName(), false), 0, actualData, 0, actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}
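
The SegmentProperties checks above (storage-side length and sealed status) recur across these tests. A small hedged helper distilling them might look like this; the helper itself is hypothetical, but every call it makes appears in the test above:

// Hypothetical helper: fetch the storage-side SegmentProperties for a segment
// and assert that it matches the expected length and sealed status.
private void assertStorageState(String segmentName, long expectedLength, boolean expectedSealed, TestContext context) {
    SegmentProperties sp = context.storage.getStreamSegmentInfo(segmentName, TIMEOUT).join();
    Assert.assertEquals("Unexpected length in Storage for segment " + segmentName, expectedLength, sp.getLength());
    Assert.assertEquals("Unexpected sealed status in Storage for segment " + segmentName, expectedSealed, sp.isSealed());
}

With it, the verification above would reduce to assertStorageState(context.segmentAggregator.getMetadata().getName(), expectedData.length, true, context).
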
Also used : AtomicLong(java.util.concurrent.atomic.AtomicLong) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) ByteArrayOutputStream(java.io.ByteArrayOutputStream) FixedByteArrayOutputStream(io.pravega.common.io.FixedByteArrayOutputStream) SegmentProperties(io.pravega.segmentstore.contracts.SegmentProperties) Cleanup(lombok.Cleanup) Test(org.junit.Test)

Example 33 with SegmentProperties

use of io.pravega.segmentstore.contracts.SegmentProperties in project pravega by pravega.

the class StorageWriterTests method verifyFinalOutput.

// region Helpers
private void verifyFinalOutput(HashMap<Long, ByteArrayOutputStream> segmentContents, Collection<Long> transactionIds, TestContext context) {
    // Verify all Transactions are deleted.
    for (long transactionId : transactionIds) {
        SegmentMetadata metadata = context.metadata.getStreamSegmentMetadata(transactionId);
        Assert.assertTrue("Transaction not marked as deleted in metadata: " + transactionId, metadata.isDeleted());
        Assert.assertFalse("Transaction was not deleted from storage after being merged: " + transactionId, context.storage.exists(metadata.getName(), TIMEOUT).join());
    }
    for (long segmentId : segmentContents.keySet()) {
        SegmentMetadata metadata = context.metadata.getStreamSegmentMetadata(segmentId);
        Assert.assertNotNull("Setup error: No metadata for segment " + segmentId, metadata);
        Assert.assertEquals("Setup error: Not expecting a Transaction segment in the final list: " + segmentId, ContainerMetadata.NO_STREAM_SEGMENT_ID, metadata.getParentId());
        Assert.assertEquals("Metadata does not indicate that all bytes were copied to Storage for segment " + segmentId, metadata.getLength(), metadata.getStorageLength());
        Assert.assertEquals("Metadata.Sealed disagrees with Metadata.SealedInStorage for segment " + segmentId, metadata.isSealed(), metadata.isSealedInStorage());
        SegmentProperties sp = context.storage.getStreamSegmentInfo(metadata.getName(), TIMEOUT).join();
        Assert.assertEquals("Metadata.StorageLength disagrees with Storage.Length for segment " + segmentId, metadata.getStorageLength(), sp.getLength());
        Assert.assertEquals("Metadata.Sealed/SealedInStorage disagrees with Storage.Sealed for segment " + segmentId, metadata.isSealedInStorage(), sp.isSealed());
        byte[] expected = segmentContents.get(segmentId).toByteArray();
        byte[] actual = new byte[expected.length];
        int actualLength = context.storage.read(InMemoryStorage.newHandle(metadata.getName(), true), 0, actual, 0, actual.length, TIMEOUT).join();
        Assert.assertEquals("Unexpected number of bytes read from Storage for segment " + segmentId, metadata.getStorageLength(), actualLength);
        Assert.assertArrayEquals("Unexpected data written to storage for segment " + segmentId, expected, actual);
        Assert.assertEquals("Unexpected truncation offset for segment " + segmentId, metadata.getStartOffset(), context.storage.getTruncationOffset(metadata.getName()));
    }
}
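
The calling test is not shown in this excerpt; a hedged sketch of how such a helper is typically driven (the setup steps elided in the comment are assumptions):

// Hypothetical call site for verifyFinalOutput: accumulate the bytes each
// append should leave in Storage, let the StorageWriter settle, then verify.
HashMap<Long, ByteArrayOutputStream> segmentContents = new HashMap<>();
ArrayList<Long> transactionIds = new ArrayList<>();
// ... create segments and transactions, append data (recording it in
// segmentContents), seal and merge the transactions, wait for the writer ...
verifyFinalOutput(segmentContents, transactionIds, context);
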
Also used : UpdateableSegmentMetadata(io.pravega.segmentstore.server.UpdateableSegmentMetadata) SegmentMetadata(io.pravega.segmentstore.server.SegmentMetadata) SegmentProperties(io.pravega.segmentstore.contracts.SegmentProperties)

Example 34 with SegmentProperties

use of io.pravega.segmentstore.contracts.SegmentProperties in project pravega by pravega.

the class ExtendedS3Storage method doConcat.

/**
 * Concat is implemented using the extended S3 implementation of the multipart copy API. See here for
 * more detail on multipart copy:
 * http://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingLLJavaMPUapi.html
 *
 * The multipart copy is an atomic operation. We schedule two parts and commit them atomically using
 * the completeMultipartUpload call. Specifically, to concatenate we copy both the target segment T and the
 * source segment S into T, so effectively we are doing T <- T + S.
 */
private Void doConcat(SegmentHandle targetHandle, long offset, String sourceSegment) throws StreamSegmentNotExistsException {
    Preconditions.checkArgument(!targetHandle.isReadOnly(), "target handle must not be read-only.");
    long traceId = LoggerHelpers.traceEnter(log, "concat", targetHandle.getSegmentName(), offset, sourceSegment);
    SortedSet<MultipartPartETag> partEtags = new TreeSet<>();
    String targetPath = config.getRoot() + targetHandle.getSegmentName();
    // Check whether the target exists before initiating the multipart upload,
    // so a missing segment does not leak an in-progress upload.
    if (!doExists(targetHandle.getSegmentName())) {
        throw new StreamSegmentNotExistsException(targetHandle.getSegmentName());
    }
    String uploadId = client.initiateMultipartUpload(config.getBucket(), targetPath);
    // Check whether the source is sealed.
    SegmentProperties si = doGetStreamSegmentInfo(sourceSegment);
    Preconditions.checkState(si.isSealed(), "Cannot concat segment '%s' into '%s' because it is not sealed.", sourceSegment, targetHandle.getSegmentName());
    // Copy the first part
    CopyPartRequest copyRequest = new CopyPartRequest(config.getBucket(), targetPath, config.getBucket(), targetPath, uploadId, 1).withSourceRange(Range.fromOffsetLength(0, offset));
    CopyPartResult copyResult = client.copyPart(copyRequest);
    partEtags.add(new MultipartPartETag(copyResult.getPartNumber(), copyResult.getETag()));
    // Copy the second part
    S3ObjectMetadata metadataResult = client.getObjectMetadata(config.getBucket(), config.getRoot() + sourceSegment);
    // Size of the source object, in bytes.
    long objectSize = metadataResult.getContentLength();
    copyRequest = new CopyPartRequest(config.getBucket(), config.getRoot() + sourceSegment, config.getBucket(), targetPath, uploadId, 2).withSourceRange(Range.fromOffsetLength(0, objectSize));
    copyResult = client.copyPart(copyRequest);
    partEtags.add(new MultipartPartETag(copyResult.getPartNumber(), copyResult.getETag()));
    // Close the upload
    client.completeMultipartUpload(new CompleteMultipartUploadRequest(config.getBucket(), targetPath, uploadId).withParts(partEtags));
    client.deleteObject(config.getBucket(), config.getRoot() + sourceSegment);
    LoggerHelpers.traceLeave(log, "concat", traceId);
    return null;
}
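
doConcat itself is synchronous, while the Storage contract it serves is CompletableFuture-based, so a wrapper along these lines is implied. A hedged sketch; the executor field and the use of CompletableFuture.supplyAsync are assumptions, not shown in this excerpt:

// Hypothetical async wrapper: run the blocking doConcat on an executor
// (assumed field) and surface the checked exception through the future.
@Override
public CompletableFuture<Void> concat(SegmentHandle targetHandle, long offset, String sourceSegment, Duration timeout) {
    return CompletableFuture.supplyAsync(() -> {
        try {
            return doConcat(targetHandle, offset, sourceSegment);
        } catch (StreamSegmentNotExistsException ex) {
            throw new CompletionException(ex);
        }
    }, this.executor);
}
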
Also used : CopyPartRequest(com.emc.object.s3.request.CopyPartRequest) TreeSet(java.util.TreeSet) MultipartPartETag(com.emc.object.s3.bean.MultipartPartETag) S3ObjectMetadata(com.emc.object.s3.S3ObjectMetadata) SegmentProperties(io.pravega.segmentstore.contracts.SegmentProperties) CopyPartResult(com.emc.object.s3.bean.CopyPartResult) StreamSegmentNotExistsException(io.pravega.segmentstore.contracts.StreamSegmentNotExistsException) CompleteMultipartUploadRequest(com.emc.object.s3.request.CompleteMultipartUploadRequest)

Example 35 with SegmentProperties

use of io.pravega.segmentstore.contracts.SegmentProperties in project pravega by pravega.

the class ExtendedS3Storage method doWrite.

private Void doWrite(SegmentHandle handle, long offset, InputStream data, int length) throws StreamSegmentException {
    Preconditions.checkArgument(!handle.isReadOnly(), "handle must not be read-only.");
    long traceId = LoggerHelpers.traceEnter(log, "write", handle.getSegmentName(), offset, length);
    SegmentProperties si = doGetStreamSegmentInfo(handle.getSegmentName());
    if (si.isSealed()) {
        throw new StreamSegmentSealedException(handle.getSegmentName());
    }
    if (si.getLength() != offset) {
        throw new BadOffsetException(handle.getSegmentName(), si.getLength(), offset);
    }
    client.putObject(this.config.getBucket(), this.config.getRoot() + handle.getSegmentName(), Range.fromOffsetLength(offset, length), data);
    LoggerHelpers.traceLeave(log, "write", traceId);
    return null;
}
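
The length check above gives doWrite compare-and-set semantics: a write succeeds only at the segment's current tail, so a stale or concurrent writer gets BadOffsetException instead of silently corrupting the segment. A caller-side sketch; appendAtTail is a hypothetical helper, but the methods it calls are all shown above:

// Hypothetical helper illustrating the offset contract of doWrite: read the
// current length first and write exactly there.
private void appendAtTail(SegmentHandle handle, byte[] payload) throws StreamSegmentException {
    SegmentProperties si = doGetStreamSegmentInfo(handle.getSegmentName());
    doWrite(handle, si.getLength(), new ByteArrayInputStream(payload), payload.length);
}
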
Also used : StreamSegmentSealedException(io.pravega.segmentstore.contracts.StreamSegmentSealedException) BadOffsetException(io.pravega.segmentstore.contracts.BadOffsetException) SegmentProperties(io.pravega.segmentstore.contracts.SegmentProperties)

Aggregations

SegmentProperties (io.pravega.segmentstore.contracts.SegmentProperties): 43 uses
Test (org.junit.Test): 24 uses
Cleanup (lombok.Cleanup): 22 uses
AtomicLong (java.util.concurrent.atomic.AtomicLong): 19 uses
StreamSegmentNotExistsException (io.pravega.segmentstore.contracts.StreamSegmentNotExistsException): 18 uses
lombok.val (lombok.val): 18 uses
HashMap (java.util.HashMap): 17 uses
CompletableFuture (java.util.concurrent.CompletableFuture): 17 uses
ArrayList (java.util.ArrayList): 16 uses
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 15 uses
SegmentMetadata (io.pravega.segmentstore.server.SegmentMetadata): 13 uses
BadOffsetException (io.pravega.segmentstore.contracts.BadOffsetException): 11 uses
UUID (java.util.UUID): 11 uses
Exceptions (io.pravega.common.Exceptions): 10 uses
Duration (java.time.Duration): 10 uses
Map (java.util.Map): 10 uses
AtomicReference (java.util.concurrent.atomic.AtomicReference): 10 uses
StorageOperation (io.pravega.segmentstore.server.logs.operations.StorageOperation): 9 uses
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 9 uses
Futures (io.pravega.common.concurrent.Futures): 8 uses