
Example 21 with CompletionException

use of java.util.concurrent.CompletionException in project pravega by pravega.

the class ContainerReadIndexTests method testCacheEviction.

/**
 * Tests the ability to evict entries from the ReadIndex under various conditions:
 * * If an entry is aged out
 * * If an entry is pushed out because of cache space pressure.
 * <p>
 * This also verifies that certain entries, such as RedirectReadIndexEntries and entries after the Storage Offset are
 * not removed.
 * <p>
 * The way this test goes is as follows (it's pretty subtle, because there aren't many ways to hook into the ReadIndex and see what it's doing):
 * 1. It creates a bunch of segments and populates them in storage (each) up to offset 0.75N-1 (this is called pre-storage).
 * 2. It populates the ReadIndex for each of those segments from offset 0.75N to offset N-1 (this is called post-storage).
 * 3. It loads all the data from Storage into the ReadIndex, in entries of size equal to those already loaded in step #2.
 * 3a. At this point, all the entries added in step #2 have generations 0..A/4-1, and those loaded in step #3 have generations A/4..A-1 (A being the number of generations elapsed so far).
 * 4. Append more data at the end. This forces the generation to increase to 1.25A.
 * 4a. Nothing should be evicted from the cache now, since the earliest items are all post-storage.
 * 5. We 'touch' (read) the first 1/3 of pre-storage entries (offsets 0..N/4).
 * 5a. At this point, those entries (offsets 0..N/4) will have the newest generations (1.25A..1.5A).
 * 6. We append more data (equivalent to the data we touched)
 * 6a. Nothing should be evicted, since the entries that had just become eligible for removal were touched and bumped to the newest generation.
 * 7. We forcefully increase the current generation by 1 (without touching the ReadIndex)
 * 7a. At this point, we expect all the pre-storage items, except the touched ones, to be evicted. These are the entries at offsets 0.25N..0.75N.
 * 8. Update the metadata and indicate that all the post-storage entries are now pre-storage and bump the generation by 0.75A.
 * 8a. At this point, we expect all former post-storage items and pre-storage items to be evicted (in this order).
 * <p>
 * The final order of eviction (in terms of offsets, for each segment), is:
 * * 0.25N..0.75N, 0.75N..N, N..1.25N, 0..0.25N, 1.25N..1.5N (remember that we added quite a few items after the initial run).
 */
@Test
@SuppressWarnings("checkstyle:CyclomaticComplexity")
public void testCacheEviction() throws Exception {
    // Create a CachePolicy with a set number of generations and a known max size.
    // Each generation contains exactly one entry, so the number of generations is also the number of entries.
    final int appendSize = 100;
    // This also doubles as the number of generations (each generation, we add one append for each segment).
    final int entriesPerSegment = 100;
    final int cacheMaxSize = SEGMENT_COUNT * entriesPerSegment * appendSize;
    // 25% of the entries are beyond the StorageOffset
    final int postStorageEntryCount = entriesPerSegment / 4;
    // 75% of the entries are before the StorageOffset.
    final int preStorageEntryCount = entriesPerSegment - postStorageEntryCount;
    CachePolicy cachePolicy = new CachePolicy(cacheMaxSize, Duration.ofMillis(1000 * 2 * entriesPerSegment), Duration.ofMillis(1000));
    // To properly test this, we want predictable storage reads.
    ReadIndexConfig config = ConfigHelpers.withInfiniteCachePolicy(ReadIndexConfig.builder().with(ReadIndexConfig.STORAGE_READ_ALIGNMENT, appendSize)).build();
    ArrayList<CacheKey> removedKeys = new ArrayList<>();
    @Cleanup TestContext context = new TestContext(config, cachePolicy);
    // Record every cache removal.
    context.cacheFactory.cache.removeCallback = removedKeys::add;
    // Create the segments (metadata + storage).
    ArrayList<Long> segmentIds = createSegments(context);
    createSegmentsInStorage(context);
    // Populate the Storage with appropriate data.
    byte[] preStorageData = new byte[preStorageEntryCount * appendSize];
    for (long segmentId : segmentIds) {
        UpdateableSegmentMetadata sm = context.metadata.getStreamSegmentMetadata(segmentId);
        val handle = context.storage.openWrite(sm.getName()).join();
        context.storage.write(handle, 0, new ByteArrayInputStream(preStorageData), preStorageData.length, TIMEOUT).join();
        sm.setStorageLength(preStorageData.length);
        sm.setLength(preStorageData.length);
    }
    // Callback that appends one entry at the end of the given segment id.
    Consumer<Long> appendOneEntry = segmentId -> {
        UpdateableSegmentMetadata sm = context.metadata.getStreamSegmentMetadata(segmentId);
        byte[] data = new byte[appendSize];
        long offset = sm.getLength();
        sm.setLength(offset + data.length);
        try {
            context.readIndex.append(segmentId, offset, data);
        } catch (StreamSegmentNotExistsException ex) {
            throw new CompletionException(ex);
        }
    };
    // Populate the ReadIndex with the Append entries (post-StorageOffset)
    for (int i = 0; i < postStorageEntryCount; i++) {
        segmentIds.forEach(appendOneEntry);
        // Each time we make a round of appends (one per segment), we increment the generation in the CacheManager.
        context.cacheManager.applyCachePolicy();
    }
    // Read all the data from Storage, making sure we carefully associate them with the proper generation.
    for (int i = 0; i < preStorageEntryCount; i++) {
        long offset = i * appendSize;
        for (long segmentId : segmentIds) {
            @Cleanup ReadResult result = context.readIndex.read(segmentId, offset, appendSize, TIMEOUT);
            ReadResultEntry resultEntry = result.next();
            Assert.assertEquals("Unexpected type of ReadResultEntry when trying to load up data into the ReadIndex Cache.", ReadResultEntryType.Storage, resultEntry.getType());
            resultEntry.requestContent(TIMEOUT);
            ReadResultEntryContents contents = resultEntry.getContent().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
            Assert.assertFalse("Not expecting more data to be available for reading.", result.hasNext());
            Assert.assertEquals("Unexpected ReadResultEntry length when trying to load up data into the ReadIndex Cache.", appendSize, contents.getLength());
        }
        context.cacheManager.applyCachePolicy();
    }
    Assert.assertEquals("Not expecting any removed Cache entries at this point (cache is not full).", 0, removedKeys.size());
    // Append more data (equivalent to all post-storage entries), and verify that NO entries are being evicted (we cannot evict post-storage entries).
    for (int i = 0; i < postStorageEntryCount; i++) {
        segmentIds.forEach(appendOneEntry);
        context.cacheManager.applyCachePolicy();
    }
    Assert.assertEquals("Not expecting any removed Cache entries at this point (only eligible entries were post-storage).", 0, removedKeys.size());
    // 'Touch' the first few entries read from storage. This should move them to the back of the queue (they won't be the first ones to be evicted).
    int touchCount = preStorageEntryCount / 3;
    for (int i = 0; i < touchCount; i++) {
        long offset = i * appendSize;
        for (long segmentId : segmentIds) {
            @Cleanup ReadResult result = context.readIndex.read(segmentId, offset, appendSize, TIMEOUT);
            ReadResultEntry resultEntry = result.next();
            Assert.assertEquals("Unexpected type of ReadResultEntry when trying to load up data into the ReadIndex Cache.", ReadResultEntryType.Cache, resultEntry.getType());
        }
    }
    // Append more data (equivalent to the amount of data we 'touched'), and verify that the entries we just touched are not removed.
    for (int i = 0; i < touchCount; i++) {
        segmentIds.forEach(appendOneEntry);
        context.cacheManager.applyCachePolicy();
    }
    Assert.assertEquals("Not expecting any removed Cache entries at this point (we touched old entries and they now have the newest generation).", 0, removedKeys.size());
    // Increment the generations so that we are caught up to just before the generation where the "touched" items now live.
    context.cacheManager.applyCachePolicy();
    // We expect all but the 'touchCount' pre-Storage entries to be removed.
    int expectedRemovalCount = (preStorageEntryCount - touchCount) * SEGMENT_COUNT;
    Assert.assertEquals("Unexpected number of removed entries after having forced out all pre-storage entries.", expectedRemovalCount, removedKeys.size());
    // Now update the metadata and indicate that all the post-storage data has been moved to storage.
    segmentIds.forEach(segmentId -> {
        UpdateableSegmentMetadata sm = context.metadata.getStreamSegmentMetadata(segmentId);
        sm.setStorageLength(sm.getLength());
    });
    // We add one artificial entry, which we'll be touching forever and ever; this forces the CacheManager to
    // update its current generation every time. We will be ignoring this entry for our test.
    SegmentMetadata readSegment = context.metadata.getStreamSegmentMetadata(segmentIds.get(0));
    appendOneEntry.accept(readSegment.getId());
    // Now evict everything (whether by size or by aging out).
    for (int i = 0; i < cachePolicy.getMaxGenerations(); i++) {
        @Cleanup ReadResult result = context.readIndex.read(readSegment.getId(), readSegment.getLength() - appendSize, appendSize, TIMEOUT);
        result.next();
        context.cacheManager.applyCachePolicy();
    }
    int expectedRemovalCountPerSegment = entriesPerSegment + touchCount + postStorageEntryCount;
    int expectedTotalRemovalCount = SEGMENT_COUNT * expectedRemovalCountPerSegment;
    Assert.assertEquals("Unexpected number of removed entries after having forced out all the entries.", expectedTotalRemovalCount, removedKeys.size());
    // Finally, verify that the evicted items are in the correct order (for each segment). See this test's description for details.
    for (long segmentId : segmentIds) {
        List<CacheKey> segmentRemovedKeys = removedKeys.stream().filter(key -> key.getStreamSegmentId() == segmentId).collect(Collectors.toList());
        Assert.assertEquals("Unexpected number of removed entries for segment " + segmentId, expectedRemovalCountPerSegment, segmentRemovedKeys.size());
        // The correct order of eviction (N = entriesPerSegment) is: 0.25N..0.75N, 0.75N..N, N..1.25N, 0..0.25N, 1.25N..1.5N.
        // This is equivalent to the following checks:
        // 0.25N..1.25N
        checkOffsets(segmentRemovedKeys, segmentId, 0, entriesPerSegment, entriesPerSegment * appendSize / 4, appendSize);
        // 0..0.25N
        checkOffsets(segmentRemovedKeys, segmentId, entriesPerSegment, entriesPerSegment / 4, 0, appendSize);
        // 1.25N..1.5N
        checkOffsets(segmentRemovedKeys, segmentId, entriesPerSegment + entriesPerSegment / 4, entriesPerSegment / 4, (int) (entriesPerSegment * appendSize * 1.25), appendSize);
    }
}
Also used : Storage(io.pravega.segmentstore.storage.Storage) StreamSegmentNotExistsException(io.pravega.segmentstore.contracts.StreamSegmentNotExistsException) AssertExtensions(io.pravega.test.common.AssertExtensions) CacheKey(io.pravega.segmentstore.server.CacheKey) Cleanup(lombok.Cleanup) Random(java.util.Random) InMemoryCache(io.pravega.segmentstore.storage.mocks.InMemoryCache) UpdateableSegmentMetadata(io.pravega.segmentstore.server.UpdateableSegmentMetadata) StreamSegmentSealedException(io.pravega.segmentstore.contracts.StreamSegmentSealedException) ByteArrayInputStream(java.io.ByteArrayInputStream) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ReadResultEntryContents(io.pravega.segmentstore.contracts.ReadResultEntryContents) InMemoryStorageFactory(io.pravega.segmentstore.storage.mocks.InMemoryStorageFactory) Duration(java.time.Duration) Map(java.util.Map) CacheFactory(io.pravega.segmentstore.storage.CacheFactory) Collection(java.util.Collection) CompletionException(java.util.concurrent.CompletionException) ReadResultEntryType(io.pravega.segmentstore.contracts.ReadResultEntryType) UUID(java.util.UUID) Collectors(java.util.stream.Collectors) StreamSegmentNameUtils(io.pravega.shared.segment.StreamSegmentNameUtils) UncheckedIOException(java.io.UncheckedIOException) List(java.util.List) ThreadPooledTestSuite(io.pravega.test.common.ThreadPooledTestSuite) Futures(io.pravega.common.concurrent.Futures) ReadResult(io.pravega.segmentstore.contracts.ReadResult) MetadataBuilder(io.pravega.segmentstore.server.MetadataBuilder) ByteArrayOutputStream(java.io.ByteArrayOutputStream) ConfigHelpers(io.pravega.segmentstore.server.ConfigHelpers) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) UpdateableContainerMetadata(io.pravega.segmentstore.server.UpdateableContainerMetadata) SegmentMetadata(io.pravega.segmentstore.server.SegmentMetadata) ReadResultEntry(io.pravega.segmentstore.contracts.ReadResultEntry) BiConsumer(java.util.function.BiConsumer) Timeout(org.junit.rules.Timeout) StreamHelpers(io.pravega.common.io.StreamHelpers) StreamSegmentTruncatedException(io.pravega.segmentstore.contracts.StreamSegmentTruncatedException) lombok.val(lombok.val) IOException(java.io.IOException) Test(org.junit.Test) TimeUnit(java.util.concurrent.TimeUnit) Consumer(java.util.function.Consumer) AtomicLong(java.util.concurrent.atomic.AtomicLong) Rule(org.junit.Rule) Assert(org.junit.Assert) Collections(java.util.Collections) Cache(io.pravega.segmentstore.storage.Cache) InputStream(java.io.InputStream)
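
For readers checking the arithmetic in the test's description: with the constants used above (entriesPerSegment = 100, appendSize = 100), the three checkOffsets() calls translate to concrete byte ranges. The following standalone sketch is illustrative only (the class name is hypothetical; it is not part of the Pravega test):

public final class EvictionOrderSketch {
    public static void main(String[] args) {
        final int entriesPerSegment = 100; // N, counted in entries
        final int appendSize = 100;        // bytes per entry
        // checkOffsets #1: entries 0.25N..1.25N (untouched pre-storage, former post-storage, first extra appends).
        int firstStartOffset = entriesPerSegment * appendSize / 4;              // 2,500
        int firstEndOffset = firstStartOffset + entriesPerSegment * appendSize; // 12,500
        // checkOffsets #2: entries 0..0.25N (the 'touched' pre-storage entries).
        int secondStartOffset = 0;
        int secondEndOffset = (entriesPerSegment / 4) * appendSize;             // 2,500
        // checkOffsets #3: entries 1.25N..1.5N (the appends made while touching).
        int thirdStartOffset = (int) (entriesPerSegment * appendSize * 1.25);   // 12,500
        int thirdEndOffset = thirdStartOffset + (entriesPerSegment / 4) * appendSize; // 15,000
        System.out.printf("eviction order: [%d..%d), [%d..%d), [%d..%d)%n",
                firstStartOffset, firstEndOffset, secondStartOffset, secondEndOffset,
                thirdStartOffset, thirdEndOffset);
    }
}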

Example 22 with CompletionException

use of java.util.concurrent.CompletionException in project pravega by pravega.

the class ContainerReadIndexTests method testReadDirect.

/**
 * Tests the readDirect() method on the ReadIndex.
 */
@Test
public void testReadDirect() throws Exception {
    final int randomAppendLength = 1024;
    @Cleanup TestContext context = new TestContext();
    ArrayList<Long> segmentIds = new ArrayList<>();
    final long segmentId = createSegment(0, context);
    final UpdateableSegmentMetadata segmentMetadata = context.metadata.getStreamSegmentMetadata(segmentId);
    segmentIds.add(segmentId);
    HashMap<Long, ArrayList<Long>> transactionsBySegment = createTransactions(segmentIds, 1, context);
    final long mergedTxId = transactionsBySegment.get(segmentId).get(0);
    // Add data to all segments.
    HashMap<Long, ByteArrayOutputStream> segmentContents = new HashMap<>();
    transactionsBySegment.values().forEach(segmentIds::addAll);
    appendData(segmentIds, segmentContents, context);
    // Mark everything so far (minus a few bytes) as being written to storage.
    segmentMetadata.setStorageLength(segmentMetadata.getLength() - 100);
    // Now partially merge a second transaction
    final long mergedTxOffset = beginMergeTransaction(mergedTxId, segmentMetadata, segmentContents, context);
    // Add one more append after all of this.
    final long endOfMergedDataOffset = segmentMetadata.getLength();
    byte[] appendData = new byte[randomAppendLength];
    new Random(0).nextBytes(appendData);
    appendSingleWrite(segmentId, appendData, context);
    recordAppend(segmentId, appendData, segmentContents);
    // Verify we are not allowed to read from the range which has already been committed to Storage (invalid arguments).
    for (AtomicLong offset = new AtomicLong(0); offset.get() < segmentMetadata.getStorageLength(); offset.incrementAndGet()) {
        AssertExtensions.assertThrows(String.format("readDirect allowed reading from an illegal offset (%s).", offset), () -> context.readIndex.readDirect(segmentId, offset.get(), 1), ex -> ex instanceof IllegalArgumentException);
    }
    // Verify that any reads overlapping a merged transaction return null (that is, we cannot retrieve the requested data).
    for (long offset = mergedTxOffset - 1; offset < endOfMergedDataOffset; offset++) {
        InputStream resultStream = context.readIndex.readDirect(segmentId, offset, 2);
        Assert.assertNull("readDirect() returned data overlapping a partially merged transaction", resultStream);
    }
    // Verify that we can read from any other offset.
    final byte[] expectedData = segmentContents.get(segmentId).toByteArray();
    BiConsumer<Long, Long> verifyReadResult = (startOffset, endOffset) -> {
        int readLength = (int) (endOffset - startOffset);
        while (readLength > 0) {
            InputStream actualDataStream;
            try {
                actualDataStream = context.readIndex.readDirect(segmentId, startOffset, readLength);
            } catch (StreamSegmentNotExistsException ex) {
                throw new CompletionException(ex);
            }
            Assert.assertNotNull(String.format("Unexpected result when data is readily available for Offset = %s, Length = %s.", startOffset, readLength), actualDataStream);
            byte[] actualData = new byte[readLength];
            try {
                int bytesCopied = StreamHelpers.readAll(actualDataStream, actualData, 0, readLength);
                Assert.assertEquals(String.format("Unexpected number of bytes read for Offset = %s, Length = %s (pre-partial-merge).", startOffset, readLength), readLength, bytesCopied);
            } catch (IOException ex) {
                // Technically not possible.
                throw new UncheckedIOException(ex);
            }
            AssertExtensions.assertArrayEquals("Unexpected data read from the segment at offset " + startOffset, expectedData, startOffset.intValue(), actualData, 0, actualData.length);
            // Setup the read for the next test (where we read 1 less byte than now).
            readLength--;
            if (readLength % 2 == 0) {
                // For every 2 bytes of decreased read length, increase the start offset by 1. This allows for a greater
                // number of combinations to be tested.
                startOffset++;
            }
        }
    };
    // Verify that we can read the cached data just after the StorageLength but before the merged transaction.
    verifyReadResult.accept(segmentMetadata.getStorageLength(), mergedTxOffset);
    // Verify that we can read the cached data just after the merged transaction but before the end of the segment.
    verifyReadResult.accept(endOfMergedDataOffset, segmentMetadata.getLength());
}
Also used : Storage(io.pravega.segmentstore.storage.Storage) StreamSegmentNotExistsException(io.pravega.segmentstore.contracts.StreamSegmentNotExistsException) AssertExtensions(io.pravega.test.common.AssertExtensions) CacheKey(io.pravega.segmentstore.server.CacheKey) Cleanup(lombok.Cleanup) Random(java.util.Random) InMemoryCache(io.pravega.segmentstore.storage.mocks.InMemoryCache) UpdateableSegmentMetadata(io.pravega.segmentstore.server.UpdateableSegmentMetadata) StreamSegmentSealedException(io.pravega.segmentstore.contracts.StreamSegmentSealedException) ByteArrayInputStream(java.io.ByteArrayInputStream) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ReadResultEntryContents(io.pravega.segmentstore.contracts.ReadResultEntryContents) InMemoryStorageFactory(io.pravega.segmentstore.storage.mocks.InMemoryStorageFactory) Duration(java.time.Duration) Map(java.util.Map) CacheFactory(io.pravega.segmentstore.storage.CacheFactory) Collection(java.util.Collection) CompletionException(java.util.concurrent.CompletionException) ReadResultEntryType(io.pravega.segmentstore.contracts.ReadResultEntryType) UUID(java.util.UUID) Collectors(java.util.stream.Collectors) StreamSegmentNameUtils(io.pravega.shared.segment.StreamSegmentNameUtils) UncheckedIOException(java.io.UncheckedIOException) List(java.util.List) ThreadPooledTestSuite(io.pravega.test.common.ThreadPooledTestSuite) Futures(io.pravega.common.concurrent.Futures) ReadResult(io.pravega.segmentstore.contracts.ReadResult) MetadataBuilder(io.pravega.segmentstore.server.MetadataBuilder) ByteArrayOutputStream(java.io.ByteArrayOutputStream) ConfigHelpers(io.pravega.segmentstore.server.ConfigHelpers) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) UpdateableContainerMetadata(io.pravega.segmentstore.server.UpdateableContainerMetadata) SegmentMetadata(io.pravega.segmentstore.server.SegmentMetadata) ReadResultEntry(io.pravega.segmentstore.contracts.ReadResultEntry) BiConsumer(java.util.function.BiConsumer) Timeout(org.junit.rules.Timeout) StreamHelpers(io.pravega.common.io.StreamHelpers) StreamSegmentTruncatedException(io.pravega.segmentstore.contracts.StreamSegmentTruncatedException) lombok.val(lombok.val) IOException(java.io.IOException) Test(org.junit.Test) TimeUnit(java.util.concurrent.TimeUnit) Consumer(java.util.function.Consumer) AtomicLong(java.util.concurrent.atomic.AtomicLong) Rule(org.junit.Rule) Assert(org.junit.Assert) Collections(java.util.Collections) Cache(io.pravega.segmentstore.storage.Cache) InputStream(java.io.InputStream)
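
The shrinking-window loop inside verifyReadResult above is easy to misread: the read length decreases by one per iteration, and the start offset advances by one for every two bytes of shrinkage. A minimal sketch with hypothetical values that enumerates the (offset, length) pairs it generates:

public final class ShrinkingWindowSketch {
    public static void main(String[] args) {
        long startOffset = 10; // hypothetical window [10, 14)
        int readLength = 4;
        while (readLength > 0) {
            System.out.printf("read(offset=%d, length=%d)%n", startOffset, readLength);
            readLength--;
            if (readLength % 2 == 0) {
                startOffset++; // one byte forward for every two bytes of shrink
            }
        }
        // Prints: (10,4), (10,3), (11,2), (11,1)
    }
}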

Example 23 with CompletionException

use of java.util.concurrent.CompletionException in project pravega by pravega.

the class Producer method runOneIteration.

// endregion
// region Producer Implementation
/**
 * Executes one iteration of the Producer.
 * 1. Requests a new ProducerOperation from the DataSource.
 * 2. Executes it.
 * 3. Completes the ProducerOperation with either success or failure based on the outcome of step #2.
 */
private CompletableFuture<Void> runOneIteration() {
    this.iterationCount.incrementAndGet();
    val futures = new ArrayList<CompletableFuture<Void>>();
    for (int i = 0; i < this.config.getProducerParallelism(); i++) {
        ProducerOperation op = this.dataSource.nextOperation();
        if (op == null) {
            // Nothing more to do.
            this.canContinue.set(false);
            break;
        }
        CompletableFuture<Void> result;
        try {
            CompletableFuture<Void> waitOn = op.getWaitOn();
            if (waitOn != null) {
                result = waitOn.exceptionally(ex -> null).thenComposeAsync(v -> executeOperation(op), this.executorService);
            } else {
                result = executeOperation(op);
            }
        } catch (Throwable ex) {
            // Catch and handle sync errors.
            op.completed(-1);
            if (handleOperationError(ex, op)) {
                // Exception handled; skip this iteration since there's nothing more we can do.
                continue;
            } else {
                result = Futures.failedFuture(ex);
            }
        }
        futures.add(result.exceptionally(ex -> {
            // Catch and handle async errors.
            if (handleOperationError(ex, op)) {
                return null;
            }
            throw new CompletionException(ex);
        }));
    }
    return Futures.allOf(futures);
}
Also used : lombok.val(lombok.val) StoreAdapter(io.pravega.test.integration.selftest.adapters.StoreAdapter) SneakyThrows(lombok.SneakyThrows) Exceptions(io.pravega.common.Exceptions) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) CompletableFuture(java.util.concurrent.CompletableFuture) CompletionException(java.util.concurrent.CompletionException) Supplier(java.util.function.Supplier) ArrayList(java.util.ArrayList) AbstractTimer(io.pravega.common.AbstractTimer) AtomicLong(java.util.concurrent.atomic.AtomicLong) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) NotImplementedException(org.apache.commons.lang.NotImplementedException) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) Futures(io.pravega.common.concurrent.Futures)
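
The result.exceptionally(...) callback above shows the standard CompletionStage error-handling idiom: return a value to swallow a handled error, or rethrow it wrapped in a CompletionException to keep the stage failed. A self-contained sketch of the same pattern (tryHandle is a hypothetical stand-in for handleOperationError):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

public final class ExceptionallySketch {
    // Hypothetical stand-in for handleOperationError(): true if the error was handled.
    static boolean tryHandle(Throwable ex) {
        Throwable cause = ex instanceof CompletionException && ex.getCause() != null ? ex.getCause() : ex;
        return cause instanceof IllegalStateException;
    }

    public static void main(String[] args) {
        CompletableFuture<Void> failed = new CompletableFuture<>();
        failed.completeExceptionally(new IllegalStateException("expected"));
        CompletableFuture<Void> outcome = failed.exceptionally(ex -> {
            if (tryHandle(ex)) {
                return null;                   // handled: the stage completes normally
            }
            throw new CompletionException(ex); // unhandled: the stage stays failed
        });
        outcome.join(); // completes normally here, because the error was handled
    }
}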

Example 24 with CompletionException

use of java.util.concurrent.CompletionException in project pravega by pravega.

the class ClientAdapterBase method append.

@Override
public CompletableFuture<Void> append(String streamName, Event event, Duration timeout) {
    ensureRunning();
    ArrayView s = event.getSerialization();
    byte[] payload = s.arrayOffset() == 0 ? s.array() : Arrays.copyOfRange(s.array(), s.arrayOffset(), s.getLength());
    String routingKey = Integer.toString(event.getRoutingKey());
    String parentName = StreamSegmentNameUtils.getParentStreamSegmentName(streamName);
    if (isTransaction(streamName, parentName)) {
        // Dealing with a Transaction.
        return CompletableFuture.runAsync(() -> {
            try {
                UUID txnId = getTransactionId(streamName);
                getWriter(parentName, event.getRoutingKey()).getTxn(txnId).writeEvent(routingKey, payload);
            } catch (Exception ex) {
                this.transactionIds.remove(streamName);
                throw new CompletionException(ex);
            }
        }, this.testExecutor);
    } else {
        try {
            return getWriter(streamName, event.getRoutingKey()).writeEvent(routingKey, payload);
        } catch (Exception ex) {
            return Futures.failedFuture(ex);
        }
    }
}
Also used : CompletionException(java.util.concurrent.CompletionException) ArrayView(io.pravega.common.util.ArrayView) UUID(java.util.UUID) StreamSegmentNotExistsException(io.pravega.segmentstore.contracts.StreamSegmentNotExistsException) StreamingException(io.pravega.segmentstore.contracts.StreamingException) StreamSegmentExistsException(io.pravega.segmentstore.contracts.StreamSegmentExistsException) TxnFailedException(io.pravega.client.stream.TxnFailedException)
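
The try/catch inside runAsync above is needed because a Runnable cannot throw checked exceptions; rethrowing the checked cause wrapped in a CompletionException both satisfies the compiler and fails the returned future with the original exception preserved as the cause. A minimal sketch of the pattern (checkedWrite is a hypothetical stand-in for writeEvent):

import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

public final class WrapCheckedSketch {
    // Hypothetical checked operation standing in for writeEvent().
    static void checkedWrite(String payload) throws IOException {
        if (payload.isEmpty()) {
            throw new IOException("empty payload");
        }
    }

    static CompletableFuture<Void> append(String payload) {
        return CompletableFuture.runAsync(() -> {
            try {
                checkedWrite(payload);
            } catch (IOException ex) {
                // A Runnable cannot throw IOException; rethrow unchecked so the
                // returned future completes exceptionally with ex as its cause.
                throw new CompletionException(ex);
            }
        });
    }

    public static void main(String[] args) {
        append("data").join(); // completes normally; append("") would fail with the IOException as cause
    }
}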

Example 25 with CompletionException

use of java.util.concurrent.CompletionException in project pravega by pravega.

the class ClientAdapterBase method createStream.

@Override
public CompletableFuture<Void> createStream(String streamName, Duration timeout) {
    ensureRunning();
    return CompletableFuture.runAsync(() -> {
        if (this.streamWriters.containsKey(streamName)) {
            throw new CompletionException(new StreamSegmentExistsException(streamName));
        }
        StreamConfiguration config = StreamConfiguration.builder().streamName(streamName).scalingPolicy(ScalingPolicy.fixed(this.testConfig.getSegmentsPerStream())).scope(SCOPE).build();
        if (!getStreamManager().createStream(SCOPE, streamName, config)) {
            throw new CompletionException(new StreamingException(String.format("Unable to create Stream '%s'.", streamName)));
        }
        int writerCount = Math.max(1, this.testConfig.getProducerCount() / this.testConfig.getStreamCount());
        List<EventStreamWriter<byte[]>> writers = new ArrayList<>(writerCount);
        if (this.streamWriters.putIfAbsent(streamName, writers) == null) {
            for (int i = 0; i < writerCount; i++) {
                writers.add(getClientFactory().createEventWriter(streamName, SERIALIZER, WRITER_CONFIG));
            }
        }
    }, this.testExecutor);
}
Also used : StreamingException(io.pravega.segmentstore.contracts.StreamingException) StreamSegmentExistsException(io.pravega.segmentstore.contracts.StreamSegmentExistsException) CompletionException(java.util.concurrent.CompletionException) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) ArrayList(java.util.ArrayList) EventStreamWriter(io.pravega.client.stream.EventStreamWriter)
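
On the caller side, the exceptions wrapped above come back as the cause of the failed future: join() rethrows the CompletionException itself, while get() unwraps it and rethrows the cause inside an ExecutionException. A brief sketch (the IllegalArgumentException stands in for the StreamingException / StreamSegmentExistsException used above):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutionException;

public final class UnwrapSketch {
    public static void main(String[] args) throws InterruptedException {
        CompletableFuture<Void> future = CompletableFuture.runAsync(() -> {
            throw new CompletionException(new IllegalArgumentException("stream already exists"));
        });
        try {
            future.join();
        } catch (CompletionException ex) {
            System.out.println("join() cause: " + ex.getCause()); // the IllegalArgumentException
        }
        try {
            future.get();
        } catch (ExecutionException ex) {
            System.out.println("get() cause: " + ex.getCause());  // also the IllegalArgumentException
        }
    }
}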

Aggregations

CompletionException (java.util.concurrent.CompletionException)199 Test (org.junit.Test)80 CompletableFuture (java.util.concurrent.CompletableFuture)62 List (java.util.List)52 ArrayList (java.util.ArrayList)51 IOException (java.io.IOException)45 Map (java.util.Map)39 Collection (java.util.Collection)31 ExecutionException (java.util.concurrent.ExecutionException)31 HashMap (java.util.HashMap)30 Collections (java.util.Collections)24 TimeUnit (java.util.concurrent.TimeUnit)22 Collectors (java.util.stream.Collectors)22 FlinkException (org.apache.flink.util.FlinkException)22 Before (org.junit.Before)21 Duration (java.time.Duration)19 Arrays (java.util.Arrays)19 BeforeClass (org.junit.BeforeClass)19 ExecutorService (java.util.concurrent.ExecutorService)18 Nonnull (javax.annotation.Nonnull)17