Example 71 with OperationContext

Use of io.pravega.controller.store.stream.OperationContext in project pravega by pravega.

In the class ZookeeperKVTMetadataStore, the method recordLastKVTableSegment:

@Override
CompletableFuture<Void> recordLastKVTableSegment(String scope, String kvtable, int lastActiveSegment, OperationContext context, Executor executor) {
    final String deletePath = String.format(DELETED_KVTABLES_PATH, getScopedKVTName(scope, kvtable));
    byte[] maxSegmentNumberBytes = new byte[Integer.BYTES];
    BitConverter.writeInt(maxSegmentNumberBytes, 0, lastActiveSegment);
    return storeHelper.getData(deletePath, x -> BitConverter.readInt(x, 0)).exceptionally(e -> {
        if (e instanceof StoreException.DataNotFoundException) {
            return null;
        } else {
            throw new CompletionException(e);
        }
    }).thenCompose(data -> {
        log.debug("Recording last segment {} for stream {}/{} on deletion.", lastActiveSegment, scope, kvtable);
        if (data == null) {
            return Futures.toVoid(storeHelper.createZNodeIfNotExist(deletePath, maxSegmentNumberBytes));
        } else {
            final int oldLastActiveSegment = data.getObject();
            Preconditions.checkArgument(lastActiveSegment >= oldLastActiveSegment, "Old last active segment ({}) for {}/{} is higher than current one {}.", oldLastActiveSegment, scope, kvtable, lastActiveSegment);
            return Futures.toVoid(storeHelper.setData(deletePath, maxSegmentNumberBytes, data.getVersion()));
        }
    });
}
Also used : OperationContext(io.pravega.controller.store.stream.OperationContext) ZKScope(io.pravega.controller.store.ZKScope) Getter(lombok.Getter) Executor(java.util.concurrent.Executor) CompletableFuture(java.util.concurrent.CompletableFuture) CompletionException(java.util.concurrent.CompletionException) UUID(java.util.UUID) KeyValueTableConfiguration(io.pravega.client.tables.KeyValueTableConfiguration) BitConverter(io.pravega.common.util.BitConverter) ZKStoreHelper(io.pravega.controller.store.ZKStoreHelper) Slf4j(lombok.extern.slf4j.Slf4j) ZKPaths(org.apache.curator.utils.ZKPaths) AccessLevel(lombok.AccessLevel) CuratorFramework(org.apache.curator.framework.CuratorFramework) StoreException(io.pravega.controller.store.stream.StoreException) Preconditions(com.google.common.base.Preconditions) ZKHostIndex(io.pravega.controller.store.index.ZKHostIndex) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Futures(io.pravega.common.concurrent.Futures) CompletionException(java.util.concurrent.CompletionException) StoreException(io.pravega.controller.store.stream.StoreException)

Example 72 with OperationContext

Use of io.pravega.controller.store.stream.OperationContext in project pravega by pravega.

In the class ControllerServiceTest, the method setup:

@Before
public void setup() throws Exception {
    final TaskMetadataStore taskMetadataStore = TaskStoreFactory.createZKStore(PRAVEGA_ZK_CURATOR_RESOURCE.client, executor);
    final HostControllerStore hostStore = HostStoreFactory.createInMemoryStore(HostMonitorConfigImpl.dummyConfig());
    BucketStore bucketStore = StreamStoreFactory.createInMemoryBucketStore();
    connectionPool = new ConnectionPoolImpl(ClientConfig.builder().build(), new SocketConnectionFactoryImpl(ClientConfig.builder().build()));
    SegmentHelper segmentHelper = SegmentHelperMock.getSegmentHelperMock();
    streamMetadataTasks = new StreamMetadataTasks(streamStore, bucketStore, taskMetadataStore, segmentHelper, executor, "host", GrpcAuthHelper.getDisabledAuthHelper());
    streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelper, executor, "host", GrpcAuthHelper.getDisabledAuthHelper());
    kvtMetadataTasks = new TableMetadataTasks(kvtStore, segmentHelper, executor, executor, "host", GrpcAuthHelper.getDisabledAuthHelper());
    consumer = new ControllerService(kvtStore, kvtMetadataTasks, streamStore, bucketStore, streamMetadataTasks, streamTransactionMetadataTasks, new SegmentHelper(connectionPool, hostStore, executor), executor, null, requestTracker);
    final ScalingPolicy policy1 = ScalingPolicy.fixed(2);
    final ScalingPolicy policy2 = ScalingPolicy.fixed(3);
    final StreamConfiguration configuration1 = StreamConfiguration.builder().scalingPolicy(policy1).build();
    final StreamConfiguration configuration2 = StreamConfiguration.builder().scalingPolicy(policy2).build();
    // createScope
    streamStore.createScope(SCOPE, null, executor).get();
    // region createStream
    startTs = System.currentTimeMillis();
    OperationContext context = streamStore.createStreamContext(SCOPE, stream1, 0L);
    streamStore.createStream(SCOPE, stream1, configuration1, startTs, context, executor).get();
    streamStore.setState(SCOPE, stream1, State.ACTIVE, context, executor).get();
    OperationContext context2 = streamStore.createStreamContext(SCOPE, stream2, 0L);
    streamStore.createStream(SCOPE, stream2, configuration2, startTs, context2, executor).get();
    streamStore.setState(SCOPE, stream2, State.ACTIVE, context2, executor).get();
    // endregion
    // region scaleSegments
    SimpleEntry<Double, Double> segment1 = new SimpleEntry<>(0.5, 0.75);
    SimpleEntry<Double, Double> segment2 = new SimpleEntry<>(0.75, 1.0);
    List<Long> sealedSegments = Collections.singletonList(1L);
    scaleTs = System.currentTimeMillis();
    VersionedMetadata<EpochTransitionRecord> record = streamStore.submitScale(SCOPE, stream1, sealedSegments, Arrays.asList(segment1, segment2), startTs, null, null, executor).get();
    VersionedMetadata<State> state = streamStore.getVersionedState(SCOPE, stream1, null, executor).get();
    state = streamStore.updateVersionedState(SCOPE, stream1, State.SCALING, state, null, executor).get();
    record = streamStore.startScale(SCOPE, stream1, false, record, state, null, executor).get();
    streamStore.scaleCreateNewEpochs(SCOPE, stream1, record, null, executor).get();
    streamStore.scaleSegmentsSealed(SCOPE, stream1, sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), record, null, executor).get();
    streamStore.completeScale(SCOPE, stream1, record, null, executor).get();
    streamStore.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
    SimpleEntry<Double, Double> segment3 = new SimpleEntry<>(0.0, 0.5);
    SimpleEntry<Double, Double> segment4 = new SimpleEntry<>(0.5, 0.75);
    SimpleEntry<Double, Double> segment5 = new SimpleEntry<>(0.75, 1.0);
    sealedSegments = Arrays.asList(0L, 1L, 2L);
    record = streamStore.submitScale(SCOPE, stream2, sealedSegments, Arrays.asList(segment3, segment4, segment5), scaleTs, null, null, executor).get();
    state = streamStore.getVersionedState(SCOPE, stream2, null, executor).get();
    state = streamStore.updateVersionedState(SCOPE, stream2, State.SCALING, state, null, executor).get();
    record = streamStore.startScale(SCOPE, stream2, false, record, state, null, executor).get();
    streamStore.scaleCreateNewEpochs(SCOPE, stream2, record, null, executor).get();
    streamStore.scaleSegmentsSealed(SCOPE, stream2, sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), record, null, executor).get();
    streamStore.completeScale(SCOPE, stream2, record, null, executor).get();
    streamStore.setState(SCOPE, stream2, State.ACTIVE, null, executor).get();
// endregion
}
Also used : OperationContext(io.pravega.controller.store.stream.OperationContext) Arrays(java.util.Arrays) AssertExtensions(io.pravega.test.common.AssertExtensions) ArgumentMatchers.eq(org.mockito.ArgumentMatchers.eq) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) Mockito.doThrow(org.mockito.Mockito.doThrow) VersionedMetadata(io.pravega.controller.store.VersionedMetadata) StoreException(io.pravega.controller.store.stream.StoreException) TaskMetadataStore(io.pravega.controller.store.task.TaskMetadataStore) Map(java.util.Map) After(org.junit.After) Mockito.doAnswer(org.mockito.Mockito.doAnswer) Controller(io.pravega.controller.stream.api.grpc.v1.Controller) ClassRule(org.junit.ClassRule) EpochTransitionRecord(io.pravega.controller.store.stream.records.EpochTransitionRecord) PravegaZkCuratorResource(io.pravega.controller.PravegaZkCuratorResource) RequestTracker(io.pravega.common.tracing.RequestTracker) UUID(java.util.UUID) Collectors(java.util.stream.Collectors) KVTableMetadataStore(io.pravega.controller.store.kvtable.KVTableMetadataStore) List(java.util.List) StreamMetadataStore(io.pravega.controller.store.stream.StreamMetadataStore) Futures(io.pravega.common.concurrent.Futures) GrpcAuthHelper(io.pravega.controller.server.security.auth.GrpcAuthHelper) Mockito.mock(org.mockito.Mockito.mock) SegmentId(io.pravega.controller.stream.api.grpc.v1.Controller.SegmentId) ArgumentMatchers.any(org.mockito.ArgumentMatchers.any) KVTableStoreFactory(io.pravega.controller.store.kvtable.KVTableStoreFactory) OperationContext(io.pravega.controller.store.stream.OperationContext) StreamMetrics(io.pravega.controller.metrics.StreamMetrics) StreamStoreFactory(io.pravega.controller.store.stream.StreamStoreFactory) TransactionMetrics(io.pravega.controller.metrics.TransactionMetrics) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) SegmentHelper(io.pravega.controller.server.SegmentHelper) ModelHelper(io.pravega.client.control.impl.ModelHelper) Exceptions(io.pravega.common.Exceptions) CompletableFuture(java.util.concurrent.CompletableFuture) ConnectionPoolImpl(io.pravega.client.connection.impl.ConnectionPoolImpl) ArgumentMatchers.anyBoolean(org.mockito.ArgumentMatchers.anyBoolean) Mockito.spy(org.mockito.Mockito.spy) BucketStore(io.pravega.controller.store.stream.BucketStore) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) StreamMetadataTasks(io.pravega.controller.task.Stream.StreamMetadataTasks) HostMonitorConfigImpl(io.pravega.controller.store.host.impl.HostMonitorConfigImpl) SimpleEntry(java.util.AbstractMap.SimpleEntry) SocketConnectionFactoryImpl(io.pravega.client.connection.impl.SocketConnectionFactoryImpl) Before(org.junit.Before) ControllerService(io.pravega.controller.server.ControllerService) SegmentHelperMock(io.pravega.controller.mocks.SegmentHelperMock) ConnectionPool(io.pravega.client.connection.impl.ConnectionPool) Test(org.junit.Test) TableMetadataTasks(io.pravega.controller.task.KeyValueTable.TableMetadataTasks) HostStoreFactory(io.pravega.controller.store.host.HostStoreFactory) ExecutionException(java.util.concurrent.ExecutionException) TaskStoreFactory(io.pravega.controller.store.task.TaskStoreFactory) HostControllerStore(io.pravega.controller.store.host.HostControllerStore) StreamTransactionMetadataTasks(io.pravega.controller.task.Stream.StreamTransactionMetadataTasks) State(io.pravega.controller.store.stream.State) ExecutorServiceHelpers(io.pravega.common.concurrent.ExecutorServiceHelpers) Collections(java.util.Collections) Mockito.reset(org.mockito.Mockito.reset) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) Assert.assertEquals(org.junit.Assert.assertEquals) ClientConfig(io.pravega.client.ClientConfig) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) EpochTransitionRecord(io.pravega.controller.store.stream.records.EpochTransitionRecord) TaskMetadataStore(io.pravega.controller.store.task.TaskMetadataStore) SimpleEntry(java.util.AbstractMap.SimpleEntry) ConnectionPoolImpl(io.pravega.client.connection.impl.ConnectionPoolImpl) SocketConnectionFactoryImpl(io.pravega.client.connection.impl.SocketConnectionFactoryImpl) SegmentHelper(io.pravega.controller.server.SegmentHelper) ControllerService(io.pravega.controller.server.ControllerService) State(io.pravega.controller.store.stream.State) HostControllerStore(io.pravega.controller.store.host.HostControllerStore) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) StreamTransactionMetadataTasks(io.pravega.controller.task.Stream.StreamTransactionMetadataTasks) TableMetadataTasks(io.pravega.controller.task.KeyValueTable.TableMetadataTasks) BucketStore(io.pravega.controller.store.stream.BucketStore) StreamMetadataTasks(io.pravega.controller.task.Stream.StreamMetadataTasks) Before(org.junit.Before)
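The scale sequence in the middle of this setup is a self-contained workflow: submit the scale, move the stream to SCALING, start the scale, create the new epoch's segments, mark the old segments sealed, complete the scale, and return the stream to ACTIVE. The sketch below isolates those steps using only calls that appear verbatim above; the scope and stream names, segment ranges and sealed-segment sizes are placeholders, and the null arguments are passed exactly as in the test.

// A condensed sketch of the scale workflow exercised in setup(), assuming a configured
// StreamMetadataStore and ScheduledExecutorService, and a stream that currently has segments 0 and 1.
// (Imports as listed under the example above: Arrays, Collections, Collectors, SimpleEntry,
// VersionedMetadata, EpochTransitionRecord, State.)
private void scaleStreamSketch(StreamMetadataStore streamStore, ScheduledExecutorService executor) throws Exception {
    List<Long> sealedSegments = Collections.singletonList(1L);
    // 1. Propose the scale and move the stream into the SCALING state.
    VersionedMetadata<EpochTransitionRecord> record = streamStore.submitScale("scope", "stream", sealedSegments,
            Arrays.asList(new SimpleEntry<>(0.5, 0.75), new SimpleEntry<>(0.75, 1.0)),
            System.currentTimeMillis(), null, null, executor).get();
    VersionedMetadata<State> state = streamStore.getVersionedState("scope", "stream", null, executor).get();
    state = streamStore.updateVersionedState("scope", "stream", State.SCALING, state, null, executor).get();
    // 2. Run the scale: start it, create the new epoch's segments, seal the old ones, complete it.
    record = streamStore.startScale("scope", "stream", false, record, state, null, executor).get();
    streamStore.scaleCreateNewEpochs("scope", "stream", record, null, executor).get();
    streamStore.scaleSegmentsSealed("scope", "stream",
            sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), record, null, executor).get();
    streamStore.completeScale("scope", "stream", record, null, executor).get();
    // 3. Return the stream to ACTIVE.
    streamStore.setState("scope", "stream", State.ACTIVE, null, executor).get();
}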

Example 73 with OperationContext

Use of io.pravega.controller.store.stream.OperationContext in project pravega by pravega.

In the class CreateTableTask, the method execute:

@Override
public CompletableFuture<Void> execute(final CreateTableEvent request) {
    String scope = request.getScopeName();
    String kvt = request.getKvtName();
    int partitionCount = request.getPartitionCount();
    int primaryKeyLength = request.getPrimaryKeyLength();
    int secondaryKeyLength = request.getSecondaryKeyLength();
    long creationTime = request.getTimestamp();
    long requestId = request.getRequestId();
    long rolloverSize = request.getRolloverSizeBytes();
    String kvTableId = request.getTableId().toString();
    KeyValueTableConfiguration config = KeyValueTableConfiguration.builder().partitionCount(partitionCount).primaryKeyLength(primaryKeyLength).secondaryKeyLength(secondaryKeyLength).rolloverSizeBytes(rolloverSize).build();
    final OperationContext context = kvtMetadataStore.createContext(scope, kvt, requestId);
    return RetryHelper.withRetriesAsync(() -> getKeyValueTable(scope, kvt).thenCompose(table -> table.getId(context)).thenCompose(id -> {
        if (!id.equals(kvTableId)) {
            log.debug(requestId, "Skipped processing create event for KeyValueTable {}/{} with Id:{} as UUIDs did not match.", scope, kvt, id);
            return CompletableFuture.completedFuture(null);
        } else {
            return kvtMetadataStore.isScopeSealed(scope, context, executor).thenCompose(isScopeSealed -> {
                if (isScopeSealed) {
                    log.warn(requestId, "Scope {} is in sealed state: ", scope);
                    throw new IllegalStateException("Scope in sealed state: " + scope);
                }
                return this.kvtMetadataStore.createKeyValueTable(scope, kvt, config, creationTime, context, executor).thenComposeAsync(response -> {
                    // create the new segments and change the state of the kvtable to active.
                    if (response.getStatus().equals(CreateKVTableResponse.CreateStatus.NEW) || response.getStatus().equals(CreateKVTableResponse.CreateStatus.EXISTS_CREATING)) {
                        final int startingSegmentNumber = response.getStartingSegmentNumber();
                        final int minNumSegments = response.getConfiguration().getPartitionCount();
                        final int keyLength = response.getConfiguration().getPrimaryKeyLength() + response.getConfiguration().getSecondaryKeyLength();
                        List<Long> newSegments = IntStream.range(startingSegmentNumber, startingSegmentNumber + minNumSegments).boxed().map(x -> NameUtils.computeSegmentId(x, 0)).collect(Collectors.toList());
                        kvtMetadataTasks.createNewSegments(scope, kvt, newSegments, keyLength, requestId, config.getRolloverSizeBytes()).thenCompose(y -> {
                            kvtMetadataStore.getVersionedState(scope, kvt, context, executor).thenCompose(state -> {
                                if (state.getObject().equals(KVTableState.CREATING)) {
                                    kvtMetadataStore.updateVersionedState(scope, kvt, KVTableState.ACTIVE, state, context, executor);
                                }
                                return CompletableFuture.completedFuture(null);
                            });
                            return CompletableFuture.completedFuture(null);
                        });
                    }
                    return CompletableFuture.completedFuture(null);
                }, executor);
            });
        }
    }), e -> Exceptions.unwrap(e) instanceof RetryableException, Integer.MAX_VALUE, executor);
}
Also used : OperationContext(io.pravega.controller.store.stream.OperationContext) IntStream(java.util.stream.IntStream) OperationContext(io.pravega.controller.store.stream.OperationContext) NameUtils(io.pravega.shared.NameUtils) KeyValueTable(io.pravega.controller.store.kvtable.KeyValueTable) Exceptions(io.pravega.common.Exceptions) LoggerFactory(org.slf4j.LoggerFactory) CompletableFuture(java.util.concurrent.CompletableFuture) CreateKVTableResponse(io.pravega.controller.store.kvtable.CreateKVTableResponse) KeyValueTableConfiguration(io.pravega.client.tables.KeyValueTableConfiguration) TableMetadataTasks(io.pravega.controller.task.KeyValueTable.TableMetadataTasks) Collectors(java.util.stream.Collectors) KVTableMetadataStore(io.pravega.controller.store.kvtable.KVTableMetadataStore) CreateTableEvent(io.pravega.shared.controller.event.kvtable.CreateTableEvent) List(java.util.List) TagLogger(io.pravega.common.tracing.TagLogger) RetryableException(io.pravega.controller.retryable.RetryableException) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) Preconditions(com.google.common.base.Preconditions) KVTableState(io.pravega.controller.store.kvtable.KVTableState) RetryHelper(io.pravega.controller.util.RetryHelper) KeyValueTableConfiguration(io.pravega.client.tables.KeyValueTableConfiguration) RetryableException(io.pravega.controller.retryable.RetryableException) List(java.util.List)

Example 74 with OperationContext

Use of io.pravega.controller.store.stream.OperationContext in project pravega by pravega.

In the class TruncateStreamTask, the method processTruncate:

private CompletableFuture<Void> processTruncate(String scope, String stream, VersionedMetadata<StreamTruncationRecord> versionedTruncationRecord, VersionedMetadata<State> versionedState, OperationContext context, long requestId) {
    String delegationToken = this.streamMetadataTasks.retrieveDelegationToken();
    StreamTruncationRecord truncationRecord = versionedTruncationRecord.getObject();
    log.info(requestId, "Truncating stream {}/{} at stream cut: {}", scope, stream, truncationRecord.getStreamCut());
    return Futures.toVoid(streamMetadataStore.updateVersionedState(scope, stream, State.TRUNCATING, versionedState, context, executor)
            .thenCompose(update -> notifyTruncateSegments(scope, stream, truncationRecord.getStreamCut(), delegationToken, requestId)
                    .thenCompose(x -> notifyDeleteSegments(scope, stream, truncationRecord.getToDelete(), delegationToken, requestId))
                    .thenAccept(x -> DYNAMIC_LOGGER.reportGaugeValue(TRUNCATED_SIZE, versionedTruncationRecord.getObject().getSizeTill(), streamTags(scope, stream)))
                    .thenCompose(deleted -> streamMetadataStore.completeTruncation(scope, stream, versionedTruncationRecord, context, executor))
                    .thenCompose(x -> streamMetadataStore.updateVersionedState(scope, stream, State.ACTIVE, update, context, executor))));
}
Also used : OperationContext(io.pravega.controller.store.stream.OperationContext) NameUtils(io.pravega.shared.NameUtils) StreamTruncationRecord(io.pravega.controller.store.stream.records.StreamTruncationRecord) TruncateStreamEvent(io.pravega.shared.controller.event.TruncateStreamEvent) LoggerFactory(org.slf4j.LoggerFactory) Set(java.util.Set) CompletableFuture(java.util.concurrent.CompletableFuture) Collectors(java.util.stream.Collectors) MetricsTags.streamTags(io.pravega.shared.MetricsTags.streamTags) MetricsProvider(io.pravega.shared.metrics.MetricsProvider) TagLogger(io.pravega.common.tracing.TagLogger) VersionedMetadata(io.pravega.controller.store.VersionedMetadata) DynamicLogger(io.pravega.shared.metrics.DynamicLogger) TRUNCATED_SIZE(io.pravega.shared.MetricsNames.TRUNCATED_SIZE) Map(java.util.Map) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) StreamMetadataTasks(io.pravega.controller.task.Stream.StreamMetadataTasks) Preconditions(com.google.common.base.Preconditions) State(io.pravega.controller.store.stream.State) StreamMetadataStore(io.pravega.controller.store.stream.StreamMetadataStore) Futures(io.pravega.common.concurrent.Futures) StreamTruncationRecord(io.pravega.controller.store.stream.records.StreamTruncationRecord)

Example 75 with OperationContext

Use of io.pravega.controller.store.stream.OperationContext in project pravega by pravega.

In the class ZKScope, the method listStreams:

@Override
public CompletableFuture<Pair<List<String>, String>> listStreams(int limit, String continuationToken, Executor executor, OperationContext context) {
    // Stream references are stored under a hierarchy of nodes as described in the `addStreamsInScope` method.
    // A continuation token is essentially a serialized integer that is broken into three parts -
    // msb 2 bytes, middle 4 bytes and lsb 4 bytes.
    // Stream references are stored as /01/2345/stream-6789.
    // So effectively all streams under the scope are ordered by the stream position. Stream position is strictly
    // increasing and any new stream that is added to the scope will be done at a higher position.
    // Streams can be deleted though.
    // The continuation token therefore signifies the position of the last stream that was returned in the previous
    // iteration; continuing from it, we simply retrieve the next `limit` streams under the scope.
    // Since the streams are stored under the aforesaid hierarchy, we start with all children of the `middle` part of
    // the continuation token and only include streams whose position is greater than the token.
    // If we cannot get `limit` streams from this, we go to the next higher available `middle` znode
    // and fetch its children. If `middle` is exhausted, we increment the `msb` and repeat until we have found `limit`
    // streams or reached the end.
    // (A standalone sketch of this position-to-path mapping follows this example.)
    List<String> toReturn = new LinkedList<>();
    AtomicInteger remaining = new AtomicInteger(limit);
    Token floor = Token.fromString(continuationToken);
    AtomicReference<Token> lastPos = new AtomicReference<>(floor);
    // compute on all available top level children (0-99) that are greater than floor.getLeft
    return computeOnChildren(streamsInScopePath, topChild -> {
        if (topChild >= floor.getMsb()) {
            String topPath = ZKPaths.makePath(streamsInScopePath, topChild.toString());
            // set middle floor = supplied floor OR 0 if top floor has been incremented
            int middleFloor = topChild.intValue() == floor.getMsb() ? floor.getMiddle() : 0;
            // compute on all available middle level children (0-9999) of current top level that are greater than middle floor
            CompletableFuture<Void> voidCompletableFuture = computeOnChildren(topPath, middleChild -> {
                if (middleChild >= middleFloor) {
                    String middlePath = ZKPaths.makePath(topPath, middleChild.toString());
                    return store.getChildren(middlePath).thenAccept(streams -> {
                        // set bottom floor = -1 if we have incremented either top or middle floors
                        int bottomFloor = topChild.intValue() == floor.getMsb() && middleChild.intValue() == floor.getMiddle() ? floor.getLsb() : -1;
                        Pair<List<String>, Integer> retVal = filterStreams(streams, bottomFloor, remaining.get());
                        if (!retVal.getKey().isEmpty()) {
                            toReturn.addAll(retVal.getKey());
                            remaining.set(limit - toReturn.size());
                            lastPos.set(new Token(topChild, middleChild, retVal.getValue()));
                        }
                    }).thenApply(v -> remaining.get() > 0);
                } else {
                    return CompletableFuture.completedFuture(true);
                }
            }, executor);
            return voidCompletableFuture.thenApply(v -> remaining.get() > 0);
        } else {
            return CompletableFuture.completedFuture(true);
        }
    }, executor).thenApply(v -> new ImmutablePair<>(toReturn, lastPos.get().toString()));
}
Also used : NotImplementedException(org.apache.commons.lang3.NotImplementedException) OperationContext(io.pravega.controller.store.stream.OperationContext) SneakyThrows(lombok.SneakyThrows) Exceptions(io.pravega.common.Exceptions) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) CompletableFuture(java.util.concurrent.CompletableFuture) RevisionDataInput(io.pravega.common.io.serialization.RevisionDataInput) BitConverter(io.pravega.common.util.BitConverter) AtomicReference(java.util.concurrent.atomic.AtomicReference) Function(java.util.function.Function) Strings(com.google.common.base.Strings) ZKPaths(org.apache.curator.utils.ZKPaths) StoreException(io.pravega.controller.store.stream.StoreException) Pair(org.apache.commons.lang3.tuple.Pair) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) LinkedList(java.util.LinkedList) RevisionDataOutput(io.pravega.common.io.serialization.RevisionDataOutput) VersionedSerializer(io.pravega.common.io.serialization.VersionedSerializer) Executor(java.util.concurrent.Executor) Predicate(java.util.function.Predicate) IOException(java.io.IOException) UUID(java.util.UUID) Collectors(java.util.stream.Collectors) ImmutablePair(org.apache.commons.lang3.tuple.ImmutablePair) ObjectBuilder(io.pravega.common.ObjectBuilder) Base64(java.util.Base64) List(java.util.List) ByteArraySegment(io.pravega.common.util.ByteArraySegment) Builder(lombok.Builder) Data(lombok.Data) Preconditions(com.google.common.base.Preconditions) Comparator(java.util.Comparator) Futures(io.pravega.common.concurrent.Futures) AtomicReference(java.util.concurrent.atomic.AtomicReference) LinkedList(java.util.LinkedList) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) CompletableFuture(java.util.concurrent.CompletableFuture) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) LinkedList(java.util.LinkedList) List(java.util.List)
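The position-to-path mapping described in the comments of listStreams can be illustrated in isolation. The sketch below is not the actual ZKScope.Token class; it is a hypothetical helper that assumes the layout implied by the example path /01/2345/stream-6789 and by the children ranges mentioned in the comments (top level 0-99, middle level 0-9999, leaf suffix 0-9999).

// A minimal, hypothetical sketch of the position-to-path mapping implied above; it is not the
// real ZKScope.Token and is only meant to make the msb/middle/lsb split concrete.
public final class StreamPositionSketch {

    // Split a stream position into the three components used by the znode hierarchy.
    static int[] split(int position) {
        int msb = position / 100_000_000;          // top-level znode, 0-99
        int middle = (position / 10_000) % 10_000; // middle-level znode, 0-9999
        int lsb = position % 10_000;               // suffix on the stream's leaf znode, 0-9999
        return new int[] { msb, middle, lsb };
    }

    // Render the znode path for a stream reference, assuming the leaf is named "<stream>-<lsb>"
    // as in the example path from the comments.
    static String toPath(String streamName, int position) {
        int[] parts = split(position);
        return String.format("/%02d/%04d/%s-%04d", parts[0], parts[1], streamName, parts[2]);
    }

    public static void main(String[] args) {
        // Position 123456789 maps to /01/2345/stream-6789, matching the example in the comments.
        System.out.println(toPath("stream", 123456789));
    }
}

With such a layout, listStreams only has to walk top-level and middle-level children that are at or above the token's msb and middle values, and filter leaf entries whose position is greater than the token, exactly as the computeOnChildren calls above do.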

Aggregations

OperationContext (io.pravega.controller.store.stream.OperationContext): 76
CompletableFuture (java.util.concurrent.CompletableFuture): 53
Futures (io.pravega.common.concurrent.Futures): 48
StreamMetadataStore (io.pravega.controller.store.stream.StreamMetadataStore): 44
ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService): 42
Exceptions (io.pravega.common.Exceptions): 41
Collectors (java.util.stream.Collectors): 41
UUID (java.util.UUID): 39
StoreException (io.pravega.controller.store.stream.StoreException): 38
List (java.util.List): 38
TagLogger (io.pravega.common.tracing.TagLogger): 37
LoggerFactory (org.slf4j.LoggerFactory): 37
Preconditions (com.google.common.base.Preconditions): 36
Map (java.util.Map): 32
NameUtils (io.pravega.shared.NameUtils): 31
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 27
StreamConfiguration (io.pravega.client.stream.StreamConfiguration): 26
State (io.pravega.controller.store.stream.State): 26
CompletionException (java.util.concurrent.CompletionException): 26
BucketStore (io.pravega.controller.store.stream.BucketStore): 25
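Across these examples the pattern is consistent: create an OperationContext once per logical operation, either via kvtMetadataStore.createContext(scope, kvt, requestId) or via streamStore.createStreamContext(scope, stream, requestId), and then pass that same context together with an executor to every metadata-store call belonging to the operation. The sketch below is a minimal illustration built only from calls shown above (createStreamContext, createStream, setState, Futures.toVoid); the scope and stream names and the 0L request id are placeholders, and a configured StreamMetadataStore and ScheduledExecutorService are assumed, as in ControllerServiceTest.setup().

// A minimal sketch of the shared-context pattern seen in the examples above; names and the
// request id are placeholders, and "streamStore"/"executor" are assumed to be configured already.
private CompletableFuture<Void> createAndActivateStream(StreamMetadataStore streamStore,
                                                        StreamConfiguration configuration,
                                                        ScheduledExecutorService executor) {
    // One context for the whole operation; every store call below reuses it.
    OperationContext context = streamStore.createStreamContext("myScope", "myStream", 0L);
    return Futures.toVoid(
            streamStore.createStream("myScope", "myStream", configuration, System.currentTimeMillis(), context, executor)
                    .thenCompose(v -> streamStore.setState("myScope", "myStream", State.ACTIVE, context, executor)));
}

The numeric argument is the request id used in the examples above (0L in the test, request.getRequestId() in CreateTableTask), which ties the store calls made through the context back to the originating request.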