Example 21 with StreamConfigurationRecord

use of io.pravega.controller.store.stream.records.StreamConfigurationRecord in project pravega by pravega.

the class StreamMetadataTasksTest method updateStreamTest.

@Test(timeout = 30000)
public void updateStreamTest() throws Exception {
    assertNotEquals(0, consumer.getCurrentSegments(SCOPE, stream1, 0L).get().size());
    WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
    streamMetadataTasks.setRequestEventWriter(requestEventWriter);
    StreamConfiguration streamConfiguration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(5)).build();
    StreamConfigurationRecord configProp = streamStorePartialMock.getConfigurationRecord(SCOPE, stream1, null, executor).join().getObject();
    assertFalse(configProp.isUpdating());
    // 1. happy path: the update should succeed
    CompletableFuture<UpdateStreamStatus.Status> updateOperationFuture = streamMetadataTasks.updateStream(SCOPE, stream1, streamConfiguration, 0L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    assertEquals(UpdateStreamStatus.Status.SUCCESS, updateOperationFuture.join());
    configProp = streamStorePartialMock.getConfigurationRecord(SCOPE, stream1, null, executor).join().getObject();
    assertTrue(configProp.getStreamConfiguration().equals(streamConfiguration));
    streamConfiguration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(6)).build();
    // 2. change state to scaling
    streamStorePartialMock.setState(SCOPE, stream1, State.SCALING, null, executor).get();
    // call update; the event is posted and the configuration record is marked updating, but processing will fail while the stream is in SCALING state
    streamMetadataTasks.updateStream(SCOPE, stream1, streamConfiguration, 0L);
    AtomicBoolean loop = new AtomicBoolean(false);
    Futures.loop(() -> !loop.get(), () -> streamStorePartialMock.getConfigurationRecord(SCOPE, stream1, null, executor).thenApply(x -> x.getObject().isUpdating()).thenAccept(loop::set), executor).join();
    // event posted, first step performed. now pick the event for processing
    UpdateStreamTask updateStreamTask = new UpdateStreamTask(streamMetadataTasks, streamStorePartialMock, bucketStore, executor);
    UpdateStreamEvent taken = (UpdateStreamEvent) requestEventWriter.eventQueue.take();
    AssertExtensions.assertFutureThrows("", updateStreamTask.execute(taken), e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
    streamStorePartialMock.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
    // now with state = active, process the same event. it should succeed now.
    assertTrue(Futures.await(updateStreamTask.execute(taken)));
    // 3. multiple back to back updates.
    StreamConfiguration streamConfiguration1 = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(1, 1, 2)).build();
    CompletableFuture<UpdateStreamStatus.Status> updateOperationFuture1 = streamMetadataTasks.updateStream(SCOPE, stream1, streamConfiguration1, 0L);
    // ensure that the previous updateStream has posted the event and set the status to updating,
    // and only then issue the second updateStream
    AtomicBoolean loop2 = new AtomicBoolean(false);
    Futures.loop(() -> !loop2.get(), () -> streamStorePartialMock.getConfigurationRecord(SCOPE, stream1, null, executor).thenApply(x -> x.getObject().isUpdating()).thenAccept(loop2::set), executor).join();
    configProp = streamStorePartialMock.getConfigurationRecord(SCOPE, stream1, null, executor).join().getObject();
    assertTrue(configProp.getStreamConfiguration().equals(streamConfiguration1) && configProp.isUpdating());
    StreamConfiguration streamConfiguration2 = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(7)).build();
    // post the second update request. This should fail immediately, since the previous update is still in progress.
    CompletableFuture<UpdateStreamStatus.Status> updateOperationFuture2 = streamMetadataTasks.updateStream(SCOPE, stream1, streamConfiguration2, 0L);
    assertEquals(UpdateStreamStatus.Status.FAILURE, updateOperationFuture2.join());
    // process event
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    // verify that first request for update also completes with success.
    assertEquals(UpdateStreamStatus.Status.SUCCESS, updateOperationFuture1.join());
    configProp = streamStorePartialMock.getConfigurationRecord(SCOPE, stream1, null, executor).join().getObject();
    assertTrue(configProp.getStreamConfiguration().equals(streamConfiguration1) && !configProp.isUpdating());
    streamStorePartialMock.setState(SCOPE, stream1, State.UPDATING, null, executor).join();
    UpdateStreamEvent event = new UpdateStreamEvent(SCOPE, stream1, System.nanoTime());
    assertTrue(Futures.await(updateStreamTask.execute(event)));
    // execute the event again. It should complete without doing anything.
    updateStreamTask.execute(event).join();
    assertEquals(State.ACTIVE, streamStorePartialMock.getState(SCOPE, stream1, true, null, executor).join());
}
Also used : UpdateStreamTask(io.pravega.controller.server.eventProcessor.requesthandlers.UpdateStreamTask) StoreException(io.pravega.controller.store.stream.StoreException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) UpdateStreamStatus(io.pravega.controller.stream.api.grpc.v1.Controller.UpdateStreamStatus) UpdateStreamEvent(io.pravega.shared.controller.event.UpdateStreamEvent) StreamConfigurationRecord(io.pravega.controller.store.stream.records.StreamConfigurationRecord) ControllerEventStreamWriterMock(io.pravega.controller.mocks.ControllerEventStreamWriterMock) EventStreamWriterMock(io.pravega.controller.mocks.EventStreamWriterMock) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) Test(org.junit.Test)
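
Stripped of the mocks and event plumbing, the store-level two-phase update that backs this test reduces to the calls below. This is a minimal sketch, not the test itself: it assumes an initialized StreamMetadataStore store, a created stream in ACTIVE state, a ScheduledExecutorService executor, and placeholder scope/stream names.

// Phase 1: stage the new configuration; the StreamConfigurationRecord
// reports isUpdating() == true until the update is completed.
StreamConfiguration desired = StreamConfiguration.builder()
        .scalingPolicy(ScalingPolicy.fixed(5))
        .build();
store.startUpdateConfiguration(scope, stream, desired, null, executor).join();

VersionedMetadata<StreamConfigurationRecord> record =
        store.getConfigurationRecord(scope, stream, null, executor).join();
assert record.getObject().isUpdating();

// Phase 2: once the update has been applied, complete it; the record
// then holds the new configuration with isUpdating() == false.
store.completeUpdateConfiguration(scope, stream, record, null, executor).join();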

Example 22 with StreamConfigurationRecord

use of io.pravega.controller.store.stream.records.StreamConfigurationRecord in project pravega by pravega.

the class StreamMetadataStoreTest method concurrentStartScaleTest.

@Test(timeout = 30000)
public void concurrentStartScaleTest() throws Exception {
    final String scope = "ScopeScale";
    final String stream = "StreamScale1";
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).build();
    long start = System.currentTimeMillis();
    store.createScope(scope, null, executor).get();
    store.createStream(scope, stream, configuration, start, null, executor).get();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    // set minimum number of segments to 1
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
    store.startUpdateConfiguration(scope, stream, config, null, executor).join();
    VersionedMetadata<StreamConfigurationRecord> configRecord = store.getConfigurationRecord(scope, stream, null, executor).join();
    store.completeUpdateConfiguration(scope, stream, configRecord, null, executor).join();
    // region concurrent start scale
    // Test scenario where one request starts and completes as the other is waiting on StartScale.createEpochTransition
    SimpleEntry<Double, Double> segment2 = new SimpleEntry<>(0.0, 1.0);
    List<Long> segmentsToSeal = Arrays.asList(0L, 1L);
    long scaleTs = System.currentTimeMillis();
    @SuppressWarnings("unchecked") PersistentStreamBase streamObj = (PersistentStreamBase) ((AbstractStreamMetadataStore) store).getStream(scope, stream, null);
    PersistentStreamBase streamObjSpied = spy(streamObj);
    CompletableFuture<Void> latch = new CompletableFuture<>();
    CompletableFuture<Void> updateEpochTransitionCalled = new CompletableFuture<>();
    doAnswer(x -> streamObj.getEpochTransitionNode(x.getArgument(0))).when(streamObjSpied).getEpochTransitionNode(any());
    doAnswer(x -> streamObj.updateEpochTransitionNode(x.getArgument(0), x.getArgument(1))).when(streamObjSpied).updateEpochTransitionNode(any(), any());
    doAnswer(x -> CompletableFuture.runAsync(() -> {
        VersionedMetadata<EpochTransitionRecord> argument = x.getArgument(0);
        EpochTransitionRecord record = argument.getObject();
        if (record.getSegmentsToSeal().containsAll(segmentsToSeal)) {
            // wait until we create epoch transition outside of this method
            updateEpochTransitionCalled.complete(null);
            latch.join();
        }
    }).thenCompose(v -> streamObj.updateEpochTransitionNode(x.getArgument(0), x.getArgument(1)))).when(streamObjSpied).updateEpochTransitionNode(any(), any());
    StreamOperationContext context = new StreamOperationContext(((AbstractStreamMetadataStore) store).getScope(scope, null), streamObjSpied, 0L);
    // the following should be stuck at createEpochTransition
    CompletableFuture<VersionedMetadata<EpochTransitionRecord>> response = store.submitScale(scope, stream, segmentsToSeal, Collections.singletonList(segment2), scaleTs, null, context, executor);
    updateEpochTransitionCalled.join();
    // create new epochs corresponding to new scale as the previous scale waits to create epoch transition record
    SimpleEntry<Double, Double> segment2p = new SimpleEntry<>(0.0, 0.5);
    List<Long> segmentsToSeal2 = Collections.singletonList(0L);
    long scaleTs2 = System.currentTimeMillis();
    streamObjSpied.getEpochRecord(0, context).thenCompose(epochRecord -> {
        EpochTransitionRecord record = RecordHelper.computeEpochTransition(epochRecord, segmentsToSeal2, Collections.singletonList(segment2p), scaleTs2);
        return streamObjSpied.getEpochTransition(context).thenCompose(existing -> streamObjSpied.updateEpochTransitionNode(new VersionedMetadata<>(record, existing.getVersion()), context)).thenApply(v -> new VersionedMetadata<>(record, v));
    }).thenCompose(epochRecord -> store.getVersionedState(scope, stream, context, executor).thenCompose(state -> store.updateVersionedState(scope, stream, State.SCALING, state, context, executor).thenCompose(updatedState -> store.startScale(scope, stream, false, epochRecord, updatedState, context, executor)).thenCompose(x -> store.scaleCreateNewEpochs(scope, stream, epochRecord, context, executor)).thenCompose(x -> store.scaleSegmentsSealed(scope, stream, segmentsToSeal2.stream().collect(Collectors.toMap(r -> r, r -> 0L)), epochRecord, context, executor)).thenCompose(x -> store.completeScale(scope, stream, epochRecord, context, executor)))).thenCompose(y -> store.setState(scope, stream, State.ACTIVE, context, executor)).join();
    latch.complete(null);
    // the first scale should fail when attempting to update the epoch transition record.
    AssertExtensions.assertSuppliedFutureThrows("WriteConflict in start scale", () -> response, e -> Exceptions.unwrap(e) instanceof StoreException.WriteConflictException);
    VersionedMetadata<EpochTransitionRecord> versioned = streamObj.getEpochTransition(context).join();
    EpochTransitionRecord epochTransitionRecord = versioned.getObject();
    assertEquals(EpochTransitionRecord.EMPTY, epochTransitionRecord);
    // set the state to SCALING so we can attempt the scale workflow against the (now empty) epoch transition.
    VersionedMetadata<State> state = store.getVersionedState(scope, stream, context, executor).join();
    state = store.updateVersionedState(scope, stream, State.SCALING, state, context, executor).join();
    // now call startScale with the stale epoch transition. this should throw an exception
    AssertExtensions.assertFutureThrows("epoch transition was supposed to be invalid", store.startScale(scope, stream, false, versioned, state, context, executor), e -> Exceptions.unwrap(e) instanceof IllegalStateException);
    // verify that state is reset to ACTIVE
    assertEquals(State.ACTIVE, store.getState(scope, stream, true, context, executor).join());
// endregion
}
Also used : CompletableFuture(java.util.concurrent.CompletableFuture) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) StreamConfigurationRecord(io.pravega.controller.store.stream.records.StreamConfigurationRecord) VersionedMetadata(io.pravega.controller.store.VersionedMetadata) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) EpochTransitionRecord(io.pravega.controller.store.stream.records.EpochTransitionRecord) SimpleEntry(java.util.AbstractMap.SimpleEntry) Test(org.junit.Test)
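
For orientation, the sequential scale pipeline that the two concurrent requests above race through is roughly the following. A minimal sketch built from the same store calls used in the test; scope, stream, segmentsToSeal, newRanges, and scaleTs are placeholders, and the OperationContext argument is passed as null, as the tests do elsewhere.

// Submit the scale request; this records an EpochTransitionRecord.
VersionedMetadata<EpochTransitionRecord> versioned =
        store.submitScale(scope, stream, segmentsToSeal, newRanges, scaleTs, null, null, executor).join();

// Move the stream into SCALING state, then run the scale steps in order.
VersionedMetadata<State> state = store.getVersionedState(scope, stream, null, executor).join();
state = store.updateVersionedState(scope, stream, State.SCALING, state, null, executor).join();
versioned = store.startScale(scope, stream, false, versioned, state, null, executor).join();
store.scaleCreateNewEpochs(scope, stream, versioned, null, executor).join();
// Sealed segment sizes are reported here; 0L is used as a stand-in size.
store.scaleSegmentsSealed(scope, stream,
        segmentsToSeal.stream().collect(Collectors.toMap(s -> s, s -> 0L)), versioned, null, executor).join();
store.completeScale(scope, stream, versioned, null, executor).join();
store.setState(scope, stream, State.ACTIVE, null, executor).join();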

Example 23 with StreamConfigurationRecord

use of io.pravega.controller.store.stream.records.StreamConfigurationRecord in project pravega by pravega.

the class StreamMetadataStoreTest method updateTest.

@Test(timeout = 30000)
public void updateTest() throws Exception {
    final String scope = "ScopeUpdate";
    final String stream = "StreamUpdate";
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).build();
    long start = System.currentTimeMillis();
    store.createScope(scope, null, executor).get();
    store.createStream(scope, stream, configuration, start, null, executor).get();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    final StreamConfiguration configuration2 = StreamConfiguration.builder().scalingPolicy(policy).build();
    StreamConfigurationRecord configProperty = store.getConfigurationRecord(scope, stream, null, executor).join().getObject();
    assertFalse(configProperty.isUpdating());
    // run update configuration multiple times
    assertTrue(Futures.await(store.startUpdateConfiguration(scope, stream, configuration2, null, executor)));
    store.setState(scope, stream, State.UPDATING, null, executor).join();
    configProperty = store.getConfigurationRecord(scope, stream, null, executor).join().getObject();
    assertTrue(configProperty.isUpdating());
    final StreamConfiguration configuration3 = StreamConfiguration.builder().scalingPolicy(policy).build();
    assertFalse(Futures.await(store.startUpdateConfiguration(scope, stream, configuration3, null, executor)));
    VersionedMetadata<StreamConfigurationRecord> existing = store.getConfigurationRecord(scope, stream, null, executor).join();
    assertTrue(Futures.await(store.completeUpdateConfiguration(scope, stream, existing, null, executor)));
    configProperty = store.getConfigurationRecord(scope, stream, null, executor).join().getObject();
    assertEquals(configuration2, configProperty.getStreamConfiguration());
    assertTrue(Futures.await(store.startUpdateConfiguration(scope, stream, configuration3, null, executor)));
    existing = store.getConfigurationRecord(scope, stream, null, executor).join();
    assertTrue(Futures.await(store.completeUpdateConfiguration(scope, stream, existing, null, executor)));
    store.setState(scope, stream, State.ACTIVE, null, executor).join();
}
Also used : ScalingPolicy(io.pravega.client.stream.ScalingPolicy) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) StreamConfigurationRecord(io.pravega.controller.store.stream.records.StreamConfigurationRecord) Test(org.junit.Test)
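
The assertFalse in the middle of this test is the key behavior: while a configuration update is pending, a second startUpdateConfiguration is rejected. A minimal sketch of that guard, under the same assumptions as the test (an ACTIVE stream, a store, an executor; configA and configB are placeholder configurations):

// The first update succeeds and flips the record to updating.
assert Futures.await(store.startUpdateConfiguration(scope, stream, configA, null, executor));

// A second update is rejected while the first is still pending.
assert !Futures.await(store.startUpdateConfiguration(scope, stream, configB, null, executor));

// Completing the pending update clears isUpdating() and allows new updates.
VersionedMetadata<StreamConfigurationRecord> pending =
        store.getConfigurationRecord(scope, stream, null, executor).join();
store.completeUpdateConfiguration(scope, stream, pending, null, executor).join();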

Example 24 with StreamConfigurationRecord

use of io.pravega.controller.store.stream.records.StreamConfigurationRecord in project pravega by pravega.

the class ZKStreamMetadataStoreTest method testScaleMetadata.

@Test
public void testScaleMetadata() throws Exception {
    String scope = "testScopeScale";
    String stream = "testStreamScale";
    ScalingPolicy policy = ScalingPolicy.fixed(3);
    StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).build();
    SimpleEntry<Double, Double> segment1 = new SimpleEntry<>(0.0, 0.5);
    SimpleEntry<Double, Double> segment2 = new SimpleEntry<>(0.5, 1.0);
    List<Map.Entry<Double, Double>> newRanges = Arrays.asList(segment1, segment2);
    store.createScope(scope, null, executor).get();
    store.createStream(scope, stream, configuration, System.currentTimeMillis(), null, executor).get();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    // set minimum number of segments to 1 so that we can also test scale downs
    configuration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
    store.startUpdateConfiguration(scope, stream, configuration, null, executor).join();
    VersionedMetadata<StreamConfigurationRecord> configRecord = store.getConfigurationRecord(scope, stream, null, executor).join();
    store.completeUpdateConfiguration(scope, stream, configRecord, null, executor).join();
    List<ScaleMetadata> scaleIncidents = store.getScaleMetadata(scope, stream, 0, Long.MAX_VALUE, null, executor).get();
    assertTrue(scaleIncidents.size() == 1);
    assertTrue(scaleIncidents.get(0).getSegments().size() == 3);
    // scale
    scale(scope, stream, scaleIncidents.get(0).getSegments(), newRanges);
    scaleIncidents = store.getScaleMetadata(scope, stream, 0, Long.MAX_VALUE, null, executor).get();
    assertTrue(scaleIncidents.size() == 2);
    assertTrue(scaleIncidents.get(0).getSegments().size() == 3);
    assertTrue(scaleIncidents.get(1).getSegments().size() == 2);
    // scale again
    scale(scope, stream, scaleIncidents.get(1).getSegments(), newRanges);
    scaleIncidents = store.getScaleMetadata(scope, stream, 0, Long.MAX_VALUE, null, executor).get();
    assertTrue(scaleIncidents.size() == 3);
    assertTrue(scaleIncidents.get(0).getSegments().size() == 3);
    assertTrue(scaleIncidents.get(1).getSegments().size() == 2);
    assertTrue(scaleIncidents.get(2).getSegments().size() == 2);
    // scale again
    scale(scope, stream, scaleIncidents.get(2).getSegments(), newRanges);
    scaleIncidents = store.getScaleMetadata(scope, stream, 0, Long.MAX_VALUE, null, executor).get();
    assertTrue(scaleIncidents.size() == 4);
    assertTrue(scaleIncidents.get(0).getSegments().size() == 3);
    assertTrue(scaleIncidents.get(1).getSegments().size() == 2);
    assertTrue(scaleIncidents.get(2).getSegments().size() == 2);
    assertTrue(scaleIncidents.get(3).getSegments().size() == 2);
}
Also used : ScalingPolicy(io.pravega.client.stream.ScalingPolicy) SimpleEntry(java.util.AbstractMap.SimpleEntry) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) StreamConfigurationRecord(io.pravega.controller.store.stream.records.StreamConfigurationRecord) Test(org.junit.Test)
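
Outside of assertions, the scale-history query used above follows one shape. A minimal sketch, assuming a store and executor as in the test and a stream that has scaled at least once:

// Fetch every scale recorded in the given time window (here: all of them).
List<ScaleMetadata> history =
        store.getScaleMetadata(scope, stream, 0, Long.MAX_VALUE, null, executor).join();

// Each entry exposes the segment set produced by that scale epoch.
for (ScaleMetadata incident : history) {
    System.out.println(incident.getSegments().size() + " active segments");
}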

Example 25 with StreamConfigurationRecord

use of io.pravega.controller.store.stream.records.StreamConfigurationRecord in project pravega by pravega.

the class ZkStreamTest method testZkStream.

@Test(timeout = 30000)
public void testZkStream() throws Exception {
    double keyChunk = 1.0 / 5;
    final ScalingPolicy policy = ScalingPolicy.fixed(5);
    @Cleanup final StreamMetadataStore store = new ZKStreamMetadataStore(cli, executor);
    final String streamName = "test";
    store.createScope(SCOPE, null, executor).get();
    StreamConfiguration streamConfig = StreamConfiguration.builder().scalingPolicy(policy).build();
    store.createStream(SCOPE, streamName, streamConfig, System.currentTimeMillis(), null, executor).get();
    store.setState(SCOPE, streamName, State.ACTIVE, null, executor).get();
    OperationContext context = store.createStreamContext(SCOPE, streamName, 0L);
    // set minimum number of segments to 1 so that we can also test scale downs
    streamConfig = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
    store.startUpdateConfiguration(SCOPE, streamName, streamConfig, null, executor).join();
    VersionedMetadata<StreamConfigurationRecord> configRecord = store.getConfigurationRecord(SCOPE, streamName, null, executor).join();
    store.completeUpdateConfiguration(SCOPE, streamName, configRecord, null, executor).join();
    List<StreamSegmentRecord> segments = store.getActiveSegments(SCOPE, streamName, context, executor).get();
    assertEquals(segments.size(), 5);
    assertTrue(segments.stream().allMatch(x -> Lists.newArrayList(0L, 1L, 2L, 3L, 4L).contains(x.segmentId())));
    long start = segments.get(0).getCreationTime();
    assertEquals(store.getConfiguration(SCOPE, streamName, context, executor).get(), streamConfig);
    List<Map.Entry<Double, Double>> newRanges;
    // existing range 0 = 0 - .2, 1 = .2 - .4, 2 = .4 - .6, 3 = .6 - .8, 4 = .8 - 1.0
    // 3, 4 -> 5 = .6 - 1.0
    newRanges = Collections.singletonList(new AbstractMap.SimpleEntry<>(3 * keyChunk, 1.0));
    long scale1 = start + 10000;
    ArrayList<Long> sealedSegments = Lists.newArrayList(3L, 4L);
    long five = computeSegmentId(5, 1);
    VersionedMetadata<EpochTransitionRecord> versioned = store.submitScale(SCOPE, streamName, sealedSegments, newRanges, scale1, null, context, executor).get();
    VersionedMetadata<State> state = store.getVersionedState(SCOPE, streamName, null, executor).join();
    state = store.updateVersionedState(SCOPE, streamName, State.SCALING, state, null, executor).join();
    versioned = store.startScale(SCOPE, streamName, false, versioned, state, null, executor).join();
    store.scaleCreateNewEpochs(SCOPE, streamName, versioned, context, executor).get();
    store.scaleSegmentsSealed(SCOPE, streamName, sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), versioned, context, executor).get();
    store.completeScale(SCOPE, streamName, versioned, null, executor).join();
    store.setState(SCOPE, streamName, State.ACTIVE, null, executor).join();
    segments = store.getActiveSegments(SCOPE, streamName, context, executor).get();
    assertEquals(segments.size(), 4);
    assertTrue(segments.stream().allMatch(x -> Lists.newArrayList(0L, 1L, 2L, five).contains(x.segmentId())));
    // 1 -> 6 = .2 - .3, 7 = .3 - .4
    // 2,5 -> 8 = .4 - 1.0
    newRanges = Arrays.asList(new AbstractMap.SimpleEntry<>(keyChunk, 0.3), new AbstractMap.SimpleEntry<>(0.3, 2 * keyChunk), new AbstractMap.SimpleEntry<>(2 * keyChunk, 1.0));
    long scale2 = scale1 + 10000;
    ArrayList<Long> sealedSegments1 = Lists.newArrayList(1L, 2L, five);
    long six = computeSegmentId(6, 2);
    long seven = computeSegmentId(7, 2);
    long eight = computeSegmentId(8, 2);
    versioned = store.submitScale(SCOPE, streamName, sealedSegments1, newRanges, scale2, null, context, executor).get();
    EpochTransitionRecord response = versioned.getObject();
    state = store.getVersionedState(SCOPE, streamName, null, executor).join();
    state = store.updateVersionedState(SCOPE, streamName, State.SCALING, state, null, executor).join();
    versioned = store.startScale(SCOPE, streamName, false, versioned, state, null, executor).join();
    store.scaleCreateNewEpochs(SCOPE, streamName, versioned, context, executor).get();
    store.scaleSegmentsSealed(SCOPE, streamName, sealedSegments1.stream().collect(Collectors.toMap(x -> x, x -> 0L)), versioned, context, executor).get();
    store.completeScale(SCOPE, streamName, versioned, null, executor).join();
    store.setState(SCOPE, streamName, State.ACTIVE, null, executor).join();
    segments = store.getActiveSegments(SCOPE, streamName, context, executor).get();
    assertEquals(segments.size(), 4);
    assertTrue(segments.stream().allMatch(x -> Lists.newArrayList(0L, six, seven, eight).contains(x.segmentId())));
    // 7 -> 9 = .3 - .35, 10 = .35 - .6
    // 8 -> 10 = .35 - .6, 11 = .6 - 1.0
    newRanges = Arrays.asList(new AbstractMap.SimpleEntry<>(0.3, 0.35), new AbstractMap.SimpleEntry<>(0.35, 3 * keyChunk), new AbstractMap.SimpleEntry<>(3 * keyChunk, 1.0));
    long scale3 = scale2 + 10000;
    long nine = computeSegmentId(9, 3);
    long ten = computeSegmentId(10, 3);
    long eleven = computeSegmentId(11, 3);
    ArrayList<Long> sealedSegments2 = Lists.newArrayList(seven, eight);
    versioned = store.submitScale(SCOPE, streamName, sealedSegments2, newRanges, scale3, null, context, executor).get();
    response = versioned.getObject();
    state = store.getVersionedState(SCOPE, streamName, null, executor).join();
    state = store.updateVersionedState(SCOPE, streamName, State.SCALING, state, null, executor).join();
    store.startScale(SCOPE, streamName, false, versioned, state, null, executor).join();
    store.scaleCreateNewEpochs(SCOPE, streamName, versioned, context, executor).get();
    store.scaleSegmentsSealed(SCOPE, streamName, sealedSegments2.stream().collect(Collectors.toMap(x -> x, x -> 0L)), versioned, context, executor).get();
    store.completeScale(SCOPE, streamName, versioned, null, executor).join();
    store.setState(SCOPE, streamName, State.ACTIVE, null, executor).join();
    segments = store.getActiveSegments(SCOPE, streamName, context, executor).get();
    assertEquals(segments.size(), 5);
    assertTrue(segments.stream().allMatch(x -> Lists.newArrayList(0L, six, nine, ten, eleven).contains(x.segmentId())));
    Map<Long, List<Long>> successors = store.getSuccessors(SCOPE, streamName, 0L, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.isEmpty());
    successors = store.getSuccessors(SCOPE, streamName, 1L, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.size() == 2 && successors.containsKey(six) && successors.get(six).containsAll(Collections.singleton(1L)) && successors.containsKey(seven) && successors.get(seven).containsAll(Collections.singleton(1L)));
    successors = store.getSuccessors(SCOPE, streamName, 2L, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.size() == 1 && successors.containsKey(eight) && successors.get(eight).containsAll(Lists.newArrayList(2L, five)));
    successors = store.getSuccessors(SCOPE, streamName, 3L, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.size() == 1 && successors.containsKey(five) && successors.get(five).containsAll(Lists.newArrayList(3L, 4L)));
    successors = store.getSuccessors(SCOPE, streamName, 4L, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.size() == 1 && successors.containsKey(five) && successors.get(five).containsAll(Lists.newArrayList(3L, 4L)));
    successors = store.getSuccessors(SCOPE, streamName, five, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.size() == 1 && successors.containsKey(eight) && successors.get(eight).containsAll(Lists.newArrayList(2L, five)));
    successors = store.getSuccessors(SCOPE, streamName, six, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.isEmpty());
    successors = store.getSuccessors(SCOPE, streamName, seven, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.size() == 2 && successors.containsKey(nine) && successors.get(nine).containsAll(Collections.singleton(seven)) && successors.containsKey(ten) && successors.get(ten).containsAll(Lists.newArrayList(seven, eight)));
    successors = store.getSuccessors(SCOPE, streamName, eight, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.size() == 2 && successors.containsKey(eleven) && successors.get(eleven).containsAll(Collections.singleton(eight)) && successors.containsKey(ten) && successors.get(ten).containsAll(Lists.newArrayList(seven, eight)));
    successors = store.getSuccessors(SCOPE, streamName, nine, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.isEmpty());
    successors = store.getSuccessors(SCOPE, streamName, ten, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.isEmpty());
    successors = store.getSuccessors(SCOPE, streamName, eleven, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.isEmpty());
    // segments at the head of the stream
    Map<Long, Long> historicalSegments = store.getSegmentsAtHead(SCOPE, streamName, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertEquals(historicalSegments.size(), 5);
    assertTrue(historicalSegments.keySet().containsAll(Lists.newArrayList(0L, 1L, 2L, 3L, 4L)));
    // epoch 0 (just after creation)
    List<Long> segmentsInEpoch = store.getSegmentsInEpoch(SCOPE, streamName, 0, context, executor).get().stream().map(x -> x.segmentId()).collect(Collectors.toList());
    assertEquals(segmentsInEpoch.size(), 5);
    assertTrue(segmentsInEpoch.containsAll(Lists.newArrayList(0L, 1L, 2L, 3L, 4L)));
    // scale1
    segmentsInEpoch = store.getSegmentsInEpoch(SCOPE, streamName, 1, context, executor).get().stream().map(x -> x.segmentId()).collect(Collectors.toList());
    assertEquals(segmentsInEpoch.size(), 4);
    assertTrue(segmentsInEpoch.containsAll(Lists.newArrayList(0L, 1L, 2L, five)));
    // scale2
    segmentsInEpoch = store.getSegmentsInEpoch(SCOPE, streamName, 2, context, executor).get().stream().map(x -> x.segmentId()).collect(Collectors.toList());
    assertEquals(segmentsInEpoch.size(), 4);
    assertTrue(segmentsInEpoch.containsAll(Lists.newArrayList(0L, six, seven, eight)));
    // scale3
    segmentsInEpoch = store.getSegmentsInEpoch(SCOPE, streamName, 3, context, executor).get().stream().map(x -> x.segmentId()).collect(Collectors.toList());
    assertEquals(segmentsInEpoch.size(), 5);
    assertTrue(segmentsInEpoch.containsAll(Lists.newArrayList(0L, six, nine, ten, eleven)));
    assertFalse(store.isSealed(SCOPE, streamName, context, executor).get());
    assertNotEquals(0, store.getActiveSegments(SCOPE, streamName, context, executor).get().size());
    store.setSealed(SCOPE, streamName, context, executor).get();
    assertTrue(store.isSealed(SCOPE, streamName, context, executor).get());
    assertEquals(0, store.getActiveSegments(SCOPE, streamName, context, executor).get().size());
    // seal an already sealed stream.
    store.setSealed(SCOPE, streamName, context, executor).get();
    assertTrue(store.isSealed(SCOPE, streamName, context, executor).get());
    assertEquals(0, store.getActiveSegments(SCOPE, streamName, context, executor).get().size());
    // seal a non-existent stream.
    AssertExtensions.assertFutureThrows("", store.setSealed(SCOPE, "nonExistentStream", null, executor), e -> Exceptions.unwrap(e) instanceof StoreException.DataNotFoundException);
    store.markCold(SCOPE, streamName, 0L, System.currentTimeMillis() + 1000, null, executor).get();
    assertTrue(store.isCold(SCOPE, streamName, 0L, null, executor).get());
    Thread.sleep(1000);
    assertFalse(store.isCold(SCOPE, streamName, 0L, null, executor).get());
    store.markCold(SCOPE, streamName, 0L, System.currentTimeMillis() + 1000, null, executor).get();
    store.removeMarker(SCOPE, streamName, 0L, null, executor).get();
    assertFalse(store.isCold(SCOPE, streamName, 0L, null, executor).get());
}
Also used : Cleanup(lombok.Cleanup) StreamSegmentRecord(io.pravega.controller.store.stream.records.StreamSegmentRecord) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) List(java.util.List) ArrayList(java.util.ArrayList) StreamConfigurationRecord(io.pravega.controller.store.stream.records.StreamConfigurationRecord) TestOperationContext(io.pravega.controller.store.TestOperationContext) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) EpochTransitionRecord(io.pravega.controller.store.stream.records.EpochTransitionRecord) Test(org.junit.Test)
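
All of the successor lookups in this test share one pattern, distilled below. A minimal sketch under the test's assumptions (a store, a context from store.createStreamContext, an executor; segmentId is a placeholder):

// Map each successor segment to the predecessor segments it replaced.
Map<Long, List<Long>> successors =
        store.getSuccessors(SCOPE, streamName, segmentId, context, executor).join()
             .entrySet().stream()
             .collect(Collectors.toMap(e -> e.getKey().segmentId(), Map.Entry::getValue));

// An empty map means the segment has not been sealed by any scale yet,
// so it has no successors.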

Aggregations

StreamConfigurationRecord (io.pravega.controller.store.stream.records.StreamConfigurationRecord): 28 uses
StreamConfiguration (io.pravega.client.stream.StreamConfiguration): 23 uses
Test (org.junit.Test): 22 uses
ArgumentMatchers.anyString (org.mockito.ArgumentMatchers.anyString): 16 uses
ScalingPolicy (io.pravega.client.stream.ScalingPolicy): 14 uses
VersionedMetadata (io.pravega.controller.store.VersionedMetadata): 10 uses
HashMap (java.util.HashMap): 10 uses
CompletableFuture (java.util.concurrent.CompletableFuture): 10 uses
StreamTruncationRecord (io.pravega.controller.store.stream.records.StreamTruncationRecord): 9 uses
ArgumentMatchers.anyLong (org.mockito.ArgumentMatchers.anyLong): 9 uses
AtomicLong (java.util.concurrent.atomic.AtomicLong): 8 uses
RetentionPolicy (io.pravega.client.stream.RetentionPolicy): 7 uses
Lists (com.google.common.collect.Lists): 6 uses
Segment (io.pravega.client.segment.impl.Segment): 6 uses
ReaderGroupConfig (io.pravega.client.stream.ReaderGroupConfig): 6 uses
Stream (io.pravega.client.stream.Stream): 6 uses
StreamCut (io.pravega.client.stream.StreamCut): 6 uses
StreamCutImpl (io.pravega.client.stream.impl.StreamCutImpl): 6 uses
Exceptions (io.pravega.common.Exceptions): 6 uses
ExecutorServiceHelpers (io.pravega.common.concurrent.ExecutorServiceHelpers): 6 uses