Use of io.pravega.controller.store.stream.records.EpochTransitionRecord in project pravega by pravega.
From class StreamMetadataStoreTest, method scaleWithTxTest:
// Exercises the interplay between transactions and scale operations on a stream:
// (1) txns created before and during a scale are pinned to the pre-scale epoch,
// (2) committing txns that belong to an older epoch triggers a rolling transaction
//     (duplicate epochs are created), and
// (3) a scale request submitted against the pre-rollover epoch is migrated onto
//     the latest duplicate epoch when started as a "manual" scale.
@Test(timeout = 30000)
public void scaleWithTxTest() throws Exception {
final String scope = "ScopeScaleWithTx";
final String stream = "StreamScaleWithTx";
final ScalingPolicy policy = ScalingPolicy.fixed(2);
final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).build();
long start = System.currentTimeMillis();
// create scope + stream with 2 fixed segments and activate it
store.createScope(scope, null, executor).get();
store.createStream(scope, stream, configuration, start, null, executor).get();
store.setState(scope, stream, State.ACTIVE, null, executor).get();
long scaleTs = System.currentTimeMillis();
// split segment 1 (range 0.5-1.0) into two halves
SimpleEntry<Double, Double> segment2 = new SimpleEntry<>(0.5, 0.75);
SimpleEntry<Double, Double> segment3 = new SimpleEntry<>(0.75, 1.0);
List<Long> scale1SealedSegments = Collections.singletonList(1L);
// region Txn created before scale and during scale
// scale with transaction test
// first txn: created before the scale is even submitted -> epoch 0
UUID txnId = store.generateTransactionId(scope, stream, null, executor).join();
VersionedTransactionData tx01 = store.createTransaction(scope, stream, txnId, 100, 100, null, executor).get();
assertEquals(0, tx01.getEpoch());
// submit the scale request; the returned epoch transition should be against epoch 0
VersionedMetadata<EpochTransitionRecord> versioned = store.submitScale(scope, stream, scale1SealedSegments, Arrays.asList(segment2, segment3), scaleTs, null, null, executor).join();
EpochTransitionRecord response = versioned.getObject();
Map<Long, Map.Entry<Double, Double>> scale1SegmentsCreated = response.getNewSegmentsWithRange();
final int epoch = response.getActiveEpoch();
assertEquals(0, epoch);
assertNotNull(scale1SegmentsCreated);
// move stream to SCALING and start the scale workflow
VersionedMetadata<State> state = store.getVersionedState(scope, stream, null, executor).join();
state = store.updateVersionedState(scope, stream, State.SCALING, state, null, executor).join();
versioned = store.startScale(scope, stream, false, versioned, state, null, executor).join();
// second txn: created while the scale is mid-flight (new epochs created via
// scaleCreateNewEpochs below, but completeScale not yet called).
// assert that the txn is still created on the old epoch (0)
store.scaleCreateNewEpochs(scope, stream, versioned, null, executor).join();
txnId = store.generateTransactionId(scope, stream, null, executor).join();
VersionedTransactionData tx02 = store.createTransaction(scope, stream, txnId, 100, 100, null, executor).get();
assertEquals(0, tx02.getEpoch());
// the txn id's most significant 32 bits appear to encode the creation epoch -- verify 0
assertEquals(0, (int) (tx02.getId().getMostSignificantBits() >> 32));
// third transaction: id generated now, but the txn is created after the scale completes
txnId = store.generateTransactionId(scope, stream, null, executor).join();
// seal both epoch-0 txns (commit=true) before finishing the scale
store.sealTransaction(scope, stream, tx02.getId(), true, Optional.of(tx02.getVersion()), "", Long.MIN_VALUE, null, executor).get();
store.sealTransaction(scope, stream, tx01.getId(), true, Optional.of(tx01.getVersion()), "", Long.MIN_VALUE, null, executor).get();
store.scaleSegmentsSealed(scope, stream, scale1SealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), versioned, null, executor).join();
store.completeScale(scope, stream, versioned, null, executor).join();
// third txn still lands on epoch 0 because its id was generated before the scale completed
VersionedTransactionData tx03 = store.createTransaction(scope, stream, txnId, 100, 100, null, executor).get();
assertEquals(0, tx03.getEpoch());
assertEquals(0, (int) (tx03.getId().getMostSignificantBits() >> 32));
store.setState(scope, stream, State.ACTIVE, null, executor).join();
// ensure that we can commit transactions on old epoch and roll over.
EpochRecord activeEpoch = store.getActiveEpoch(scope, stream, null, true, executor).join();
// submit another scale request without starting the scale
List<Long> scale2SealedSegments = Collections.singletonList(0L);
long scaleTs2 = System.currentTimeMillis();
SimpleEntry<Double, Double> segment4 = new SimpleEntry<>(0.0, 0.25);
SimpleEntry<Double, Double> segment5 = new SimpleEntry<>(0.25, 0.5);
VersionedMetadata<EpochTransitionRecord> versioned2 = store.submitScale(scope, stream, scale2SealedSegments, Arrays.asList(segment4, segment5), scaleTs2, null, null, executor).join();
EpochTransitionRecord response2 = versioned2.getObject();
assertEquals(activeEpoch.getEpoch(), response2.getActiveEpoch());
// run the full rolling-txn commit workflow for the epoch-0 txns
VersionedMetadata<CommittingTransactionsRecord> record = store.startCommitTransactions(scope, stream, 100, null, executor).join().getKey();
store.setState(scope, stream, State.COMMITTING_TXN, null, executor).join();
record = store.startRollingTxn(scope, stream, activeEpoch.getEpoch(), record, null, executor).join();
store.rollingTxnCreateDuplicateEpochs(scope, stream, Collections.emptyMap(), System.currentTimeMillis(), record, null, executor).join();
store.completeRollingTxn(scope, stream, Collections.emptyMap(), record, null, executor).join();
store.completeCommitTransactions(scope, stream, record, null, executor, Collections.emptyMap()).join();
store.setState(scope, stream, State.ACTIVE, null, executor).join();
// rolling txn created duplicate epochs 2 (duplicate of 0) and 3 (duplicate of 1);
// epoch 3 is now active and references original epoch 1
activeEpoch = store.getActiveEpoch(scope, stream, null, true, executor).join();
assertEquals(3, activeEpoch.getEpoch());
assertEquals(1, activeEpoch.getReferenceEpoch());
assertEquals(3, activeEpoch.getSegments().size());
List<StreamSegmentRecord> txnDuplicate = store.getSegmentsInEpoch(scope, stream, 2, null, executor).join();
assertEquals(2, txnDuplicate.size());
List<StreamSegmentRecord> activeEpochDuplicate = store.getSegmentsInEpoch(scope, stream, 3, null, executor).join();
assertEquals(3, activeEpochDuplicate.size());
// epoch 2 is the duplicate of the epoch the txns were committed on (epoch 0)
EpochRecord txnCommittedEpoch = store.getEpoch(scope, stream, 2, null, executor).join();
assertEquals(0, txnCommittedEpoch.getReferenceEpoch());
assertEquals(store.transactionStatus(scope, stream, tx01.getId(), null, executor).join(), TxnStatus.COMMITTED);
assertEquals(store.transactionStatus(scope, stream, tx02.getId(), null, executor).join(), TxnStatus.COMMITTED);
assertEquals(store.transactionStatus(scope, stream, tx03.getId(), null, executor).join(), TxnStatus.OPEN);
store.sealTransaction(scope, stream, tx03.getId(), true, Optional.of(tx03.getVersion()), "", Long.MIN_VALUE, null, executor).get();
// endregion
// region verify migrate request for manual scale
// now start manual scale against previously submitted scale request that was on old epoch from before rolling txn.
// verify that it gets migrated to latest duplicate epoch
state = store.getVersionedState(scope, stream, null, executor).join();
state = store.updateVersionedState(scope, stream, State.SCALING, state, null, executor).join();
versioned2 = store.submitScale(scope, stream, scale2SealedSegments, Arrays.asList(segment4, segment5), scaleTs2, null, null, executor).join();
// isManualScale = true: the stale request is migrated onto the current epoch
versioned2 = store.startScale(scope, stream, true, versioned2, state, null, executor).join();
store.scaleCreateNewEpochs(scope, stream, versioned2, null, executor).join();
txnId = store.generateTransactionId(scope, stream, null, executor).join();
VersionedTransactionData tx14 = store.createTransaction(scope, stream, txnId, 100, 100, null, executor).get();
assertEquals(1, tx14.getEpoch());
store.sealTransaction(scope, stream, tx14.getId(), true, Optional.of(tx14.getVersion()), "", Long.MIN_VALUE, null, executor).get();
// verify that new txns can be created and are created on original epoch
txnId = store.generateTransactionId(scope, stream, null, executor).join();
VersionedTransactionData tx15 = store.createTransaction(scope, stream, txnId, 100, 100, null, executor).get();
assertEquals(1, tx15.getEpoch());
// calling scaleCreateNewEpochs again is a no-op / idempotent step before sealing
store.scaleCreateNewEpochs(scope, stream, versioned2, null, executor).join();
store.scaleSegmentsSealed(scope, stream, Collections.emptyMap(), versioned2, null, executor).join();
store.completeScale(scope, stream, versioned2, null, executor).join();
store.setState(scope, stream, State.ACTIVE, null, executor).get();
// the migrated manual scale produced a brand-new epoch 4 (its own reference)
activeEpoch = store.getActiveEpoch(scope, stream, null, true, executor).join();
assertEquals(4, activeEpoch.getEpoch());
assertEquals(4, activeEpoch.getReferenceEpoch());
store.sealTransaction(scope, stream, tx15.getId(), true, Optional.of(tx15.getVersion()), "", Long.MIN_VALUE, null, executor).get();
// commit the epoch-1 txns: another rolling txn, producing duplicate epochs 5 and 6
record = store.startCommitTransactions(scope, stream, 100, null, executor).join().getKey();
store.setState(scope, stream, State.COMMITTING_TXN, null, executor).get();
record = store.startRollingTxn(scope, stream, activeEpoch.getEpoch(), record, null, executor).join();
store.rollingTxnCreateDuplicateEpochs(scope, stream, Collections.emptyMap(), System.currentTimeMillis(), record, null, executor).join();
store.completeRollingTxn(scope, stream, Collections.emptyMap(), record, null, executor).join();
store.completeCommitTransactions(scope, stream, record, null, executor, Collections.emptyMap()).join();
store.setState(scope, stream, State.ACTIVE, null, executor).join();
// epoch 6 is active and is a duplicate of epoch 4
activeEpoch = store.getActiveEpoch(scope, stream, null, true, executor).join();
assertEquals(6, activeEpoch.getEpoch());
assertEquals(4, activeEpoch.getReferenceEpoch());
// endregion
}
Use of io.pravega.controller.store.stream.records.EpochTransitionRecord in project pravega by pravega.
From class StreamMetadataStoreTest, method scaleWithTxnForInconsistentScanerios:
// Verifies that a scale request which has become inconsistent with the stream's
// epoch history (because a rolling txn rolled over epochs after the request was
// submitted) is rejected at startScale with IllegalStateException, and that the
// stream state is reset to ACTIVE afterwards.
// NOTE(review): method name has a typo -- "Scanerios" should be "Scenarios";
// renaming would change the test's identifier, so it is left as-is here.
@Test(timeout = 30000)
public void scaleWithTxnForInconsistentScanerios() throws Exception {
final String scope = "ScopeScaleWithTx";
final String stream = "StreamScaleWithTx1";
final ScalingPolicy policy = ScalingPolicy.fixed(2);
final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).build();
long start = System.currentTimeMillis();
store.createScope(scope, null, executor).get();
store.createStream(scope, stream, configuration, start, null, executor).get();
store.setState(scope, stream, State.ACTIVE, null, executor).get();
// create a txn on epoch 0 and seal it (commit=true) so a rolling txn will be needed later
UUID txnId = store.generateTransactionId(scope, stream, null, executor).join();
VersionedTransactionData tx1 = store.createTransaction(scope, stream, txnId, 100, 100, null, executor).get();
store.sealTransaction(scope, stream, txnId, true, Optional.of(tx1.getVersion()), "", Long.MIN_VALUE, null, executor).get();
long scaleTs = System.currentTimeMillis();
List<Long> scale1SealedSegments = Collections.singletonList(0L);
// run a scale on segment 1
VersionedMetadata<EpochTransitionRecord> versioned = store.submitScale(scope, stream, scale1SealedSegments, Arrays.asList(new AbstractMap.SimpleEntry<>(0.0, 0.25), new AbstractMap.SimpleEntry<>(0.25, 0.5)), scaleTs, null, null, executor).join();
EpochTransitionRecord response = versioned.getObject();
assertEquals(0, response.getActiveEpoch());
VersionedMetadata<State> state = store.getVersionedState(scope, stream, null, executor).join();
state = store.updateVersionedState(scope, stream, State.SCALING, state, null, executor).join();
store.startScale(scope, stream, false, versioned, state, null, executor).join();
store.scaleCreateNewEpochs(scope, stream, versioned, null, executor).join();
store.scaleSegmentsSealed(scope, stream, scale1SealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), versioned, null, executor).join();
store.completeScale(scope, stream, versioned, null, executor).join();
store.setState(scope, stream, State.ACTIVE, null, executor).join();
// start second scale: submitted (against epoch 1) but intentionally never started
versioned = store.submitScale(scope, stream, Collections.singletonList(1L), Arrays.asList(new AbstractMap.SimpleEntry<>(0.5, 0.75), new AbstractMap.SimpleEntry<>(0.75, 1.0)), scaleTs, null, null, executor).join();
response = versioned.getObject();
assertEquals(1, response.getActiveEpoch());
// commit the epoch-0 txn via the rolling-txn workflow; this creates duplicate epochs
// and moves the stream past the epoch the pending scale request was submitted against
EpochRecord activeEpoch = store.getActiveEpoch(scope, stream, null, true, executor).join();
VersionedMetadata<CommittingTransactionsRecord> record = store.startCommitTransactions(scope, stream, 100, null, executor).join().getKey();
store.setState(scope, stream, State.COMMITTING_TXN, null, executor).join();
record = store.startRollingTxn(scope, stream, activeEpoch.getEpoch(), record, null, executor).join();
store.rollingTxnCreateDuplicateEpochs(scope, stream, Collections.emptyMap(), System.currentTimeMillis(), record, null, executor).join();
store.completeRollingTxn(scope, stream, Collections.emptyMap(), record, null, executor).join();
store.completeCommitTransactions(scope, stream, record, null, executor, Collections.emptyMap()).join();
store.setState(scope, stream, State.ACTIVE, null, executor).join();
// re-submit the same scale request; its transition record still references epoch 1
state = store.getVersionedState(scope, stream, null, executor).join();
state = store.updateVersionedState(scope, stream, State.SCALING, state, null, executor).join();
versioned = store.submitScale(scope, stream, Collections.singletonList(1L), Arrays.asList(new AbstractMap.SimpleEntry<>(0.5, 0.75), new AbstractMap.SimpleEntry<>(0.75, 1.0)), scaleTs, null, null, executor).join();
response = versioned.getObject();
assertEquals(1, response.getActiveEpoch());
// starting the now-inconsistent (non-manual) scale must fail with IllegalStateException
AssertExtensions.assertFutureThrows("attempting to create new segments against inconsistent epoch transition record", store.startScale(scope, stream, false, versioned, state, null, executor), e -> Exceptions.unwrap(e) instanceof IllegalStateException);
// verify that state is reset to active
State stateVal = store.getState(scope, stream, true, null, executor).join();
assertEquals(State.ACTIVE, stateVal);
}
Use of io.pravega.controller.store.stream.records.EpochTransitionRecord in project pravega by pravega.
From class StreamMetadataStoreTest, method scale:
/**
 * Drives one complete scale workflow against the given stream: submits the epoch
 * transition, flips the stream to SCALING, starts the scale, creates the new epochs,
 * seals the old segments (reporting size 0 for each), completes the scale and finally
 * returns the stream to ACTIVE.
 *
 * <p>Only valid for streams still on their initial epoch: the submitted transition's
 * active epoch is asserted to be 0.
 *
 * @param scope                scope of the stream
 * @param stream               name of the stream to scale
 * @param scaleTs              scale timestamp recorded in the epoch transition
 * @param newSegments          key ranges of the replacement segments
 * @param scale1SealedSegments ids of the segments sealed by this scale
 */
private void scale(String scope, String stream, long scaleTs, List<Map.Entry<Double, Double>> newSegments, List<Long> scale1SealedSegments) {
    VersionedMetadata<EpochTransitionRecord> versioned = store.submitScale(scope, stream, scale1SealedSegments, newSegments, scaleTs, null, null, executor).join();
    EpochTransitionRecord response = versioned.getObject();
    Map<Long, Map.Entry<Double, Double>> scale1SegmentsCreated = response.getNewSegmentsWithRange();
    final int epoch = response.getActiveEpoch();
    // this helper assumes the stream has not been scaled before
    assertEquals(0, epoch);
    assertNotNull(scale1SegmentsCreated);
    VersionedMetadata<State> state = store.getVersionedState(scope, stream, null, executor).join();
    state = store.updateVersionedState(scope, stream, State.SCALING, state, null, executor).join();
    versioned = store.startScale(scope, stream, false, versioned, state, null, executor).join();
    store.scaleCreateNewEpochs(scope, stream, versioned, null, executor).join();
    // seal the old segments, reporting size 0 for each, then finish the scale
    store.scaleSegmentsSealed(scope, stream, scale1SealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), versioned, null, executor).join();
    store.completeScale(scope, stream, versioned, null, executor).join();
    // result intentionally discarded: the updated state version is never read
    // (previously assigned back to `state`, which was a dead store)
    store.updateVersionedState(scope, stream, State.ACTIVE, state, null, executor).join();
}
Use of io.pravega.controller.store.stream.records.EpochTransitionRecord in project pravega by pravega.
From class StreamMetadataStoreTest, method concurrentStartScaleTest:
// Simulates two concurrent scale requests racing on the epoch transition record.
// The first request is blocked (via a spied stream object and a latch) inside its
// update of the epoch transition node while a second scale runs to completion;
// when released, the first request must fail with WriteConflictException, and a
// subsequent startScale against the now-EMPTY transition record must fail with
// IllegalStateException and reset the stream state to ACTIVE.
@Test(timeout = 30000)
public void concurrentStartScaleTest() throws Exception {
final String scope = "ScopeScale";
final String stream = "StreamScale1";
final ScalingPolicy policy = ScalingPolicy.fixed(2);
final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).build();
long start = System.currentTimeMillis();
store.createScope(scope, null, executor).get();
store.createStream(scope, stream, configuration, start, null, executor).get();
store.setState(scope, stream, State.ACTIVE, null, executor).get();
// set minimum number of segments to 1 so a 2 -> 1 merge scale is permitted
StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
store.startUpdateConfiguration(scope, stream, config, null, executor).join();
VersionedMetadata<StreamConfigurationRecord> configRecord = store.getConfigurationRecord(scope, stream, null, executor).join();
store.completeUpdateConfiguration(scope, stream, configRecord, null, executor).join();
// region concurrent start scale
// Test scenario where one request starts and completes as the other is waiting on StartScale.createEpochTransition
SimpleEntry<Double, Double> segment2 = new SimpleEntry<>(0.0, 1.0);
List<Long> segmentsToSeal = Arrays.asList(0L, 1L);
long scaleTs = System.currentTimeMillis();
// spy on the underlying stream object so epoch-transition writes can be intercepted
@SuppressWarnings("unchecked") PersistentStreamBase streamObj = (PersistentStreamBase) ((AbstractStreamMetadataStore) store).getStream(scope, stream, null);
PersistentStreamBase streamObjSpied = spy(streamObj);
// latch: released by the test after the competing scale completes
CompletableFuture<Void> latch = new CompletableFuture<>();
// completed by the stub when the first scale reaches updateEpochTransitionNode
CompletableFuture<Void> updateEpochTransitionCalled = new CompletableFuture<>();
doAnswer(x -> streamObj.getEpochTransitionNode(x.getArgument(0))).when(streamObjSpied).getEpochTransitionNode(any());
doAnswer(x -> streamObj.updateEpochTransitionNode(x.getArgument(0), x.getArgument(1))).when(streamObjSpied).updateEpochTransitionNode(any(), any());
// second stubbing overrides the first: block only the update that seals BOTH segments
// (i.e. the first scale request), then delegate to the real method once released
doAnswer(x -> CompletableFuture.runAsync(() -> {
VersionedMetadata<EpochTransitionRecord> argument = x.getArgument(0);
EpochTransitionRecord record = argument.getObject();
if (record.getSegmentsToSeal().containsAll(segmentsToSeal)) {
// wait until we create epoch transition outside of this method
updateEpochTransitionCalled.complete(null);
latch.join();
}
}).thenCompose(v -> streamObj.updateEpochTransitionNode(x.getArgument(0), x.getArgument(1)))).when(streamObjSpied).updateEpochTransitionNode(any(), any());
StreamOperationContext context = new StreamOperationContext(((AbstractStreamMetadataStore) store).getScope(scope, null), streamObjSpied, 0L);
// the following should be stuck at createEpochTransition
CompletableFuture<VersionedMetadata<EpochTransitionRecord>> response = store.submitScale(scope, stream, segmentsToSeal, Collections.singletonList(segment2), scaleTs, null, context, executor);
updateEpochTransitionCalled.join();
// create new epochs corresponding to new scale as the previous scale waits to create epoch transition record
SimpleEntry<Double, Double> segment2p = new SimpleEntry<>(0.0, 0.5);
List<Long> segmentsToSeal2 = Collections.singletonList(0L);
long scaleTs2 = System.currentTimeMillis();
// manually write a competing epoch transition (sealing only segment 0) and run
// that scale end-to-end while the first request is still parked on the latch
streamObjSpied.getEpochRecord(0, context).thenCompose(epochRecord -> {
EpochTransitionRecord record = RecordHelper.computeEpochTransition(epochRecord, segmentsToSeal2, Collections.singletonList(segment2p), scaleTs2);
return streamObjSpied.getEpochTransition(context).thenCompose(existing -> streamObjSpied.updateEpochTransitionNode(new VersionedMetadata<>(record, existing.getVersion()), context)).thenApply(v -> new VersionedMetadata<>(record, v));
}).thenCompose(epochRecord -> store.getVersionedState(scope, stream, context, executor).thenCompose(state -> store.updateVersionedState(scope, stream, State.SCALING, state, context, executor).thenCompose(updatedState -> store.startScale(scope, stream, false, epochRecord, updatedState, context, executor)).thenCompose(x -> store.scaleCreateNewEpochs(scope, stream, epochRecord, context, executor)).thenCompose(x -> store.scaleSegmentsSealed(scope, stream, segmentsToSeal2.stream().collect(Collectors.toMap(r -> r, r -> 0L)), epochRecord, context, executor)).thenCompose(x -> store.completeScale(scope, stream, epochRecord, context, executor)))).thenCompose(y -> store.setState(scope, stream, State.ACTIVE, context, executor)).join();
// release the parked first scale request
latch.complete(null);
// first scale should fail in attempting to update epoch transition record.
AssertExtensions.assertSuppliedFutureThrows("WriteConflict in start scale", () -> response, e -> Exceptions.unwrap(e) instanceof StoreException.WriteConflictException);
// the completed scale leaves the epoch transition record reset to EMPTY
VersionedMetadata<EpochTransitionRecord> versioned = streamObj.getEpochTransition(context).join();
EpochTransitionRecord epochTransitionRecord = versioned.getObject();
assertEquals(EpochTransitionRecord.EMPTY, epochTransitionRecord);
// with the transition record now EMPTY, move the stream back to SCALING so the
// next startScale attempt can run against the stale/empty record
VersionedMetadata<State> state = store.getVersionedState(scope, stream, context, executor).join();
state = store.updateVersionedState(scope, stream, State.SCALING, state, context, executor).join();
// starting a scale against the EMPTY transition record must be rejected
AssertExtensions.assertFutureThrows("epoch transition was supposed to be invalid", store.startScale(scope, stream, false, versioned, state, context, executor), e -> Exceptions.unwrap(e) instanceof IllegalStateException);
// verify that state is reset to ACTIVE
assertEquals(State.ACTIVE, store.getState(scope, stream, true, context, executor).join());
// endregion
}
Use of io.pravega.controller.store.stream.records.EpochTransitionRecord in project pravega by pravega.
From class ControllerServiceTest, method setup:
// Builds the ControllerService under test together with its collaborators (task
// stores, segment helper mock, metadata tasks), then provisions two streams and
// runs one complete scale on each so later tests see multi-epoch streams.
@Before
public void setup() throws Exception {
// assemble the controller service with mocked segment helper and in-memory stores
final TaskMetadataStore taskMetadataStore = TaskStoreFactory.createZKStore(PRAVEGA_ZK_CURATOR_RESOURCE.client, executor);
final HostControllerStore hostStore = HostStoreFactory.createInMemoryStore(HostMonitorConfigImpl.dummyConfig());
BucketStore bucketStore = StreamStoreFactory.createInMemoryBucketStore();
connectionPool = new ConnectionPoolImpl(ClientConfig.builder().build(), new SocketConnectionFactoryImpl(ClientConfig.builder().build()));
SegmentHelper segmentHelper = SegmentHelperMock.getSegmentHelperMock();
streamMetadataTasks = new StreamMetadataTasks(streamStore, bucketStore, taskMetadataStore, segmentHelper, executor, "host", GrpcAuthHelper.getDisabledAuthHelper());
streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelper, executor, "host", GrpcAuthHelper.getDisabledAuthHelper());
kvtMetadataTasks = new TableMetadataTasks(kvtStore, segmentHelper, executor, executor, "host", GrpcAuthHelper.getDisabledAuthHelper());
consumer = new ControllerService(kvtStore, kvtMetadataTasks, streamStore, bucketStore, streamMetadataTasks, streamTransactionMetadataTasks, new SegmentHelper(connectionPool, hostStore, executor), executor, null, requestTracker);
// stream1: 2 fixed segments; stream2: 3 fixed segments
final ScalingPolicy policy1 = ScalingPolicy.fixed(2);
final ScalingPolicy policy2 = ScalingPolicy.fixed(3);
final StreamConfiguration configuration1 = StreamConfiguration.builder().scalingPolicy(policy1).build();
final StreamConfiguration configuration2 = StreamConfiguration.builder().scalingPolicy(policy2).build();
// createScope
streamStore.createScope(SCOPE, null, executor).get();
// region createStream
startTs = System.currentTimeMillis();
OperationContext context = streamStore.createStreamContext(SCOPE, stream1, 0L);
streamStore.createStream(SCOPE, stream1, configuration1, startTs, context, executor).get();
streamStore.setState(SCOPE, stream1, State.ACTIVE, context, executor).get();
OperationContext context2 = streamStore.createStreamContext(SCOPE, stream2, 0L);
streamStore.createStream(SCOPE, stream2, configuration2, startTs, context2, executor).get();
streamStore.setState(SCOPE, stream2, State.ACTIVE, context2, executor).get();
// endregion
// region scaleSegments
// scale stream1: split segment 1 (0.5-1.0) into two halves
SimpleEntry<Double, Double> segment1 = new SimpleEntry<>(0.5, 0.75);
SimpleEntry<Double, Double> segment2 = new SimpleEntry<>(0.75, 1.0);
List<Long> sealedSegments = Collections.singletonList(1L);
scaleTs = System.currentTimeMillis();
// NOTE(review): this submitScale passes startTs, not the scaleTs assigned just
// above -- looks like a slip (stream2's scale below uses scaleTs); confirm intent
VersionedMetadata<EpochTransitionRecord> record = streamStore.submitScale(SCOPE, stream1, sealedSegments, Arrays.asList(segment1, segment2), startTs, null, null, executor).get();
VersionedMetadata<State> state = streamStore.getVersionedState(SCOPE, stream1, null, executor).get();
state = streamStore.updateVersionedState(SCOPE, stream1, State.SCALING, state, null, executor).get();
record = streamStore.startScale(SCOPE, stream1, false, record, state, null, executor).get();
streamStore.scaleCreateNewEpochs(SCOPE, stream1, record, null, executor).get();
streamStore.scaleSegmentsSealed(SCOPE, stream1, sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), record, null, executor).get();
streamStore.completeScale(SCOPE, stream1, record, null, executor).get();
streamStore.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
// scale stream2: seal all three original segments, replacing them with three new ranges
SimpleEntry<Double, Double> segment3 = new SimpleEntry<>(0.0, 0.5);
SimpleEntry<Double, Double> segment4 = new SimpleEntry<>(0.5, 0.75);
SimpleEntry<Double, Double> segment5 = new SimpleEntry<>(0.75, 1.0);
sealedSegments = Arrays.asList(0L, 1L, 2L);
record = streamStore.submitScale(SCOPE, stream2, sealedSegments, Arrays.asList(segment3, segment4, segment5), scaleTs, null, null, executor).get();
state = streamStore.getVersionedState(SCOPE, stream2, null, executor).get();
state = streamStore.updateVersionedState(SCOPE, stream2, State.SCALING, state, null, executor).get();
record = streamStore.startScale(SCOPE, stream2, false, record, state, null, executor).get();
streamStore.scaleCreateNewEpochs(SCOPE, stream2, record, null, executor).get();
streamStore.scaleSegmentsSealed(SCOPE, stream2, sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), record, null, executor).get();
streamStore.completeScale(SCOPE, stream2, record, null, executor).get();
streamStore.setState(SCOPE, stream2, State.ACTIVE, null, executor).get();
// endregion
}
Aggregations