
Example 26 with EpochRecord

use of io.pravega.controller.store.stream.records.EpochRecord in project pravega by pravega.

In class ScaleRequestHandlerTest, the method testInconsistentScaleRequestAfterRollingTxn:

@Test(timeout = 30000)
public void testInconsistentScaleRequestAfterRollingTxn() throws Exception {
    // This test checks a scenario where, after a rolling txn, an outstanding scale request
    // submitted against the old epoch should fail its epoch consistency check
    String stream = "newStream";
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(1, 2, 2)).build();
    streamMetadataTasks.createStream(scope, stream, config, System.currentTimeMillis(), 0L).get();
    EventWriterMock writer = new EventWriterMock();
    streamMetadataTasks.setRequestEventWriter(writer);
    ScaleOperationTask scaleRequestHandler = new ScaleOperationTask(streamMetadataTasks, streamStore, executor);
    StreamRequestHandler requestHandler = new StreamRequestHandler(null, scaleRequestHandler, null, null, null, null, null, null, null, streamStore, null, executor);
    CommitRequestHandler commitRequestHandler = new CommitRequestHandler(streamStore, streamMetadataTasks, streamTransactionMetadataTasks, bucketStore, executor);
    // 1. create two transactions on the old epoch and set them to committing
    UUID txnIdOldEpoch = streamStore.generateTransactionId(scope, stream, null, executor).join();
    VersionedTransactionData txnData = streamStore.createTransaction(scope, stream, txnIdOldEpoch, 10000, 10000, null, executor).join();
    streamStore.sealTransaction(scope, stream, txnData.getId(), true, Optional.empty(), "", Long.MIN_VALUE, null, executor).join();
    UUID txnIdOldEpoch2 = streamStore.generateTransactionId(scope, stream, null, executor).join();
    VersionedTransactionData txnData2 = streamStore.createTransaction(scope, stream, txnIdOldEpoch2, 10000, 10000, null, executor).join();
    streamStore.sealTransaction(scope, stream, txnData2.getId(), true, Optional.empty(), "", Long.MIN_VALUE, null, executor).join();
    EpochRecord epochZero = streamStore.getActiveEpoch(scope, stream, null, true, executor).join();
    assertEquals(0, epochZero.getEpoch());
    // 2. start scale
    requestHandler.process(new ScaleOpEvent(scope, stream, Lists.newArrayList(0L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 0.25), new AbstractMap.SimpleEntry<>(0.25, 0.5)), false, System.currentTimeMillis(), System.currentTimeMillis()), () -> false).join();
    // 3. verify that scale is complete
    State state = streamStore.getState(scope, stream, true, null, executor).join();
    assertEquals(State.ACTIVE, state);
    // 4. just submit a new scale. don't let it run. this should create an epoch transition. state should still be active
    streamStore.submitScale(scope, stream, Lists.newArrayList(1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.5, 0.75), new AbstractMap.SimpleEntry<>(0.75, 1.0)), System.currentTimeMillis(), null, null, executor).join();
    // 5. commit on old epoch. this should roll over.
    assertTrue(Futures.await(commitRequestHandler.processEvent(new CommitEvent(scope, stream, txnData.getEpoch()))));
    TxnStatus txnStatus = streamStore.transactionStatus(scope, stream, txnIdOldEpoch, null, executor).join();
    assertEquals(TxnStatus.COMMITTED, txnStatus);
    // 6. run the scale. the epochTransitionConsistent check in scaleCreateNewEpochs should fail and the request should complete exceptionally with an IllegalStateException
    AssertExtensions.assertFutureThrows("epoch transition should be inconsistent", requestHandler.process(new ScaleOpEvent(scope, stream, Lists.newArrayList(1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.5, 0.75), new AbstractMap.SimpleEntry<>(0.75, 1.0)), false, System.currentTimeMillis(), System.currentTimeMillis()), () -> false), e -> Exceptions.unwrap(e) instanceof IllegalStateException);
    state = streamStore.getState(scope, stream, true, null, executor).join();
    assertEquals(State.ACTIVE, state);
}
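The scale in step 6 fails because the epoch transition submitted in step 4 still references the pre-rollover active epoch, so the consistency check rejects it. The assertion relies on unwrapping the CompletionException that failed futures throw; below is a minimal sketch of that unwrap-and-match idiom, assuming only the JUnit asserts and io.pravega.common.Exceptions already used by this test (illustrative, not part of the Pravega test suite):

@Test
public void unwrapIdiomSketch() {
    // a future that fails the same way the scale request does in step 6
    CompletableFuture<Void> failed = new CompletableFuture<>();
    failed.completeExceptionally(new IllegalStateException("epoch transition is inconsistent"));
    boolean matched = false;
    try {
        failed.join();
    } catch (CompletionException e) {
        // join() wraps the root cause, so the predicate passed to assertFutureThrows
        // checks the unwrapped exception type, exactly as the test above does
        matched = Exceptions.unwrap(e) instanceof IllegalStateException;
    }
    assertTrue(matched);
}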

Example 27 with EpochRecord

use of io.pravega.controller.store.stream.records.EpochRecord in project pravega by pravega.

In class ScaleRequestHandlerTest, the method testScaleRequestWithMinimumSegment:

@Test(timeout = 30000)
public void testScaleRequestWithMinimumSegment() throws ExecutionException, InterruptedException {
    AutoScaleTask requestHandler = new AutoScaleTask(streamMetadataTasks, streamStore, executor);
    ScaleOperationTask scaleRequestHandler = new ScaleOperationTask(streamMetadataTasks, streamStore, executor);
    StreamRequestHandler multiplexer = new StreamRequestHandler(requestHandler, scaleRequestHandler, null, null, null, null, null, null, null, streamStore, null, executor);
    EventWriterMock writer = new EventWriterMock();
    streamMetadataTasks.setRequestEventWriter(writer);
    String stream = "mystream";
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(1, 2, 5)).build();
    streamMetadataTasks.createStream(scope, stream, config, System.currentTimeMillis(), 0L).get();
    // change stream configuration to min segment count = 4
    config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(1, 2, 4)).build();
    streamStore.startUpdateConfiguration(scope, stream, config, null, executor).join();
    VersionedMetadata<StreamConfigurationRecord> configRecord = streamStore.getConfigurationRecord(scope, stream, null, executor).join();
    streamStore.completeUpdateConfiguration(scope, stream, configRecord, null, executor).join();
    // process first auto scale down event. it should only mark the segment as cold
    multiplexer.process(new AutoScaleEvent(scope, stream, 1L, AutoScaleEvent.DOWN, System.currentTimeMillis(), 0, false, System.currentTimeMillis()), () -> false).join();
    assertTrue(writer.queue.isEmpty());
    assertTrue(streamStore.isCold(scope, stream, 1L, null, executor).join());
    // process second auto scale down event. since it is not for an immediate neighbour, it should only mark the segment as cold
    multiplexer.process(new AutoScaleEvent(scope, stream, 3L, AutoScaleEvent.DOWN, System.currentTimeMillis(), 0, false, System.currentTimeMillis()), () -> false).join();
    assertTrue(streamStore.isCold(scope, stream, 3L, null, executor).join());
    // no scale event should be posted
    assertTrue(writer.queue.isEmpty());
    // process third auto scale down event. This should result in a scale op event being posted to merge segments 0, 1
    multiplexer.process(new AutoScaleEvent(scope, stream, 0L, AutoScaleEvent.DOWN, System.currentTimeMillis(), 0, false, System.currentTimeMillis()), () -> false).join();
    assertTrue(streamStore.isCold(scope, stream, 0L, null, executor).join());
    // verify that a new event has been posted
    assertEquals(1, writer.queue.size());
    ControllerEvent event = writer.queue.take();
    assertTrue(event instanceof ScaleOpEvent);
    ScaleOpEvent scaleDownEvent1 = (ScaleOpEvent) event;
    assertEquals(1, scaleDownEvent1.getNewRanges().size());
    assertEquals(2, scaleDownEvent1.getSegmentsToSeal().size());
    assertTrue(scaleDownEvent1.getSegmentsToSeal().contains(0L));
    assertTrue(scaleDownEvent1.getSegmentsToSeal().contains(1L));
    // process fourth auto scale down event. This should result in a scale op event being posted to merge segments 3, 4
    multiplexer.process(new AutoScaleEvent(scope, stream, 4L, AutoScaleEvent.DOWN, System.currentTimeMillis(), 0, false, System.currentTimeMillis()), () -> false).join();
    assertTrue(streamStore.isCold(scope, stream, 4L, null, executor).join());
    // verify that a new event has been posted
    assertEquals(1, writer.queue.size());
    event = writer.queue.take();
    assertTrue(event instanceof ScaleOpEvent);
    ScaleOpEvent scaleDownEvent2 = (ScaleOpEvent) event;
    assertEquals(1, scaleDownEvent2.getNewRanges().size());
    assertEquals(2, scaleDownEvent2.getSegmentsToSeal().size());
    assertTrue(scaleDownEvent2.getSegmentsToSeal().contains(3L));
    assertTrue(scaleDownEvent2.getSegmentsToSeal().contains(4L));
    // process first scale down event, this should submit scale and scale the stream down to 4 segments
    multiplexer.process(scaleDownEvent1, () -> false).join();
    EpochRecord activeEpoch = streamStore.getActiveEpoch(scope, stream, null, true, executor).join();
    List<StreamSegmentRecord> segments = activeEpoch.getSegments();
    assertEquals(1, activeEpoch.getEpoch());
    assertEquals(4, segments.size());
    assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 2));
    assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 3));
    assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 4));
    assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 5));
    // process second scale down event. the stream is already at the configured minimum of 4 segments, so this should not trigger another scale
    multiplexer.process(scaleDownEvent2, () -> false).join();
    // verify that no scale has happened
    activeEpoch = streamStore.getActiveEpoch(scope, stream, null, true, executor).join();
    segments = activeEpoch.getSegments();
    assertEquals(1, activeEpoch.getEpoch());
    assertEquals(4, segments.size());
    assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 2));
    assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 3));
    assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 4));
    assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 5));
}
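The segment-number checks above work because, after the first scale-down, the active epoch holds segments numbered 2 through 5 even though their full segment ids changed: a Pravega segment id packs the creation epoch alongside the segment number. A hedged sketch of that relationship, assuming the NameUtils helpers visible in the imports (computeSegmentId, getSegmentNumber, getEpoch) behave as described:

@Test
public void segmentIdPackingSketch() {
    // segment number 5 is the segment created by the first scale-down into epoch 1
    long segmentId = NameUtils.computeSegmentId(5, 1);
    assertEquals(5, NameUtils.getSegmentNumber(segmentId));
    assertEquals(1, NameUtils.getEpoch(segmentId));
}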

Example 28 with EpochRecord

use of io.pravega.controller.store.stream.records.EpochRecord in project pravega by pravega.

In class StreamMetadataStoreTest, the method scaleWithTxTest:

@Test(timeout = 30000)
public void scaleWithTxTest() throws Exception {
    final String scope = "ScopeScaleWithTx";
    final String stream = "StreamScaleWithTx";
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).build();
    long start = System.currentTimeMillis();
    store.createScope(scope, null, executor).get();
    store.createStream(scope, stream, configuration, start, null, executor).get();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    long scaleTs = System.currentTimeMillis();
    SimpleEntry<Double, Double> segment2 = new SimpleEntry<>(0.5, 0.75);
    SimpleEntry<Double, Double> segment3 = new SimpleEntry<>(0.75, 1.0);
    List<Long> scale1SealedSegments = Collections.singletonList(1L);
    // region Txn created before scale and during scale
    // scale with transaction test
    // first txn created before-scale
    UUID txnId = store.generateTransactionId(scope, stream, null, executor).join();
    VersionedTransactionData tx01 = store.createTransaction(scope, stream, txnId, 100, 100, null, executor).get();
    assertEquals(0, tx01.getEpoch());
    VersionedMetadata<EpochTransitionRecord> versioned = store.submitScale(scope, stream, scale1SealedSegments, Arrays.asList(segment2, segment3), scaleTs, null, null, executor).join();
    EpochTransitionRecord response = versioned.getObject();
    Map<Long, Map.Entry<Double, Double>> scale1SegmentsCreated = response.getNewSegmentsWithRange();
    final int epoch = response.getActiveEpoch();
    assertEquals(0, epoch);
    assertNotNull(scale1SegmentsCreated);
    VersionedMetadata<State> state = store.getVersionedState(scope, stream, null, executor).join();
    state = store.updateVersionedState(scope, stream, State.SCALING, state, null, executor).join();
    versioned = store.startScale(scope, stream, false, versioned, state, null, executor).join();
    // second txn created after new segments are created in segment table but not yet in history table
    // assert that txn is created on old epoch
    store.scaleCreateNewEpochs(scope, stream, versioned, null, executor).join();
    txnId = store.generateTransactionId(scope, stream, null, executor).join();
    VersionedTransactionData tx02 = store.createTransaction(scope, stream, txnId, 100, 100, null, executor).get();
    assertEquals(0, tx02.getEpoch());
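    // the txn id encodes its creation epoch in the top 32 bits of the most significant half,
    // which is what the shift in the next assertion extracts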
    assertEquals(0, (int) (tx02.getId().getMostSignificantBits() >> 32));
    // third transaction created after new epoch created
    txnId = store.generateTransactionId(scope, stream, null, executor).join();
    store.sealTransaction(scope, stream, tx02.getId(), true, Optional.of(tx02.getVersion()), "", Long.MIN_VALUE, null, executor).get();
    store.sealTransaction(scope, stream, tx01.getId(), true, Optional.of(tx01.getVersion()), "", Long.MIN_VALUE, null, executor).get();
    store.scaleSegmentsSealed(scope, stream, scale1SealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), versioned, null, executor).join();
    store.completeScale(scope, stream, versioned, null, executor).join();
    VersionedTransactionData tx03 = store.createTransaction(scope, stream, txnId, 100, 100, null, executor).get();
    assertEquals(0, tx03.getEpoch());
    assertEquals(0, (int) (tx03.getId().getMostSignificantBits() >> 32));
    store.setState(scope, stream, State.ACTIVE, null, executor).join();
    // ensure that we can commit transactions on old epoch and roll over.
    EpochRecord activeEpoch = store.getActiveEpoch(scope, stream, null, true, executor).join();
    // submit another scale request without starting the scale
    List<Long> scale2SealedSegments = Collections.singletonList(0L);
    long scaleTs2 = System.currentTimeMillis();
    SimpleEntry<Double, Double> segment4 = new SimpleEntry<>(0.0, 0.25);
    SimpleEntry<Double, Double> segment5 = new SimpleEntry<>(0.25, 0.5);
    VersionedMetadata<EpochTransitionRecord> versioned2 = store.submitScale(scope, stream, scale2SealedSegments, Arrays.asList(segment4, segment5), scaleTs2, null, null, executor).join();
    EpochTransitionRecord response2 = versioned2.getObject();
    assertEquals(activeEpoch.getEpoch(), response2.getActiveEpoch());
    VersionedMetadata<CommittingTransactionsRecord> record = store.startCommitTransactions(scope, stream, 100, null, executor).join().getKey();
    store.setState(scope, stream, State.COMMITTING_TXN, null, executor).join();
    record = store.startRollingTxn(scope, stream, activeEpoch.getEpoch(), record, null, executor).join();
    store.rollingTxnCreateDuplicateEpochs(scope, stream, Collections.emptyMap(), System.currentTimeMillis(), record, null, executor).join();
    store.completeRollingTxn(scope, stream, Collections.emptyMap(), record, null, executor).join();
    store.completeCommitTransactions(scope, stream, record, null, executor, Collections.emptyMap()).join();
    store.setState(scope, stream, State.ACTIVE, null, executor).join();
    activeEpoch = store.getActiveEpoch(scope, stream, null, true, executor).join();
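    // the rolling txn appended duplicates of the committed-on epoch (0) and of the active epoch (1)
    // as epochs 2 and 3, so the stream lands on epoch 3, which references the original epoch 1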
    assertEquals(3, activeEpoch.getEpoch());
    assertEquals(1, activeEpoch.getReferenceEpoch());
    assertEquals(3, activeEpoch.getSegments().size());
    List<StreamSegmentRecord> txnDuplicate = store.getSegmentsInEpoch(scope, stream, 2, null, executor).join();
    assertEquals(2, txnDuplicate.size());
    List<StreamSegmentRecord> activeEpochDuplicate = store.getSegmentsInEpoch(scope, stream, 3, null, executor).join();
    assertEquals(3, activeEpochDuplicate.size());
    EpochRecord txnCommittedEpoch = store.getEpoch(scope, stream, 2, null, executor).join();
    assertEquals(0, txnCommittedEpoch.getReferenceEpoch());
    assertEquals(store.transactionStatus(scope, stream, tx01.getId(), null, executor).join(), TxnStatus.COMMITTED);
    assertEquals(store.transactionStatus(scope, stream, tx02.getId(), null, executor).join(), TxnStatus.COMMITTED);
    assertEquals(store.transactionStatus(scope, stream, tx03.getId(), null, executor).join(), TxnStatus.OPEN);
    store.sealTransaction(scope, stream, tx03.getId(), true, Optional.of(tx03.getVersion()), "", Long.MIN_VALUE, null, executor).get();
    // endregion
    // region verify migrate request for manual scale
    // now start manual scale against previously submitted scale request that was on old epoch from before rolling txn.
    // verify that it gets migrated to latest duplicate epoch
    state = store.getVersionedState(scope, stream, null, executor).join();
    state = store.updateVersionedState(scope, stream, State.SCALING, state, null, executor).join();
    versioned2 = store.submitScale(scope, stream, scale2SealedSegments, Arrays.asList(segment4, segment5), scaleTs2, null, null, executor).join();
    versioned2 = store.startScale(scope, stream, true, versioned2, state, null, executor).join();
    store.scaleCreateNewEpochs(scope, stream, versioned2, null, executor).join();
    txnId = store.generateTransactionId(scope, stream, null, executor).join();
    VersionedTransactionData tx14 = store.createTransaction(scope, stream, txnId, 100, 100, null, executor).get();
    assertEquals(1, tx14.getEpoch());
    store.sealTransaction(scope, stream, tx14.getId(), true, Optional.of(tx14.getVersion()), "", Long.MIN_VALUE, null, executor).get();
    // verify that new txns can be created and are created on original epoch
    txnId = store.generateTransactionId(scope, stream, null, executor).join();
    VersionedTransactionData tx15 = store.createTransaction(scope, stream, txnId, 100, 100, null, executor).get();
    assertEquals(1, tx15.getEpoch());
    store.scaleCreateNewEpochs(scope, stream, versioned2, null, executor).join();
    store.scaleSegmentsSealed(scope, stream, Collections.emptyMap(), versioned2, null, executor).join();
    store.completeScale(scope, stream, versioned2, null, executor).join();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    activeEpoch = store.getActiveEpoch(scope, stream, null, true, executor).join();
    assertEquals(4, activeEpoch.getEpoch());
    assertEquals(4, activeEpoch.getReferenceEpoch());
    store.sealTransaction(scope, stream, tx15.getId(), true, Optional.of(tx15.getVersion()), "", Long.MIN_VALUE, null, executor).get();
    record = store.startCommitTransactions(scope, stream, 100, null, executor).join().getKey();
    store.setState(scope, stream, State.COMMITTING_TXN, null, executor).get();
    record = store.startRollingTxn(scope, stream, activeEpoch.getEpoch(), record, null, executor).join();
    store.rollingTxnCreateDuplicateEpochs(scope, stream, Collections.emptyMap(), System.currentTimeMillis(), record, null, executor).join();
    store.completeRollingTxn(scope, stream, Collections.emptyMap(), record, null, executor).join();
    store.completeCommitTransactions(scope, stream, record, null, executor, Collections.emptyMap()).join();
    store.setState(scope, stream, State.ACTIVE, null, executor).join();
    activeEpoch = store.getActiveEpoch(scope, stream, null, true, executor).join();
    assertEquals(6, activeEpoch.getEpoch());
    assertEquals(4, activeEpoch.getReferenceEpoch());
// endregion
}
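The reference-epoch assertions capture the key property of rolling transactions: a rolled-over (duplicate) epoch keeps pointing at the epoch it copies, while an epoch produced by a regular scale references itself. A small hedged sketch of that distinction; the isDuplicate helper is hypothetical and introduced only for illustration, not part of the store API:

// hypothetical helper, for illustration only
static boolean isDuplicate(EpochRecord epoch) {
    // duplicates created by a rolling txn reference the epoch they copy,
    // so their reference epoch differs from their own epoch number
    return epoch.getReferenceEpoch() != epoch.getEpoch();
}

// in this test: epoch 3 (reference 1) and epoch 6 (reference 4) are duplicates,
// while epoch 4 (reference 4) was produced by the manual scale and is not.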

Example 29 with EpochRecord

use of io.pravega.controller.store.stream.records.EpochRecord in project pravega by pravega.

In class StreamMetadataStoreTest, the method scaleWithTxnForInconsistentScanerios:

@Test(timeout = 30000)
public void scaleWithTxnForInconsistentScanerios() throws Exception {
    final String scope = "ScopeScaleWithTx";
    final String stream = "StreamScaleWithTx1";
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).build();
    long start = System.currentTimeMillis();
    store.createScope(scope, null, executor).get();
    store.createStream(scope, stream, configuration, start, null, executor).get();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    UUID txnId = store.generateTransactionId(scope, stream, null, executor).join();
    VersionedTransactionData tx1 = store.createTransaction(scope, stream, txnId, 100, 100, null, executor).get();
    store.sealTransaction(scope, stream, txnId, true, Optional.of(tx1.getVersion()), "", Long.MIN_VALUE, null, executor).get();
    long scaleTs = System.currentTimeMillis();
    List<Long> scale1SealedSegments = Collections.singletonList(0L);
    // run a scale that seals segment 0
    VersionedMetadata<EpochTransitionRecord> versioned = store.submitScale(scope, stream, scale1SealedSegments, Arrays.asList(new AbstractMap.SimpleEntry<>(0.0, 0.25), new AbstractMap.SimpleEntry<>(0.25, 0.5)), scaleTs, null, null, executor).join();
    EpochTransitionRecord response = versioned.getObject();
    assertEquals(0, response.getActiveEpoch());
    VersionedMetadata<State> state = store.getVersionedState(scope, stream, null, executor).join();
    state = store.updateVersionedState(scope, stream, State.SCALING, state, null, executor).join();
    store.startScale(scope, stream, false, versioned, state, null, executor).join();
    store.scaleCreateNewEpochs(scope, stream, versioned, null, executor).join();
    store.scaleSegmentsSealed(scope, stream, scale1SealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), versioned, null, executor).join();
    store.completeScale(scope, stream, versioned, null, executor).join();
    store.setState(scope, stream, State.ACTIVE, null, executor).join();
    // start second scale
    versioned = store.submitScale(scope, stream, Collections.singletonList(1L), Arrays.asList(new AbstractMap.SimpleEntry<>(0.5, 0.75), new AbstractMap.SimpleEntry<>(0.75, 1.0)), scaleTs, null, null, executor).join();
    response = versioned.getObject();
    assertEquals(1, response.getActiveEpoch());
    EpochRecord activeEpoch = store.getActiveEpoch(scope, stream, null, true, executor).join();
    VersionedMetadata<CommittingTransactionsRecord> record = store.startCommitTransactions(scope, stream, 100, null, executor).join().getKey();
    store.setState(scope, stream, State.COMMITTING_TXN, null, executor).join();
    record = store.startRollingTxn(scope, stream, activeEpoch.getEpoch(), record, null, executor).join();
    store.rollingTxnCreateDuplicateEpochs(scope, stream, Collections.emptyMap(), System.currentTimeMillis(), record, null, executor).join();
    store.completeRollingTxn(scope, stream, Collections.emptyMap(), record, null, executor).join();
    store.completeCommitTransactions(scope, stream, record, null, executor, Collections.emptyMap()).join();
    store.setState(scope, stream, State.ACTIVE, null, executor).join();
    state = store.getVersionedState(scope, stream, null, executor).join();
    state = store.updateVersionedState(scope, stream, State.SCALING, state, null, executor).join();
    versioned = store.submitScale(scope, stream, Collections.singletonList(1L), Arrays.asList(new AbstractMap.SimpleEntry<>(0.5, 0.75), new AbstractMap.SimpleEntry<>(0.75, 1.0)), scaleTs, null, null, executor).join();
    response = versioned.getObject();
    assertEquals(1, response.getActiveEpoch());
    AssertExtensions.assertFutureThrows("attempting to create new segments against inconsistent epoch transition record", store.startScale(scope, stream, false, versioned, state, null, executor), e -> Exceptions.unwrap(e) instanceof IllegalStateException);
    // verify that state is reset to active
    State stateVal = store.getState(scope, stream, true, null, executor).join();
    assertEquals(State.ACTIVE, stateVal);
}
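startScale rejects the resubmitted transition because it was computed against epoch 1, but the rolling txn has since moved the stream onto a duplicate active epoch, so the record no longer matches the stream's current state. A simplified, hypothetical sketch of that consistency rule (the real store performs a more detailed check against the segments being sealed):

// hypothetical simplification, for illustration only; not the store's actual implementation
static boolean transitionStillApplicable(EpochTransitionRecord transition, EpochRecord activeEpoch) {
    // a pending transition is only usable while the epoch it was computed against
    // is still the active epoch, or the active epoch is a duplicate of it that a
    // manual scale can be migrated onto (as Example 28 demonstrates)
    return transition.getActiveEpoch() == activeEpoch.getEpoch()
            || transition.getActiveEpoch() == activeEpoch.getReferenceEpoch();
}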

Aggregations

EpochRecord (io.pravega.controller.store.stream.records.EpochRecord): 29
StreamConfiguration (io.pravega.client.stream.StreamConfiguration): 21
UUID (java.util.UUID): 21
Test (org.junit.Test): 20
VersionedMetadata (io.pravega.controller.store.VersionedMetadata): 19
EpochTransitionRecord (io.pravega.controller.store.stream.records.EpochTransitionRecord): 19
ArrayList (java.util.ArrayList): 19
StreamSegmentRecord (io.pravega.controller.store.stream.records.StreamSegmentRecord): 18
Exceptions (io.pravega.common.Exceptions): 17
Futures (io.pravega.common.concurrent.Futures): 17
CommittingTransactionsRecord (io.pravega.controller.store.stream.records.CommittingTransactionsRecord): 17
NameUtils.computeSegmentId (io.pravega.shared.NameUtils.computeSegmentId): 17
HashMap (java.util.HashMap): 17
List (java.util.List): 17
Map (java.util.Map): 17
Collectors (java.util.stream.Collectors): 17
ImmutableMap (com.google.common.collect.ImmutableMap): 16
Lists (com.google.common.collect.Lists): 16
Version (io.pravega.controller.store.Version): 16
StreamConfigurationRecord (io.pravega.controller.store.stream.records.StreamConfigurationRecord): 16