
Example 16 with EpochRecord

use of io.pravega.controller.store.stream.records.EpochRecord in project pravega by pravega.

the class StreamMetadataStoreTest method concurrentStartScaleTest.

@Test(timeout = 30000)
public void concurrentStartScaleTest() throws Exception {
    final String scope = "ScopeScale";
    final String stream = "StreamScale1";
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).build();
    long start = System.currentTimeMillis();
    store.createScope(scope, null, executor).get();
    store.createStream(scope, stream, configuration, start, null, executor).get();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    // set minimum number of segments to 1
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
    store.startUpdateConfiguration(scope, stream, config, null, executor).join();
    VersionedMetadata<StreamConfigurationRecord> configRecord = store.getConfigurationRecord(scope, stream, null, executor).join();
    store.completeUpdateConfiguration(scope, stream, configRecord, null, executor).join();
    // region concurrent start scale
    // Test scenario where one request starts and completes as the other is waiting on StartScale.createEpochTransition
    SimpleEntry<Double, Double> segment2 = new SimpleEntry<>(0.0, 1.0);
    List<Long> segmentsToSeal = Arrays.asList(0L, 1L);
    long scaleTs = System.currentTimeMillis();
    @SuppressWarnings("unchecked") PersistentStreamBase streamObj = (PersistentStreamBase) ((AbstractStreamMetadataStore) store).getStream(scope, stream, null);
    PersistentStreamBase streamObjSpied = spy(streamObj);
    CompletableFuture<Void> latch = new CompletableFuture<>();
    CompletableFuture<Void> updateEpochTransitionCalled = new CompletableFuture<>();
    doAnswer(x -> streamObj.getEpochTransitionNode(x.getArgument(0))).when(streamObjSpied).getEpochTransitionNode(any());
    doAnswer(x -> streamObj.updateEpochTransitionNode(x.getArgument(0), x.getArgument(1))).when(streamObjSpied).updateEpochTransitionNode(any(), any());
    doAnswer(x -> CompletableFuture.runAsync(() -> {
        VersionedMetadata<EpochTransitionRecord> argument = x.getArgument(0);
        EpochTransitionRecord record = argument.getObject();
        if (record.getSegmentsToSeal().containsAll(segmentsToSeal)) {
            // wait until we create epoch transition outside of this method
            updateEpochTransitionCalled.complete(null);
            latch.join();
        }
    }).thenCompose(v -> streamObj.updateEpochTransitionNode(x.getArgument(0), x.getArgument(1)))).when(streamObjSpied).updateEpochTransitionNode(any(), any());
    StreamOperationContext context = new StreamOperationContext(((AbstractStreamMetadataStore) store).getScope(scope, null), streamObjSpied, 0L);
    // the following should be stuck at createEpochTransition
    CompletableFuture<VersionedMetadata<EpochTransitionRecord>> response = store.submitScale(scope, stream, segmentsToSeal, Collections.singletonList(segment2), scaleTs, null, context, executor);
    updateEpochTransitionCalled.join();
    // create new epochs corresponding to new scale as the previous scale waits to create epoch transition record
    SimpleEntry<Double, Double> segment2p = new SimpleEntry<>(0.0, 0.5);
    List<Long> segmentsToSeal2 = Collections.singletonList(0L);
    long scaleTs2 = System.currentTimeMillis();
    streamObjSpied.getEpochRecord(0, context).thenCompose(epochRecord -> {
        EpochTransitionRecord record = RecordHelper.computeEpochTransition(epochRecord, segmentsToSeal2, Collections.singletonList(segment2p), scaleTs2);
        return streamObjSpied.getEpochTransition(context).thenCompose(existing -> streamObjSpied.updateEpochTransitionNode(new VersionedMetadata<>(record, existing.getVersion()), context)).thenApply(v -> new VersionedMetadata<>(record, v));
    }).thenCompose(epochRecord -> store.getVersionedState(scope, stream, context, executor)
            .thenCompose(state -> store.updateVersionedState(scope, stream, State.SCALING, state, context, executor)
                    .thenCompose(updatedState -> store.startScale(scope, stream, false, epochRecord, updatedState, context, executor))
                    .thenCompose(x -> store.scaleCreateNewEpochs(scope, stream, epochRecord, context, executor))
                    .thenCompose(x -> store.scaleSegmentsSealed(scope, stream, segmentsToSeal2.stream().collect(Collectors.toMap(r -> r, r -> 0L)), epochRecord, context, executor))
                    .thenCompose(x -> store.completeScale(scope, stream, epochRecord, context, executor))))
            .thenCompose(y -> store.setState(scope, stream, State.ACTIVE, context, executor))
            .join();
    latch.complete(null);
    // first scale should fail in attempting to update epoch transition record.
    AssertExtensions.assertSuppliedFutureThrows("WriteConflict in start scale", () -> response, e -> Exceptions.unwrap(e) instanceof StoreException.WriteConflictException);
    VersionedMetadata<EpochTransitionRecord> versioned = streamObj.getEpochTransition(context).join();
    EpochTransitionRecord epochTransitionRecord = versioned.getObject();
    assertEquals(EpochTransitionRecord.EMPTY, epochTransitionRecord);
    // the epoch transition has been reset; set the state to SCALING again to attempt the next step of the failed scale.
    VersionedMetadata<State> state = store.getVersionedState(scope, stream, context, executor).join();
    state = store.updateVersionedState(scope, stream, State.SCALING, state, context, executor).join();
    // now call the next step of scaling -- startScale. This should throw an exception because the epoch transition record is empty.
    AssertExtensions.assertFutureThrows("epoch transition was supposed to be invalid", store.startScale(scope, stream, false, versioned, state, context, executor), e -> Exceptions.unwrap(e) instanceof IllegalStateException);
    // verify that state is reset to ACTIVE
    assertEquals(State.ACTIVE, store.getState(scope, stream, true, context, executor).join());
    // endregion
}
Also used : Arrays(java.util.Arrays) StreamCut(io.pravega.client.stream.StreamCut) ArgumentMatchers(org.mockito.ArgumentMatchers) StreamSegmentRecord(io.pravega.controller.store.stream.records.StreamSegmentRecord) AssertExtensions(io.pravega.test.common.AssertExtensions) Random(java.util.Random) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) VersionedMetadata(io.pravega.controller.store.VersionedMetadata) Pair(org.apache.commons.lang3.tuple.Pair) Stream(io.pravega.client.stream.Stream) Duration(java.time.Duration) Map(java.util.Map) After(org.junit.After) Mockito.doAnswer(org.mockito.Mockito.doAnswer) Controller(io.pravega.controller.stream.api.grpc.v1.Controller) ReaderGroupConfig(io.pravega.client.stream.ReaderGroupConfig) DeleteScopeStatus(io.pravega.controller.stream.api.grpc.v1.Controller.DeleteScopeStatus) EpochTransitionRecord(io.pravega.controller.store.stream.records.EpochTransitionRecord) StreamCutReferenceRecord(io.pravega.controller.store.stream.records.StreamCutReferenceRecord) StreamTruncationRecord(io.pravega.controller.store.stream.records.StreamTruncationRecord) ImmutableMap(com.google.common.collect.ImmutableMap) Set(java.util.Set) CompletionException(java.util.concurrent.CompletionException) UUID(java.util.UUID) Collectors(java.util.stream.Collectors) List(java.util.List) Assert.assertFalse(org.junit.Assert.assertFalse) Optional(java.util.Optional) HistoryTimeSeries(io.pravega.controller.store.stream.records.HistoryTimeSeries) Futures(io.pravega.common.concurrent.Futures) Segment(io.pravega.client.segment.impl.Segment) CommittingTransactionsRecord(io.pravega.controller.store.stream.records.CommittingTransactionsRecord) NameUtils.computeSegmentId(io.pravega.shared.NameUtils.computeSegmentId) RetentionPolicy(io.pravega.client.stream.RetentionPolicy) Exceptions(io.pravega.common.Exceptions) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) RetentionSet(io.pravega.controller.store.stream.records.RetentionSet) Mockito.spy(org.mockito.Mockito.spy) ArrayList(java.util.ArrayList) Strings(com.google.common.base.Strings) ReaderGroupConfigRecord(io.pravega.controller.store.stream.records.ReaderGroupConfigRecord) Lists(com.google.common.collect.Lists) StreamCutImpl(io.pravega.client.stream.impl.StreamCutImpl) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) RecordHelper(io.pravega.controller.store.stream.records.RecordHelper) SimpleEntry(java.util.AbstractMap.SimpleEntry) SealedSegmentsMapShard(io.pravega.controller.store.stream.records.SealedSegmentsMapShard) Before(org.junit.Before) NameUtils(io.pravega.shared.NameUtils) Assert.assertNotNull(org.junit.Assert.assertNotNull) WriterMark(io.pravega.controller.store.stream.records.WriterMark) Assert.assertTrue(org.junit.Assert.assertTrue) Test(org.junit.Test) StreamCutRecord(io.pravega.controller.store.stream.records.StreamCutRecord) TxnResource(io.pravega.controller.store.task.TxnResource) Assert.assertNotEquals(org.junit.Assert.assertNotEquals) ExecutionException(java.util.concurrent.ExecutionException) StreamConfigurationRecord(io.pravega.controller.store.stream.records.StreamConfigurationRecord) AbstractMap(java.util.AbstractMap) EpochRecord(io.pravega.controller.store.stream.records.EpochRecord) Assert.assertNull(org.junit.Assert.assertNull) Version(io.pravega.controller.store.Version) ExecutorServiceHelpers(io.pravega.common.concurrent.ExecutorServiceHelpers) Assert(org.junit.Assert) Collections(java.util.Collections) Mockito.reset(org.mockito.Mockito.reset) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) Assert.assertEquals(org.junit.Assert.assertEquals) CompletableFuture(java.util.concurrent.CompletableFuture) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) StreamConfigurationRecord(io.pravega.controller.store.stream.records.StreamConfigurationRecord) VersionedMetadata(io.pravega.controller.store.VersionedMetadata) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) EpochTransitionRecord(io.pravega.controller.store.stream.records.EpochTransitionRecord) SimpleEntry(java.util.AbstractMap.SimpleEntry) Test(org.junit.Test)
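
The interesting piece of this test is the blocking stub: a Mockito doAnswer that signals one CompletableFuture when the spied call arrives and then parks on a second future until the test releases it, which is what lets the second scale run to completion in the middle of the first. The pattern can be lifted out on its own. Below is a minimal, self-contained sketch; the AsyncStore class and its write method are hypothetical stand-ins for streamObjSpied.updateEpochTransitionNode:

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.spy;

import java.util.concurrent.CompletableFuture;

public class LatchedStubSketch {

    // Hypothetical async store whose write we want to pause mid-flight.
    public static class AsyncStore {
        public CompletableFuture<Void> write(String value) {
            return CompletableFuture.completedFuture(null);
        }
    }

    public static void main(String[] args) {
        AsyncStore real = new AsyncStore();
        AsyncStore spied = spy(real);
        CompletableFuture<Void> latch = new CompletableFuture<>();
        CompletableFuture<Void> writeCalled = new CompletableFuture<>();
        // Signal that the call arrived, park until released, then delegate to the real method.
        doAnswer(x -> CompletableFuture.runAsync(() -> {
            writeCalled.complete(null);
            latch.join();
        }).thenCompose(v -> real.write(x.getArgument(0)))).when(spied).write(any());

        CompletableFuture<Void> inFlight = spied.write("a");  // parks inside the stub
        writeCalled.join();                                   // wait until the stub has been entered
        // ... interleave a competing operation here, as the test above does ...
        latch.complete(null);                                 // release the stubbed call
        inFlight.join();
    }
}

The two-future handshake (writeCalled plus latch) is what makes the interleaving deterministic, instead of relying on sleeps or timing luck.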

Example 17 with EpochRecord

use of io.pravega.controller.store.stream.records.EpochRecord in project pravega by pravega.

the class StreamTestBase method testCreateStream.

@Test(timeout = 30000L)
public void testCreateStream() {
    OperationContext context = getContext();
    PersistentStreamBase stream = createStream("scope", "stream", System.currentTimeMillis(), 2, 0);
    assertEquals(State.ACTIVE, stream.getState(true, context).join());
    EpochRecord activeEpoch = stream.getActiveEpoch(true, context).join();
    assertEquals(0, activeEpoch.getEpoch());
    assertEquals(2, activeEpoch.getSegments().size());
    VersionedMetadata<StreamTruncationRecord> truncationRecord = stream.getTruncationRecord(context).join();
    assertEquals(StreamTruncationRecord.EMPTY, truncationRecord.getObject());
    VersionedMetadata<EpochTransitionRecord> etr = stream.getEpochTransition(context).join();
    assertEquals(EpochTransitionRecord.EMPTY, etr.getObject());
    VersionedMetadata<CommittingTransactionsRecord> ctr = stream.getVersionedCommitTransactionsRecord(context).join();
    assertEquals(CommittingTransactionsRecord.EMPTY, ctr.getObject());
    assertEquals(activeEpoch, stream.getEpochRecord(0, context).join());
    AssertExtensions.assertFutureThrows("", stream.getEpochRecord(1, context), e -> Exceptions.unwrap(e) instanceof StoreException.DataNotFoundException);
}
Also used : TestOperationContext(io.pravega.controller.store.TestOperationContext) EpochRecord(io.pravega.controller.store.stream.records.EpochRecord) StreamTruncationRecord(io.pravega.controller.store.stream.records.StreamTruncationRecord) EpochTransitionRecord(io.pravega.controller.store.stream.records.EpochTransitionRecord) CommittingTransactionsRecord(io.pravega.controller.store.stream.records.CommittingTransactionsRecord) Test(org.junit.Test)
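
Every metadata read in this test comes back as a VersionedMetadata<T>: the record itself plus a store Version, which later conditional updates must present (that is what the WriteConflictException in Example 16 guards). A minimal sketch of the underlying idea, using a hypothetical in-memory cell rather than Pravega's store API:

import java.util.concurrent.atomic.AtomicReference;

// Illustrative sketch of the versioned-record idea behind VersionedMetadata<T>:
// reads return (object, version); a write must present the version it read,
// and loses if another writer got in first.
public class VersionedCell<T> {

    public record Versioned<V>(V object, int version) { }

    private final AtomicReference<Versioned<T>> cell;

    public VersionedCell(T initial) {
        this.cell = new AtomicReference<>(new Versioned<>(initial, 0));
    }

    public Versioned<T> get() {
        return cell.get();
    }

    // Returns false on a conflicting concurrent update -- the in-memory analogue
    // of StoreException.WriteConflictException in the tests above.
    // NOTE: compareAndSet uses reference identity, so callers must hand back
    // the exact Versioned instance they previously read.
    public boolean update(Versioned<T> expected, T newValue) {
        return cell.compareAndSet(expected, new Versioned<>(newValue, expected.version() + 1));
    }
}

A caller holding a stale Versioned sees update return false and must re-read, which mirrors the conflict the concurrent-scale test above expects when a competing scale wins the race.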

Example 18 with EpochRecord

use of io.pravega.controller.store.stream.records.EpochRecord in project pravega by pravega.

the class StreamTestBase method createScaleAndRollStreamForMultiChunkTests.

// region multiple chunks test
private PersistentStreamBase createScaleAndRollStreamForMultiChunkTests(String name, String scope, int startingSegmentNumber, Supplier<Long> time) {
    OperationContext context = getContext();
    createScope(scope, context);
    PersistentStreamBase stream = createStream(scope, name, time.get(), 5, startingSegmentNumber, 2, 2);
    UUID txnId = createAndCommitTransaction(stream, 0, 0L);
    // scale the stream 5 times so that overall we have 6 epochs and hence 3 chunks.
    for (int i = 0; i < 5; i++) {
        StreamSegmentRecord first = stream.getActiveSegments(context).join().get(0);
        ArrayList<Long> sealedSegments = Lists.newArrayList(first.segmentId());
        List<Map.Entry<Double, Double>> newRanges = new LinkedList<>();
        newRanges.add(new AbstractMap.SimpleEntry<>(first.getKeyStart(), first.getKeyEnd()));
        Map<Long, Long> sealedSizeMap = new HashMap<>();
        sealedSizeMap.put(first.segmentId(), 100L);
        scaleStream(stream, time.get(), sealedSegments, newRanges, sealedSizeMap);
    }
    EpochRecord activeEpoch = stream.getActiveEpoch(true, context).join();
    // now roll a transaction so that 2 more epochs are added, for 8 epochs overall and hence 4 chunks
    Map<Long, Long> map1 = stream.getEpochRecord(0, context).join().getSegmentIds().stream().collect(Collectors.toMap(x -> computeSegmentId(NameUtils.getSegmentNumber(x), activeEpoch.getEpoch() + 1), x -> 100L));
    Map<Long, Long> map2 = activeEpoch.getSegmentIds().stream().collect(Collectors.toMap(x -> x, x -> 100L));
    rollTransactions(stream, time.get(), 0, activeEpoch.getEpoch(), map1, map2);
    // scale the stream 5 times so that overall we have 13 epochs and hence 7 chunks.
    for (int i = 0; i < 5; i++) {
        StreamSegmentRecord first = stream.getActiveSegments(context).join().get(0);
        ArrayList<Long> sealedSegments = Lists.newArrayList(first.segmentId());
        List<Map.Entry<Double, Double>> newRanges = new LinkedList<>();
        newRanges.add(new AbstractMap.SimpleEntry<>(first.getKeyStart(), first.getKeyEnd()));
        Map<Long, Long> sealedSizeMap = new HashMap<>();
        sealedSizeMap.put(first.segmentId(), 100L);
        scaleStream(stream, time.get(), sealedSegments, newRanges, sealedSizeMap);
    }
    return stream;
}
Also used : TestOperationContext(io.pravega.controller.store.TestOperationContext) StreamSegmentRecord(io.pravega.controller.store.stream.records.StreamSegmentRecord) AssertExtensions(io.pravega.test.common.AssertExtensions) Random(java.util.Random) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) VersionedMetadata(io.pravega.controller.store.VersionedMetadata) Mockito.anyBoolean(org.mockito.Mockito.anyBoolean) Map(java.util.Map) After(org.junit.After) Mockito.doAnswer(org.mockito.Mockito.doAnswer) EpochTransitionRecord(io.pravega.controller.store.stream.records.EpochTransitionRecord) StreamTruncationRecord(io.pravega.controller.store.stream.records.StreamTruncationRecord) ImmutableMap(com.google.common.collect.ImmutableMap) Set(java.util.Set) ActiveTxnRecord(io.pravega.controller.store.stream.records.ActiveTxnRecord) UUID(java.util.UUID) Collectors(java.util.stream.Collectors) List(java.util.List) Optional(java.util.Optional) HistoryTimeSeries(io.pravega.controller.store.stream.records.HistoryTimeSeries) Futures(io.pravega.common.concurrent.Futures) ArgumentMatchers.any(org.mockito.ArgumentMatchers.any) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) CommittingTransactionsRecord(io.pravega.controller.store.stream.records.CommittingTransactionsRecord) NameUtils.computeSegmentId(io.pravega.shared.NameUtils.computeSegmentId) Exceptions(io.pravega.common.Exceptions) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) Mockito.spy(org.mockito.Mockito.spy) AtomicReference(java.util.concurrent.atomic.AtomicReference) Supplier(java.util.function.Supplier) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) Lists(com.google.common.collect.Lists) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) LinkedList(java.util.LinkedList) SealedSegmentsMapShard(io.pravega.controller.store.stream.records.SealedSegmentsMapShard) Before(org.junit.Before) NameUtils.getEpoch(io.pravega.shared.NameUtils.getEpoch) NameUtils(io.pravega.shared.NameUtils) WriterMark(io.pravega.controller.store.stream.records.WriterMark) Assert.assertTrue(org.junit.Assert.assertTrue) Test(org.junit.Test) Mockito.times(org.mockito.Mockito.times) Assert.assertNotEquals(org.junit.Assert.assertNotEquals) Mockito.verify(org.mockito.Mockito.verify) AtomicLong(java.util.concurrent.atomic.AtomicLong) StreamConfigurationRecord(io.pravega.controller.store.stream.records.StreamConfigurationRecord) AbstractMap(java.util.AbstractMap) EpochRecord(io.pravega.controller.store.stream.records.EpochRecord) Version(io.pravega.controller.store.Version) TestOperationContext(io.pravega.controller.store.TestOperationContext) ExecutorServiceHelpers(io.pravega.common.concurrent.ExecutorServiceHelpers) Collections(java.util.Collections) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) Assert.assertEquals(org.junit.Assert.assertEquals) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) EpochRecord(io.pravega.controller.store.stream.records.EpochRecord) HashMap(java.util.HashMap) LinkedList(java.util.LinkedList) AbstractMap(java.util.AbstractMap) StreamSegmentRecord(io.pravega.controller.store.stream.records.StreamSegmentRecord) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) AtomicLong(java.util.concurrent.atomic.AtomicLong) UUID(java.util.UUID)
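
The map1 computation above rebuilds segment ids for the duplicate epoch with NameUtils.computeSegmentId, which packs an epoch and a segment number into a single long (getSegmentNumber and getEpoch recover the parts). A minimal sketch of such a packing follows; the exact bit layout here is an assumption for illustration, so use NameUtils itself in real code:

// Assumed layout: epoch in the high 32 bits, segment number in the low 32 bits.
public final class SegmentIdSketch {

    static long computeSegmentId(int segmentNumber, int epoch) {
        return ((long) epoch << 32) | (segmentNumber & 0xFFFFFFFFL);
    }

    static int getSegmentNumber(long segmentId) {
        return (int) segmentId;
    }

    static int getEpoch(long segmentId) {
        return (int) (segmentId >>> 32);
    }

    public static void main(String[] args) {
        long id = computeSegmentId(3, 7);
        System.out.println(id);                    // 30064771075
        System.out.println(getSegmentNumber(id));  // 3
        System.out.println(getEpoch(id));          // 7
    }
}

This is why the rolled-over segments in map1 keep their original segment numbers while advertising activeEpoch.getEpoch() + 1 as their epoch.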

Example 19 with EpochRecord

use of io.pravega.controller.store.stream.records.EpochRecord in project pravega by pravega.

the class RequestHandlersTest method testScaleIgnoreFairness.

@Test
public void testScaleIgnoreFairness() {
    StreamRequestHandler streamRequestHandler = new StreamRequestHandler(
            new AutoScaleTask(streamMetadataTasks, streamStore, executor),
            new ScaleOperationTask(streamMetadataTasks, streamStore, executor),
            new UpdateStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executor),
            new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new TruncateStreamTask(streamMetadataTasks, streamStore, executor),
            new CreateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new DeleteReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new UpdateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            streamStore,
            new DeleteScopeTask(streamMetadataTasks, streamStore, kvtStore, kvtTasks, executor),
            executor);
    String fairness = "fairness";
    streamStore.createScope(fairness, null, executor).join();
    streamMetadataTasks.createStream(fairness, fairness, StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build(), System.currentTimeMillis(), 0L).join();
    // 1. set segment helper mock to throw exception
    doAnswer(x -> Futures.failedFuture(new RuntimeException())).when(segmentHelper).sealSegment(anyString(), anyString(), anyLong(), anyString(), anyLong());
    // 2. start scale --> this should fail with a retryable exception while talking to segment store!
    ScaleOpEvent scaleEvent = new ScaleOpEvent(fairness, fairness, Collections.singletonList(0L), Collections.singletonList(new AbstractMap.SimpleEntry<>(0.0, 1.0)), false, System.currentTimeMillis(), 0L);
    AssertExtensions.assertFutureThrows("", streamRequestHandler.process(scaleEvent, () -> false), e -> Exceptions.unwrap(e) instanceof RuntimeException);
    // verify that scale was started
    assertEquals(State.SCALING, streamStore.getState(fairness, fairness, true, null, executor).join());
    // 3. set waiting processor to "random name"
    streamStore.createWaitingRequestIfAbsent(fairness, fairness, "myProcessor", null, executor).join();
    // 4. reset segment helper to return success
    doAnswer(x -> CompletableFuture.completedFuture(true)).when(segmentHelper).sealSegment(anyString(), anyString(), anyLong(), anyString(), anyLong());
    // 5. process again. it should succeed while ignoring waiting processor
    streamRequestHandler.process(scaleEvent, () -> false).join();
    EpochRecord activeEpoch = streamStore.getActiveEpoch(fairness, fairness, null, true, executor).join();
    assertEquals(1, activeEpoch.getEpoch());
    assertEquals(State.ACTIVE, streamStore.getState(fairness, fairness, true, null, executor).join());
    // 6. run a new scale. it should fail because of waiting processor.
    ScaleOpEvent scaleEvent2 = new ScaleOpEvent(fairness, fairness, Collections.singletonList(NameUtils.computeSegmentId(1, 1)), Collections.singletonList(new AbstractMap.SimpleEntry<>(0.0, 1.0)), false, System.currentTimeMillis(), 0L);
    AssertExtensions.assertFutureThrows("", streamRequestHandler.process(scaleEvent2, () -> false), e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
    streamStore.deleteWaitingRequestConditionally(fairness, fairness, "myProcessor", null, executor).join();
}
Also used : DeleteScopeTask(io.pravega.controller.server.eventProcessor.requesthandlers.DeleteScopeTask) SealStreamTask(io.pravega.controller.server.eventProcessor.requesthandlers.SealStreamTask) EpochRecord(io.pravega.controller.store.stream.records.EpochRecord) UpdateStreamTask(io.pravega.controller.server.eventProcessor.requesthandlers.UpdateStreamTask) CreateReaderGroupTask(io.pravega.controller.server.eventProcessor.requesthandlers.CreateReaderGroupTask) DeleteReaderGroupTask(io.pravega.controller.server.eventProcessor.requesthandlers.DeleteReaderGroupTask) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) ScaleOperationTask(io.pravega.controller.server.eventProcessor.requesthandlers.ScaleOperationTask) ScaleOpEvent(io.pravega.shared.controller.event.ScaleOpEvent) StoreException(io.pravega.controller.store.stream.StoreException) AutoScaleTask(io.pravega.controller.server.eventProcessor.requesthandlers.AutoScaleTask) StreamRequestHandler(io.pravega.controller.server.eventProcessor.requesthandlers.StreamRequestHandler) DeleteStreamTask(io.pravega.controller.server.eventProcessor.requesthandlers.DeleteStreamTask) UpdateReaderGroupTask(io.pravega.controller.server.eventProcessor.requesthandlers.UpdateReaderGroupTask) TruncateStreamTask(io.pravega.controller.server.eventProcessor.requesthandlers.TruncateStreamTask) Test(org.junit.Test)
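
Steps 1 and 4 re-stub segmentHelper.sealSegment between process attempts, first to fail and then to succeed. When the switch-over point does not need to be controlled explicitly, the same fail-then-succeed behaviour can be expressed in a single Mockito stubbing with chained answers; a minimal sketch with a hypothetical SegmentService interface standing in for the segment-store client:

import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

public class FailThenSucceedSketch {

    // Hypothetical stand-in for the segment-store client stubbed in the test.
    interface SegmentService {
        CompletableFuture<Boolean> sealSegment(String segment);
    }

    public static void main(String[] args) {
        SegmentService helper = mock(SegmentService.class);
        // First invocation fails with a retryable error; every later one succeeds.
        when(helper.sealSegment(anyString()))
                .thenReturn(CompletableFuture.failedFuture(new RuntimeException("transient")))
                .thenReturn(CompletableFuture.completedFuture(true));

        try {
            helper.sealSegment("s0").join();
        } catch (CompletionException e) {
            System.out.println("first attempt failed: " + e.getCause().getMessage());
        }
        System.out.println("retry result: " + helper.sealSegment("s0").join()); // true
    }
}

The test keeps two separate doAnswer stubbings instead because it needs other work (registering the waiting processor) to happen between the failing and the succeeding call.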

Example 20 with EpochRecord

use of io.pravega.controller.store.stream.records.EpochRecord in project pravega by pravega.

the class ControllerEventProcessorTest method testCommitEventForSealingStream.

@Test(timeout = 60000)
public void testCommitEventForSealingStream() {
    ScaleOperationTask scaleTask = new ScaleOperationTask(streamMetadataTasks, streamStore, executor);
    SealStreamTask sealStreamTask = new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executor);
    String stream = "commitWithSeal";
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
    streamStore.createStream(SCOPE, stream, config, System.currentTimeMillis(), null, executor).join();
    streamStore.setState(SCOPE, stream, State.ACTIVE, null, executor).join();
    UUID txnOnEpoch0 = streamStore.generateTransactionId(SCOPE, stream, null, executor).join();
    VersionedTransactionData txnData0 = streamStore.createTransaction(SCOPE, stream, txnOnEpoch0, 10000, 10000, null, executor).join();
    Assert.assertNotNull(txnData0);
    checkTransactionState(SCOPE, stream, txnOnEpoch0, TxnStatus.OPEN);
    streamStore.sealTransaction(SCOPE, stream, txnData0.getId(), true, Optional.empty(), "", Long.MIN_VALUE, null, executor).join();
    checkTransactionState(SCOPE, stream, txnData0.getId(), TxnStatus.COMMITTING);
    // scale stream
    List<Map.Entry<Double, Double>> newRange = new LinkedList<>();
    newRange.add(new AbstractMap.SimpleEntry<>(0.0, 1.0));
    scaleTask.execute(new ScaleOpEvent(SCOPE, stream, Collections.singletonList(0L), newRange, false, System.currentTimeMillis(), 0L)).join();
    UUID txnOnEpoch1 = streamStore.generateTransactionId(SCOPE, stream, null, executor).join();
    VersionedTransactionData txnData1 = streamStore.createTransaction(SCOPE, stream, txnOnEpoch1, 10000, 10000, null, executor).join();
    Assert.assertNotNull(txnData1);
    checkTransactionState(SCOPE, stream, txnOnEpoch1, TxnStatus.OPEN);
    streamStore.sealTransaction(SCOPE, stream, txnData1.getId(), true, Optional.empty(), "", Long.MIN_VALUE, null, executor).join();
    checkTransactionState(SCOPE, stream, txnData1.getId(), TxnStatus.COMMITTING);
    // set the stream to SEALING
    streamStore.setState(SCOPE, stream, State.SEALING, null, executor).join();
    // attempt to seal the stream. This should fail with postponement.
    AssertExtensions.assertFutureThrows("Seal stream should fail with operation not allowed as their are outstanding transactions", sealStreamTask.execute(new SealStreamEvent(SCOPE, stream, 0L)), e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
    // now attempt to commit the transaction on epoch 1. epoch in commit event is ignored and transactions on lowest epoch
    // should be committed first.
    CommitRequestHandler commitEventProcessor = new CommitRequestHandler(streamStore, streamMetadataTasks, streamTransactionMetadataTasks, bucketStore, executor);
    commitEventProcessor.processEvent(new CommitEvent(SCOPE, stream, txnData1.getEpoch())).join();
    checkTransactionState(SCOPE, stream, txnData0.getId(), TxnStatus.COMMITTED);
    checkTransactionState(SCOPE, stream, txnData1.getId(), TxnStatus.COMMITTING);
    EpochRecord activeEpoch = streamStore.getActiveEpoch(SCOPE, stream, null, true, executor).join();
    assertEquals(3, activeEpoch.getEpoch());
    assertEquals(1, activeEpoch.getReferenceEpoch());
    // attempt to seal the stream. This should still fail with postponement.
    AssertExtensions.assertFutureThrows("Seal stream should fail with operation not allowed as their are outstanding transactions", sealStreamTask.execute(new SealStreamEvent(SCOPE, stream, 0L)), e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
    // now attempt to commit the transaction on epoch 1.
    commitEventProcessor.processEvent(new CommitEvent(SCOPE, stream, txnData1.getEpoch())).join();
    checkTransactionState(SCOPE, stream, txnData1.getId(), TxnStatus.COMMITTED);
    // verify transaction has rolled over
    activeEpoch = streamStore.getActiveEpoch(SCOPE, stream, null, true, executor).join();
    assertEquals(3, activeEpoch.getEpoch());
    assertEquals(1, activeEpoch.getReferenceEpoch());
    // now attempt to seal the stream. it should complete.
    sealStreamTask.execute(new SealStreamEvent(SCOPE, stream, 0L)).join();
}
Also used : SealStreamTask(io.pravega.controller.server.eventProcessor.requesthandlers.SealStreamTask) EpochRecord(io.pravega.controller.store.stream.records.EpochRecord) SealStreamEvent(io.pravega.shared.controller.event.SealStreamEvent) ScaleOperationTask(io.pravega.controller.server.eventProcessor.requesthandlers.ScaleOperationTask) VersionedTransactionData(io.pravega.controller.store.stream.VersionedTransactionData) LinkedList(java.util.LinkedList) ScaleOpEvent(io.pravega.shared.controller.event.ScaleOpEvent) CommitRequestHandler(io.pravega.controller.server.eventProcessor.requesthandlers.CommitRequestHandler) StoreException(io.pravega.controller.store.stream.StoreException) AbstractMap(java.util.AbstractMap) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) CommitEvent(io.pravega.shared.controller.event.CommitEvent) UUID(java.util.UUID) Test(org.junit.Test)
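
checkTransactionState is a helper defined elsewhere in this test class; a plausible minimal sketch of it is below, meant to live inside the test class and use its streamStore and executor fields. The transactionStatus call matches the spirit of StreamMetadataStore's transaction-status lookup, but treat the exact signature as an assumption:

// Hedged sketch of the checkTransactionState helper used throughout this test;
// the transactionStatus signature is assumed, not copied from the class.
private void checkTransactionState(String scope, String stream, UUID txnId, TxnStatus expected) {
    TxnStatus actual = streamStore.transactionStatus(scope, stream, txnId, null, executor).join();
    assertEquals(expected, actual);
}

Note also the pair of assertions on activeEpoch: rolling the transactions duplicates both the transaction's epoch and the active epoch, so the active epoch number advances to 3 while getReferenceEpoch() still reports 1, the epoch whose segment layout it duplicates.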

Aggregations

EpochRecord (io.pravega.controller.store.stream.records.EpochRecord) 29
StreamConfiguration (io.pravega.client.stream.StreamConfiguration) 21
UUID (java.util.UUID) 21
Test (org.junit.Test) 20
VersionedMetadata (io.pravega.controller.store.VersionedMetadata) 19
EpochTransitionRecord (io.pravega.controller.store.stream.records.EpochTransitionRecord) 19
ArrayList (java.util.ArrayList) 19
StreamSegmentRecord (io.pravega.controller.store.stream.records.StreamSegmentRecord) 18
Exceptions (io.pravega.common.Exceptions) 17
Futures (io.pravega.common.concurrent.Futures) 17
CommittingTransactionsRecord (io.pravega.controller.store.stream.records.CommittingTransactionsRecord) 17
NameUtils.computeSegmentId (io.pravega.shared.NameUtils.computeSegmentId) 17
HashMap (java.util.HashMap) 17
List (java.util.List) 17
Map (java.util.Map) 17
Collectors (java.util.stream.Collectors) 17
ImmutableMap (com.google.common.collect.ImmutableMap) 16
Lists (com.google.common.collect.Lists) 16
Version (io.pravega.controller.store.Version) 16
StreamConfigurationRecord (io.pravega.controller.store.stream.records.StreamConfigurationRecord) 16