Example 6 with StreamConfigurationRecord

Use of io.pravega.controller.store.stream.records.StreamConfigurationRecord in project pravega by pravega.

From the class WatermarkWorkflowTest, method testWatermarkingWorkflow.

@Test(timeout = 30000L)
public void testWatermarkingWorkflow() {
    SynchronizerClientFactory clientFactory = spy(SynchronizerClientFactory.class);
    ConcurrentHashMap<String, MockRevisionedStreamClient> revisionedStreamClientMap = new ConcurrentHashMap<>();
    doAnswer(x -> {
        String streamName = x.getArgument(0);
        // one mock client per stream name, created on first use
        return revisionedStreamClientMap.computeIfAbsent(streamName, s -> new MockRevisionedStreamClient());
    }).when(clientFactory).createRevisionedStreamClient(anyString(), any(), any());
    @Cleanup PeriodicWatermarking periodicWatermarking = new PeriodicWatermarking(streamMetadataStore, bucketStore, sp -> clientFactory, executor, new RequestTracker(false));
    String streamName = "stream";
    String scope = "scope";
    streamMetadataStore.createScope(scope, null, executor).join();
    streamMetadataStore.createStream(scope, streamName, StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(3)).timestampAggregationTimeout(10000L).build(), System.currentTimeMillis(), null, executor).join();
    streamMetadataStore.setState(scope, streamName, State.ACTIVE, null, executor).join();
    // set minimum number of segments to 1
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).timestampAggregationTimeout(10000L).build();
    streamMetadataStore.startUpdateConfiguration(scope, streamName, config, null, executor).join();
    VersionedMetadata<StreamConfigurationRecord> configRecord = streamMetadataStore.getConfigurationRecord(scope, streamName, null, executor).join();
    streamMetadataStore.completeUpdateConfiguration(scope, streamName, configRecord, null, executor).join();
    // 2. note writer1, writer2, writer3 marks
    // writer 1 reports segments 0, 1.
    // writer 2 reports segments 1, 2.
    // writer 3 reports segments 0, 2.
    String writer1 = "writer1";
    Map<Long, Long> map1 = ImmutableMap.of(0L, 100L, 1L, 200L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer1, 100L, map1, null, executor).join();
    String writer2 = "writer2";
    Map<Long, Long> map2 = ImmutableMap.of(1L, 100L, 2L, 200L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer2, 101L, map2, null, executor).join();
    String writer3 = "writer3";
    Map<Long, Long> map3 = ImmutableMap.of(2L, 100L, 0L, 200L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer3, 102L, map3, null, executor).join();
    // 3. run watermarking workflow.
    StreamImpl stream = new StreamImpl(scope, streamName);
    periodicWatermarking.watermark(stream).join();
    // verify that a watermark has been emitted.
    // this should emit a watermark that contains all three segments with offsets = 200L
    // and timestamp = 100L
    MockRevisionedStreamClient revisionedClient = revisionedStreamClientMap.get(NameUtils.getMarkStreamForStream(streamName));
    assertEquals(revisionedClient.watermarks.size(), 1);
    Watermark watermark = revisionedClient.watermarks.get(0).getValue();
    assertEquals(watermark.getLowerTimeBound(), 100L);
    assertEquals(watermark.getStreamCut().size(), 3);
    assertEquals(getSegmentOffset(watermark, 0L), 200L);
    assertEquals(getSegmentOffset(watermark, 1L), 200L);
    assertEquals(getSegmentOffset(watermark, 2L), 200L);
    // send positions only on segment 1 and segment 2. nothing on segment 0.
    map1 = ImmutableMap.of(1L, 300L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer1, 200L, map1, null, executor).join();
    map2 = ImmutableMap.of(1L, 100L, 2L, 300L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer2, 201L, map2, null, executor).join();
    map3 = ImmutableMap.of(2L, 300L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer3, 202L, map3, null, executor).join();
    // run watermark workflow. this will emit a watermark with time = 200L and streamcut = 0 -> 200L, 1 -> 300L, 2 -> 300L
    periodicWatermarking.watermark(stream).join();
    assertEquals(revisionedClient.watermarks.size(), 2);
    watermark = revisionedClient.watermarks.get(1).getValue();
    assertEquals(watermark.getLowerTimeBound(), 200L);
    assertEquals(watermark.getStreamCut().size(), 3);
    assertEquals(getSegmentOffset(watermark, 0L), 200L);
    assertEquals(getSegmentOffset(watermark, 1L), 300L);
    assertEquals(getSegmentOffset(watermark, 2L), 300L);
    // scale stream 0, 1, 2 -> 3, 4
    scaleStream(streamName, scope);
    // writer 1 reports segments 0, 1.
    // writer 2 reports segments 1, 2
    // writer 3 reports segment 3
    map1 = ImmutableMap.of(0L, 300L, 1L, 400L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer1, 302L, map1, null, executor).join();
    map2 = ImmutableMap.of(1L, 100L, 2L, 400L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer2, 301L, map2, null, executor).join();
    long segment3 = NameUtils.computeSegmentId(3, 1);
    long segment4 = NameUtils.computeSegmentId(4, 1);
    map3 = ImmutableMap.of(segment3, 100L);
    // writer 3 has lowest reported time.
    streamMetadataStore.noteWriterMark(scope, streamName, writer3, 300L, map3, null, executor).join();
    // run watermark workflow. this will emit a watermark with time = 300L and streamcut = 3 -> 100L, 4 -> 0L
    periodicWatermarking.watermark(stream).join();
    assertEquals(revisionedClient.watermarks.size(), 3);
    watermark = revisionedClient.watermarks.get(2).getValue();
    assertEquals(watermark.getLowerTimeBound(), 300L);
    assertEquals(watermark.getStreamCut().size(), 2);
    assertEquals(getSegmentOffset(watermark, segment3), 100L);
    assertEquals(getSegmentOffset(watermark, segment4), 0L);
    // report positions from writers 1 and 2; between them they cover segments 0, 1 and 2.
    // writer 1 reports segments 0, 1.
    // writer 2 reports segments 1, 2.
    // writer 3 doesn't report.
    map1 = ImmutableMap.of(0L, 400L, 1L, 400L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer1, 400L, map1, null, executor).join();
    map2 = ImmutableMap.of(1L, 100L, 2L, 400L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer2, 401L, map2, null, executor).join();
    // run watermark workflow. there shouldn't be a watermark emitted because writer 3 is active and has not reported a time.
    periodicWatermarking.watermark(stream).join();
    assertEquals(revisionedClient.watermarks.size(), 3);
    // even though writer3 is excluded from computation, its mark is still not removed because it is still active
    WriterMark writer3Mark = streamMetadataStore.getWriterMark(scope, streamName, writer3, null, executor).join();
    assertTrue(writer3Mark.isAlive());
    assertEquals(writer3Mark.getTimestamp(), 300L);
    // report shutdown of writer 3
    streamMetadataStore.shutdownWriter(scope, streamName, writer3, null, executor).join();
    writer3Mark = streamMetadataStore.getWriterMark(scope, streamName, writer3, null, executor).join();
    assertFalse(writer3Mark.isAlive());
    assertEquals(writer3Mark.getTimestamp(), 300L);
    // now a watermark should be generated and time should advance. But the watermark's stream cut is already ahead
    // of the writers' positions, so the stream cut should not advance.
    // Also, writer 3, being inactive and shut down, should be removed.
    periodicWatermarking.watermark(stream).join();
    assertEquals(revisionedClient.watermarks.size(), 4);
    watermark = revisionedClient.watermarks.get(3).getValue();
    assertEquals(watermark.getLowerTimeBound(), 400L);
    assertEquals(watermark.getStreamCut().size(), 2);
    assertEquals(getSegmentOffset(watermark, segment3), 100L);
    assertEquals(getSegmentOffset(watermark, segment4), 0L);
    AssertExtensions.assertFutureThrows("Writer 3 should have been removed from store", streamMetadataStore.getWriterMark(scope, streamName, writer3, null, executor), e -> Exceptions.unwrap(e) instanceof StoreException.DataNotFoundException);
    // writers 1, 2 and 3 report marks, with writer 3 reporting a mark on segment 4. Writer 3 will get added again.
    map1 = ImmutableMap.of(0L, 500L, 1L, 500L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer1, 500L, map1, null, executor).join();
    map2 = ImmutableMap.of(1L, 100L, 2L, 500L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer2, 501L, map2, null, executor).join();
    map3 = ImmutableMap.of(segment4, 500L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer3, 502L, map3, null, executor).join();
    // run watermarking workflow. It should generate watermark that includes segments 3 -> 100L and 4 -> 500L with time 500L
    periodicWatermarking.watermark(stream).join();
    assertEquals(revisionedClient.watermarks.size(), 5);
    watermark = revisionedClient.watermarks.get(4).getValue();
    assertEquals(watermark.getLowerTimeBound(), 500L);
    assertEquals(watermark.getStreamCut().size(), 2);
    assertEquals(getSegmentOffset(watermark, segment3), 100L);
    assertEquals(getSegmentOffset(watermark, segment4), 500L);
}
Also used : WriterMark(io.pravega.controller.store.stream.records.WriterMark) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) RequestTracker(io.pravega.common.tracing.RequestTracker) Cleanup(lombok.Cleanup) StoreException(io.pravega.controller.store.stream.StoreException) SynchronizerClientFactory(io.pravega.client.SynchronizerClientFactory) StreamImpl(io.pravega.client.stream.impl.StreamImpl) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) StreamConfigurationRecord(io.pravega.controller.store.stream.records.StreamConfigurationRecord) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Watermark(io.pravega.shared.watermarks.Watermark) Test(org.junit.Test)
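
The assertions above call a getSegmentOffset helper that is defined elsewhere in WatermarkWorkflowTest and not shown on this page. A minimal sketch, assuming Watermark.getStreamCut() maps SegmentWithRange keys to offsets and that each key exposes its segment id:

private long getSegmentOffset(Watermark watermark, long segmentId) {
    // find the stream-cut entry whose key refers to the requested segment id
    return watermark.getStreamCut().entrySet().stream()
                    .filter(entry -> entry.getKey().getSegmentId() == segmentId)
                    .findFirst()
                    .map(Map.Entry::getValue)
                    .orElseThrow(() -> new AssertionError("segment not present in watermark: " + segmentId));
}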

Example 7 with StreamConfigurationRecord

Use of io.pravega.controller.store.stream.records.StreamConfigurationRecord in project pravega by pravega.

From the class StreamMetadataTasksTest, method updateConfigVerifyScale.

private void updateConfigVerifyScale(WriterMock requestEventWriter, StreamConfiguration streamConfiguration, int expectedSegmentCount) throws InterruptedException, ExecutionException {
    StreamConfigurationRecord configProp = streamStorePartialMock.getConfigurationRecord(SCOPE, stream1, null, executor).join().getObject();
    assertFalse(configProp.isUpdating());
    CompletableFuture<UpdateStreamStatus.Status> updateOperationFuture = streamMetadataTasks.updateStream(SCOPE, stream1, streamConfiguration, 0L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    assertEquals(UpdateStreamStatus.Status.SUCCESS, updateOperationFuture.join());
    // verify that the stream has scaled.
    assertEquals(consumer.getCurrentSegments(SCOPE, stream1, 0L).get().size(), expectedSegmentCount);
    configProp = streamStorePartialMock.getConfigurationRecord(SCOPE, stream1, null, executor).join().getObject();
    assertEquals(configProp.getStreamConfiguration(), streamConfiguration);
}
Also used : UpdateSubscriberStatus(io.pravega.controller.stream.api.grpc.v1.Controller.UpdateSubscriberStatus) DeleteReaderGroupStatus(io.pravega.controller.stream.api.grpc.v1.Controller.DeleteReaderGroupStatus) ScaleStreamStatus(io.pravega.controller.stream.api.grpc.v1.Controller.ScaleResponse.ScaleStreamStatus) TxnStatus(io.pravega.controller.store.stream.TxnStatus) UpdateStreamStatus(io.pravega.controller.stream.api.grpc.v1.Controller.UpdateStreamStatus) StreamConfigurationRecord(io.pravega.controller.store.stream.records.StreamConfigurationRecord)
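
A hypothetical call site for this helper. The configuration and expected count below are illustrative, assuming stream1 currently has more active segments than the new fixed policy:

// request a scale-down to two fixed segments and verify it took effect
StreamConfiguration scaleDownConfig = StreamConfiguration.builder()
        .scalingPolicy(ScalingPolicy.fixed(2))
        .build();
updateConfigVerifyScale(requestEventWriter, scaleDownConfig, 2);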

Example 8 with StreamConfigurationRecord

Use of io.pravega.controller.store.stream.records.StreamConfigurationRecord in project pravega by pravega.

From the class StreamMetadataTasksTest, method consumptionBasedRetentionWithScale.

@Test(timeout = 30000)
public void consumptionBasedRetentionWithScale() throws Exception {
    final ScalingPolicy policy = ScalingPolicy.fixed(3);
    final RetentionPolicy retentionPolicy = RetentionPolicy.bySizeBytes(0L, 1000L);
    String stream1 = "consumptionSize";
    StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
    streamStorePartialMock.createStream(SCOPE, stream1, configuration, System.currentTimeMillis(), null, executor).get();
    streamStorePartialMock.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
    configuration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).retentionPolicy(retentionPolicy).build();
    streamStorePartialMock.startUpdateConfiguration(SCOPE, stream1, configuration, null, executor).join();
    VersionedMetadata<StreamConfigurationRecord> configRecord = streamStorePartialMock.getConfigurationRecord(SCOPE, stream1, null, executor).join();
    streamStorePartialMock.completeUpdateConfiguration(SCOPE, stream1, configRecord, null, executor).join();
    final Segment seg0 = new Segment(SCOPE, stream1, 0L);
    final Segment seg1 = new Segment(SCOPE, stream1, 1L);
    ImmutableMap<Segment, Long> startStreamCut = ImmutableMap.of(seg0, 0L, seg1, 0L);
    Map<Stream, StreamCut> startSC = ImmutableMap.of(Stream.of(SCOPE, stream1), new StreamCutImpl(Stream.of(SCOPE, stream1), startStreamCut));
    ImmutableMap<Segment, Long> endStreamCut = ImmutableMap.of(seg0, 2000L, seg1, 3000L);
    Map<Stream, StreamCut> endSC = ImmutableMap.of(Stream.of(SCOPE, stream1), new StreamCutImpl(Stream.of(SCOPE, stream1), endStreamCut));
    ReaderGroupConfig consumpRGConfig = ReaderGroupConfig.builder().automaticCheckpointIntervalMillis(30000L).groupRefreshTimeMillis(20000L).maxOutstandingCheckpointRequest(2).retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT).startingStreamCuts(startSC).endingStreamCuts(endSC).build();
    consumpRGConfig = ReaderGroupConfig.cloneConfig(consumpRGConfig, UUID.randomUUID(), 0L);
    doReturn(CompletableFuture.completedFuture(Controller.CreateStreamStatus.Status.SUCCESS)).when(streamMetadataTasks).createRGStream(anyString(), anyString(), any(), anyLong(), anyInt(), anyLong());
    WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
    streamMetadataTasks.setRequestEventWriter(requestEventWriter);
    String subscriber1 = "subscriber1";
    CompletableFuture<Controller.CreateReaderGroupResponse> createStatus = streamMetadataTasks.createReaderGroup(SCOPE, subscriber1, consumpRGConfig, System.currentTimeMillis(), 0L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    Controller.CreateReaderGroupResponse createResponse1 = createStatus.join();
    assertEquals(Controller.CreateReaderGroupResponse.Status.SUCCESS, createResponse1.getStatus());
    assertEquals(0L, createResponse1.getConfig().getGeneration());
    assertFalse(ReaderGroupConfig.DEFAULT_UUID.toString().equals(createResponse1.getConfig().getReaderGroupId()));
    String subscriber2 = "subscriber2";
    createStatus = streamMetadataTasks.createReaderGroup(SCOPE, subscriber2, consumpRGConfig, System.currentTimeMillis(), 0L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    Controller.CreateReaderGroupResponse createResponse2 = createStatus.join();
    assertEquals(Controller.CreateReaderGroupResponse.Status.SUCCESS, createResponse2.getStatus());
    assertEquals(0L, createResponse2.getConfig().getGeneration());
    assertFalse(ReaderGroupConfig.DEFAULT_UUID.toString().equals(createResponse2.getConfig().getReaderGroupId()));
    final String subscriber1Name = NameUtils.getScopedReaderGroupName(SCOPE, subscriber1);
    final String subscriber2Name = NameUtils.getScopedReaderGroupName(SCOPE, subscriber2);
    // example:
    // | s0 | s3      |
    // |    | s4 |    | s6
    // | s1      | s5 |
    // | s2      |    |
    // valid stream cuts: { s0/off, s5/-1 }, { s0/off, s2/off, s5/-1 }
    // lower bound = { s0/off, s2/off, s5/-1 }
    // valid stream cuts: { s0/off, s5/-1 }, { s0/off, s2/off, s5/-1 }, { s0/off, s1/off, s2/off }
    // lower bound = { s0/off, s1/off, s2/off }
    long three = NameUtils.computeSegmentId(3, 1);
    long four = NameUtils.computeSegmentId(4, 1);
    long five = NameUtils.computeSegmentId(5, 2);
    long six = NameUtils.computeSegmentId(6, 3);
    // 0 split to 3 and 4
    scale(SCOPE, stream1, ImmutableMap.of(0L, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0 / 6), new AbstractMap.SimpleEntry<>(1.0 / 6, 1.0 / 3)));
    // 4, 1, 2 merged to 5
    scale(SCOPE, stream1, ImmutableMap.of(1L, 1L, 2L, 2L, four, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(1.0 / 6, 1.0)));
    // merge 3, 5 to 6
    scale(SCOPE, stream1, ImmutableMap.of(three, 1L, five, 2L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)));
    assertNotEquals(0, consumer.getCurrentSegments(SCOPE, stream1, 0L).get().size());
    streamMetadataTasks.setRetentionFrequencyMillis(1L);
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, createResponse1.getConfig().getReaderGroupId(), createResponse1.getConfig().getGeneration(), ImmutableMap.of(0L, 1L, five, -1L), 0L).join();
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber2Name, createResponse2.getConfig().getReaderGroupId(), createResponse2.getConfig().getGeneration(), ImmutableMap.of(0L, 1L, 2L, 1L, five, -1L), 0L).join();
    Map<Long, Long> map1 = new HashMap<>();
    map1.put(six, 2L);
    long size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, stream1, map1, Optional.empty(), null, executor).join();
    doReturn(CompletableFuture.completedFuture(new StreamCutRecord(1L, size, ImmutableMap.copyOf(map1)))).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
    // call retention and verify that retention policy applies
    streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, 1L, null, "").join();
    // now retention set has one stream cut 6/2
    // the subscriber lower bound is 0/1, 2/1, 5/-1; truncation should happen at the lower bound
    VersionedMetadata<StreamTruncationRecord> truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
    assertEquals(truncationRecord.getObject().getStreamCut().get(0L).longValue(), 1L);
    assertEquals(truncationRecord.getObject().getStreamCut().get(2L).longValue(), 1L);
    assertEquals(truncationRecord.getObject().getStreamCut().get(five).longValue(), -1L);
    assertTrue(truncationRecord.getObject().isUpdating());
    streamStorePartialMock.completeTruncation(SCOPE, stream1, truncationRecord, null, executor).join();
}
Also used : StreamTruncationRecord(io.pravega.controller.store.stream.records.StreamTruncationRecord) StreamCutImpl(io.pravega.client.stream.impl.StreamCutImpl) HashMap(java.util.HashMap) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) Segment(io.pravega.client.segment.impl.Segment) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) Stream(io.pravega.client.stream.Stream) StreamConfigurationRecord(io.pravega.controller.store.stream.records.StreamConfigurationRecord) ControllerEventStreamWriterMock(io.pravega.controller.mocks.ControllerEventStreamWriterMock) EventStreamWriterMock(io.pravega.controller.mocks.EventStreamWriterMock) ReaderGroupConfig(io.pravega.client.stream.ReaderGroupConfig) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) StreamCut(io.pravega.client.stream.StreamCut) Controller(io.pravega.controller.stream.api.grpc.v1.Controller) RetentionPolicy(io.pravega.client.stream.RetentionPolicy) AtomicLong(java.util.concurrent.atomic.AtomicLong) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) RGStreamCutRecord(io.pravega.shared.controller.event.RGStreamCutRecord) StreamCutRecord(io.pravega.controller.store.stream.records.StreamCutRecord) Test(org.junit.Test)
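
The scale(...) helper invoked above is defined elsewhere in the test class. A plausible sketch, assuming it drives the same store sequence that scaleTest (Example 9) performs explicitly; the real helper may differ in detail:

private void scale(String scope, String stream, Map<Long, Long> sealedSegmentsWithSize,
                   List<Map.Entry<Double, Double>> newRanges) {
    long scaleTs = System.currentTimeMillis();
    // submit the scale and move the stream into SCALING state
    VersionedMetadata<EpochTransitionRecord> response = streamStorePartialMock.submitScale(
            scope, stream, new ArrayList<>(sealedSegmentsWithSize.keySet()), newRanges, scaleTs, null, null, executor).join();
    VersionedMetadata<State> state = streamStorePartialMock.getVersionedState(scope, stream, null, executor).join();
    state = streamStorePartialMock.updateVersionedState(scope, stream, State.SCALING, state, null, executor).join();
    response = streamStorePartialMock.startScale(scope, stream, false, response, state, null, executor).join();
    // create the new epochs, seal the old segments, and complete the scale
    streamStorePartialMock.scaleCreateNewEpochs(scope, stream, response, null, executor).join();
    streamStorePartialMock.scaleSegmentsSealed(scope, stream, sealedSegmentsWithSize, response, null, executor).join();
    streamStorePartialMock.completeScale(scope, stream, response, null, executor).join();
    streamStorePartialMock.setState(scope, stream, State.ACTIVE, null, executor).join();
}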

Example 9 with StreamConfigurationRecord

Use of io.pravega.controller.store.stream.records.StreamConfigurationRecord in project pravega by pravega.

From the class StreamMetadataStoreTest, method scaleTest.

@Test(timeout = 30000)
public void scaleTest() throws Exception {
    final String scope = "ScopeScale";
    final String stream = "StreamScale";
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).build();
    long start = System.currentTimeMillis();
    store.createScope(scope, null, executor).get();
    store.createStream(scope, stream, configuration, start, null, executor).get();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    // set minimum number of segments to 1 so that we can also test scale downs
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
    store.startUpdateConfiguration(scope, stream, config, null, executor).join();
    VersionedMetadata<StreamConfigurationRecord> configRecord = store.getConfigurationRecord(scope, stream, null, executor).join();
    store.completeUpdateConfiguration(scope, stream, configRecord, null, executor).join();
    // region idempotent
    long scaleTs = System.currentTimeMillis();
    SimpleEntry<Double, Double> segment1 = new SimpleEntry<>(0.5, 0.75);
    SimpleEntry<Double, Double> segment2 = new SimpleEntry<>(0.75, 1.0);
    List<Long> scale1SealedSegments = Collections.singletonList(computeSegmentId(1, 0));
    // 1. submit scale
    VersionedMetadata<EpochTransitionRecord> empty = store.getEpochTransition(scope, stream, null, executor).join();
    VersionedMetadata<EpochTransitionRecord> response = store.submitScale(scope, stream, scale1SealedSegments, Arrays.asList(segment1, segment2), scaleTs, null, null, executor).join();
    Map<Long, Map.Entry<Double, Double>> scale1SegmentsCreated = response.getObject().getNewSegmentsWithRange();
    final int scale1ActiveEpoch = response.getObject().getActiveEpoch();
    assertEquals(0, scale1ActiveEpoch);
    // rerun start scale with old epoch transition. should throw write conflict
    AssertExtensions.assertSuppliedFutureThrows("", () -> store.submitScale(scope, stream, scale1SealedSegments, Arrays.asList(segment1, segment2), scaleTs, empty, null, executor), e -> Exceptions.unwrap(e) instanceof StoreException.WriteConflictException);
    // rerun start scale with null epoch transition, should be idempotent
    response = store.submitScale(scope, stream, scale1SealedSegments, Arrays.asList(segment1, segment2), scaleTs, null, null, executor).join();
    assertEquals(response.getObject().getNewSegmentsWithRange(), scale1SegmentsCreated);
    VersionedMetadata<State> state = store.getVersionedState(scope, stream, null, executor).join();
    state = store.updateVersionedState(scope, stream, State.SCALING, state, null, executor).get();
    response = store.startScale(scope, stream, false, response, state, null, executor).join();
    // 2. scale new segments created
    store.scaleCreateNewEpochs(scope, stream, response, null, executor).join();
    // rerun start scale and new segments created
    response = store.submitScale(scope, stream, scale1SealedSegments, Arrays.asList(segment1, segment2), scaleTs, null, null, executor).join();
    assertEquals(response.getObject().getNewSegmentsWithRange(), scale1SegmentsCreated);
    response = store.startScale(scope, stream, false, response, state, null, executor).join();
    store.scaleCreateNewEpochs(scope, stream, response, null, executor).join();
    // 3. scale segments sealed -- this will complete scale
    store.scaleSegmentsSealed(scope, stream, scale1SealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), response, null, executor).join();
    store.completeScale(scope, stream, response, null, executor).join();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    // rerun -- idempotent
    store.scaleCreateNewEpochs(scope, stream, response, null, executor).join();
    EpochRecord activeEpoch = store.getActiveEpoch(scope, stream, null, true, executor).join();
    assertEquals(1, activeEpoch.getEpoch());
    store.scaleSegmentsSealed(scope, stream, scale1SealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), response, null, executor).join();
    store.getActiveEpoch(scope, stream, null, true, executor).join();
    assertEquals(1, activeEpoch.getEpoch());
    // rerun submit scale -- should fail with precondition failure
    VersionedMetadata<EpochTransitionRecord> etr = store.getEpochTransition(scope, stream, null, executor).join();
    assertEquals(EpochTransitionRecord.EMPTY, etr.getObject());
    AssertExtensions.assertThrows("Submit scale with old data with old etr", () -> store.submitScale(scope, stream, scale1SealedSegments, Arrays.asList(segment1, segment2), scaleTs, empty, null, executor).join(), e -> Exceptions.unwrap(e) instanceof StoreException.WriteConflictException);
    AssertExtensions.assertThrows("Submit scale with old data with latest etr", () -> store.submitScale(scope, stream, scale1SealedSegments, Arrays.asList(segment1, segment2), scaleTs, etr, null, executor).join(), e -> Exceptions.unwrap(e) instanceof EpochTransitionOperationExceptions.PreConditionFailureException);
    AssertExtensions.assertThrows("Submit scale with null etr", () -> store.submitScale(scope, stream, scale1SealedSegments, Arrays.asList(segment1, segment2), scaleTs, null, null, executor).join(), e -> Exceptions.unwrap(e) instanceof EpochTransitionOperationExceptions.PreConditionFailureException);
    // endregion
    // 2 different conflicting scale operations
    // region run concurrent conflicting scale
    SimpleEntry<Double, Double> segment3 = new SimpleEntry<>(0.0, 0.5);
    SimpleEntry<Double, Double> segment4 = new SimpleEntry<>(0.5, 0.75);
    SimpleEntry<Double, Double> segment5 = new SimpleEntry<>(0.75, 1.0);
    List<Long> scale2SealedSegments = Arrays.asList(computeSegmentId(0, 0), computeSegmentId(2, 1), computeSegmentId(3, 1));
    long scaleTs2 = System.currentTimeMillis();
    response = store.submitScale(scope, stream, scale2SealedSegments, Arrays.asList(segment3, segment4, segment5), scaleTs2, null, null, executor).get();
    Map<Long, Map.Entry<Double, Double>> scale2SegmentsCreated = response.getObject().getNewSegmentsWithRange();
    final int scale2ActiveEpoch = response.getObject().getActiveEpoch();
    store.setState(scope, stream, State.SCALING, null, executor).get();
    // rerun of scale 1 -- should fail with conflict
    AssertExtensions.assertThrows("Concurrent conflicting scale", () -> store.submitScale(scope, stream, scale1SealedSegments, Arrays.asList(segment1, segment2), scaleTs, null, null, executor).join(), e -> Exceptions.unwrap(e) instanceof EpochTransitionOperationExceptions.ConflictException);
    store.scaleCreateNewEpochs(scope, stream, response, null, executor).get();
    store.scaleSegmentsSealed(scope, stream, scale1SealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), response, null, executor).get();
    store.completeScale(scope, stream, response, null, executor).join();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    // endregion
    // region concurrent submit scale requests
    // run two concurrent runScale operations such that after doing a getEpochTransition, we create a new epoch
    // transition node. We should get ScaleConflict in such a case.
    // mock createEpochTransition
    SimpleEntry<Double, Double> segment6 = new SimpleEntry<>(0.0, 1.0);
    List<Long> scale3SealedSegments = Arrays.asList(computeSegmentId(4, 2), computeSegmentId(5, 2), computeSegmentId(6, 2));
    long scaleTs3 = System.currentTimeMillis();
    @SuppressWarnings("unchecked") PersistentStreamBase streamObj = (PersistentStreamBase) ((AbstractStreamMetadataStore) store).getStream(scope, stream, null);
    PersistentStreamBase streamObjSpied = spy(streamObj);
    CompletableFuture<Void> latch = new CompletableFuture<>();
    CompletableFuture<Void> updateEpochTransitionCalled = new CompletableFuture<>();
    doAnswer(x -> CompletableFuture.runAsync(() -> {
        // wait until we create epoch transition outside of this method
        updateEpochTransitionCalled.complete(null);
        latch.join();
    }).thenCompose(v -> streamObj.updateEpochTransitionNode(x.getArgument(0), x.getArgument(1)))).when(streamObjSpied).updateEpochTransitionNode(any(), any());
    doAnswer(x -> streamObj.getEpochTransitionNode(x.getArgument(0))).when(streamObjSpied).getEpochTransitionNode(any());
    OperationContext context = new StreamOperationContext(((AbstractStreamMetadataStore) store).getScope(scope, null), streamObjSpied, 0L);
    // the following should be stuck at updateEpochTransitionNode
    CompletableFuture<VersionedMetadata<EpochTransitionRecord>> resp = store.submitScale(scope, stream, scale3SealedSegments, Collections.singletonList(segment6), scaleTs3, null, context, executor);
    updateEpochTransitionCalled.join();
    VersionedMetadata<EpochTransitionRecord> epochRecord = streamObj.getEpochTransition(context).join();
    streamObj.updateEpochTransitionNode(new VersionedMetadata<>(EpochTransitionRecord.EMPTY, epochRecord.getVersion()), context).join();
    latch.complete(null);
    AssertExtensions.assertFutureThrows("", resp, e -> Exceptions.unwrap(e) instanceof StoreException.WriteConflictException);
// endregion
}
Also used : Arrays(java.util.Arrays) StreamCut(io.pravega.client.stream.StreamCut) ArgumentMatchers(org.mockito.ArgumentMatchers) StreamSegmentRecord(io.pravega.controller.store.stream.records.StreamSegmentRecord) AssertExtensions(io.pravega.test.common.AssertExtensions) Random(java.util.Random) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) VersionedMetadata(io.pravega.controller.store.VersionedMetadata) Pair(org.apache.commons.lang3.tuple.Pair) Stream(io.pravega.client.stream.Stream) Duration(java.time.Duration) Map(java.util.Map) After(org.junit.After) Mockito.doAnswer(org.mockito.Mockito.doAnswer) Controller(io.pravega.controller.stream.api.grpc.v1.Controller) ReaderGroupConfig(io.pravega.client.stream.ReaderGroupConfig) DeleteScopeStatus(io.pravega.controller.stream.api.grpc.v1.Controller.DeleteScopeStatus) EpochTransitionRecord(io.pravega.controller.store.stream.records.EpochTransitionRecord) StreamCutReferenceRecord(io.pravega.controller.store.stream.records.StreamCutReferenceRecord) StreamTruncationRecord(io.pravega.controller.store.stream.records.StreamTruncationRecord) ImmutableMap(com.google.common.collect.ImmutableMap) Set(java.util.Set) CompletionException(java.util.concurrent.CompletionException) UUID(java.util.UUID) Collectors(java.util.stream.Collectors) List(java.util.List) Assert.assertFalse(org.junit.Assert.assertFalse) Optional(java.util.Optional) HistoryTimeSeries(io.pravega.controller.store.stream.records.HistoryTimeSeries) Futures(io.pravega.common.concurrent.Futures) Segment(io.pravega.client.segment.impl.Segment) CommittingTransactionsRecord(io.pravega.controller.store.stream.records.CommittingTransactionsRecord) NameUtils.computeSegmentId(io.pravega.shared.NameUtils.computeSegmentId) RetentionPolicy(io.pravega.client.stream.RetentionPolicy) Exceptions(io.pravega.common.Exceptions) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) RetentionSet(io.pravega.controller.store.stream.records.RetentionSet) Mockito.spy(org.mockito.Mockito.spy) ArrayList(java.util.ArrayList) Strings(com.google.common.base.Strings) ReaderGroupConfigRecord(io.pravega.controller.store.stream.records.ReaderGroupConfigRecord) Lists(com.google.common.collect.Lists) StreamCutImpl(io.pravega.client.stream.impl.StreamCutImpl) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) RecordHelper(io.pravega.controller.store.stream.records.RecordHelper) SimpleEntry(java.util.AbstractMap.SimpleEntry) SealedSegmentsMapShard(io.pravega.controller.store.stream.records.SealedSegmentsMapShard) Before(org.junit.Before) NameUtils(io.pravega.shared.NameUtils) Assert.assertNotNull(org.junit.Assert.assertNotNull) WriterMark(io.pravega.controller.store.stream.records.WriterMark) Assert.assertTrue(org.junit.Assert.assertTrue) Test(org.junit.Test) StreamCutRecord(io.pravega.controller.store.stream.records.StreamCutRecord) TxnResource(io.pravega.controller.store.task.TxnResource) Assert.assertNotEquals(org.junit.Assert.assertNotEquals) ExecutionException(java.util.concurrent.ExecutionException) StreamConfigurationRecord(io.pravega.controller.store.stream.records.StreamConfigurationRecord) AbstractMap(java.util.AbstractMap) EpochRecord(io.pravega.controller.store.stream.records.EpochRecord) Assert.assertNull(org.junit.Assert.assertNull) Version(io.pravega.controller.store.Version) ExecutorServiceHelpers(io.pravega.common.concurrent.ExecutorServiceHelpers) Assert(org.junit.Assert) Collections(java.util.Collections) Mockito.reset(org.mockito.Mockito.reset) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) Assert.assertEquals(org.junit.Assert.assertEquals)
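
Segment ids such as computeSegmentId(4, 2) above combine a segment number with the epoch in which the segment was created. A sketch of the packing these ids follow; the real implementation is NameUtils.computeSegmentId in io.pravega.shared.NameUtils, and the exact layout below is stated as an assumption:

// assumed layout: creation epoch in the high 32 bits, segment number in the low 32 bits
static long computeSegmentId(int segmentNumber, int epoch) {
    return ((long) epoch << 32) | (segmentNumber & 0xFFFFFFFFL);
}
// e.g. computeSegmentId(1, 0) == 1L, while computeSegmentId(4, 2) == (2L << 32) + 4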

Example 10 with StreamConfigurationRecord

Use of io.pravega.controller.store.stream.records.StreamConfigurationRecord in project pravega by pravega.

From the class StreamMetadataTasksTest, method consumptionBasedRetentionWithNoSubscriber.

@Test(timeout = 30000)
public void consumptionBasedRetentionWithNoSubscriber() throws Exception {
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final RetentionPolicy retentionPolicy = RetentionPolicy.byTime(Duration.ofMillis(0L), Duration.ofMillis(Long.MAX_VALUE));
    String stream1 = "consumptionSize4";
    StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
    streamStorePartialMock.createStream(SCOPE, stream1, configuration, System.currentTimeMillis(), null, executor).get();
    streamStorePartialMock.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
    configuration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).retentionPolicy(retentionPolicy).build();
    streamStorePartialMock.startUpdateConfiguration(SCOPE, stream1, configuration, null, executor).join();
    VersionedMetadata<StreamConfigurationRecord> configRecord = streamStorePartialMock.getConfigurationRecord(SCOPE, stream1, null, executor).join();
    streamStorePartialMock.completeUpdateConfiguration(SCOPE, stream1, configRecord, null, executor).join();
    // example:
    // | s0 | s2           | s7 |
    // |    |              |
    // |    |              |
    // |    |    | s4 | s6 | s8 | s10
    // | s1 | s3 | s5 |    | s9 |
    // valid stream cuts: { s0/off, s9/off, s2/-1, s8/-1}, { s1/off, s2/-1 }
    // lower bound = { s0/off, s1/off }
    long two = NameUtils.computeSegmentId(2, 1);
    long three = NameUtils.computeSegmentId(3, 1);
    long four = NameUtils.computeSegmentId(4, 2);
    long five = NameUtils.computeSegmentId(5, 2);
    long six = NameUtils.computeSegmentId(6, 3);
    long seven = NameUtils.computeSegmentId(7, 4);
    long eight = NameUtils.computeSegmentId(8, 4);
    long nine = NameUtils.computeSegmentId(9, 4);
    long ten = NameUtils.computeSegmentId(10, 5);
    // 0, 1 -> 2, 3 with different split
    scale(SCOPE, stream1, ImmutableMap.of(0L, 1L, 1L, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 0.6), new AbstractMap.SimpleEntry<>(0.6, 1.0)));
    // s3 -> 4, 5
    scale(SCOPE, stream1, ImmutableMap.of(three, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.6, 0.8), new AbstractMap.SimpleEntry<>(0.8, 1.0)));
    // 4,5 -> 6
    scale(SCOPE, stream1, ImmutableMap.of(four, 1L, five, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.6, 1.0)));
    // 2, 6 -> 7, 8, 9
    scale(SCOPE, stream1, ImmutableMap.of(two, 1L, six, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 0.3), new AbstractMap.SimpleEntry<>(0.3, 0.6), new AbstractMap.SimpleEntry<>(0.6, 1.0)));
    // 7, 8, 9 -> 10
    scale(SCOPE, stream1, ImmutableMap.of(seven, 1L, eight, 1L, nine, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)));
    assertNotEquals(0, consumer.getCurrentSegments(SCOPE, stream1, 0L).get().size());
    doReturn(CompletableFuture.completedFuture(Controller.CreateStreamStatus.Status.SUCCESS)).when(streamMetadataTasks).createRGStream(anyString(), anyString(), any(), anyLong(), anyInt(), anyLong());
    WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
    streamMetadataTasks.setRequestEventWriter(requestEventWriter);
    streamMetadataTasks.setRetentionFrequencyMillis(1L);
    Map<Long, Long> map1 = new HashMap<>();
    map1.put(ten, 2L);
    long size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, stream1, map1, Optional.empty(), null, executor).join();
    doReturn(CompletableFuture.completedFuture(new StreamCutRecord(1L, size, ImmutableMap.copyOf(map1)))).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
    // call retention and verify that retention policy applies
    streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, 1L, null, "").join();
    // now retention set has one stream cut 10/2
    // there are no subscribers, so truncation should happen at the retention stream cut 10/2
    VersionedMetadata<StreamTruncationRecord> truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
    assertEquals(truncationRecord.getObject().getStreamCut().get(ten).longValue(), 2L);
    assertTrue(truncationRecord.getObject().isUpdating());
    streamStorePartialMock.completeTruncation(SCOPE, stream1, truncationRecord, null, executor).join();
}
Also used : ScalingPolicy(io.pravega.client.stream.ScalingPolicy) StreamTruncationRecord(io.pravega.controller.store.stream.records.StreamTruncationRecord) HashMap(java.util.HashMap) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) RetentionPolicy(io.pravega.client.stream.RetentionPolicy) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) AtomicLong(java.util.concurrent.atomic.AtomicLong) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) StreamConfigurationRecord(io.pravega.controller.store.stream.records.StreamConfigurationRecord) ControllerEventStreamWriterMock(io.pravega.controller.mocks.ControllerEventStreamWriterMock) EventStreamWriterMock(io.pravega.controller.mocks.EventStreamWriterMock) RGStreamCutRecord(io.pravega.shared.controller.event.RGStreamCutRecord) StreamCutRecord(io.pravega.controller.store.stream.records.StreamCutRecord) Test(org.junit.Test)
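
For concreteness, the ids used in the assertions above expand as follows under the epoch-packing layout sketched after Example 9 (values shown only for illustration):

long two = NameUtils.computeSegmentId(2, 1);   // (1L << 32) + 2  == 4294967298L
long ten = NameUtils.computeSegmentId(10, 5);  // (5L << 32) + 10 == 21474836490L
// the truncation record therefore keys its stream cut on this packed value of segment 10, not on the raw number 10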

Aggregations

StreamConfigurationRecord (io.pravega.controller.store.stream.records.StreamConfigurationRecord): 28 uses
StreamConfiguration (io.pravega.client.stream.StreamConfiguration): 23 uses
Test (org.junit.Test): 22 uses
ArgumentMatchers.anyString (org.mockito.ArgumentMatchers.anyString): 16 uses
ScalingPolicy (io.pravega.client.stream.ScalingPolicy): 14 uses
VersionedMetadata (io.pravega.controller.store.VersionedMetadata): 10 uses
HashMap (java.util.HashMap): 10 uses
CompletableFuture (java.util.concurrent.CompletableFuture): 10 uses
StreamTruncationRecord (io.pravega.controller.store.stream.records.StreamTruncationRecord): 9 uses
ArgumentMatchers.anyLong (org.mockito.ArgumentMatchers.anyLong): 9 uses
AtomicLong (java.util.concurrent.atomic.AtomicLong): 8 uses
RetentionPolicy (io.pravega.client.stream.RetentionPolicy): 7 uses
Lists (com.google.common.collect.Lists): 6 uses
Segment (io.pravega.client.segment.impl.Segment): 6 uses
ReaderGroupConfig (io.pravega.client.stream.ReaderGroupConfig): 6 uses
Stream (io.pravega.client.stream.Stream): 6 uses
StreamCut (io.pravega.client.stream.StreamCut): 6 uses
StreamCutImpl (io.pravega.client.stream.impl.StreamCutImpl): 6 uses
Exceptions (io.pravega.common.Exceptions): 6 uses
ExecutorServiceHelpers (io.pravega.common.concurrent.ExecutorServiceHelpers): 6 uses