
Example 1 with StreamTruncationRecord

Use of io.pravega.controller.store.stream.tables.StreamTruncationRecord in project pravega by pravega.

From class StreamMetadataTasksTest, method sizeBasedRetentionStreamTest.

@Test(timeout = 30000)
public void sizeBasedRetentionStreamTest() throws Exception {
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final RetentionPolicy retentionPolicy = RetentionPolicy.builder().retentionType(RetentionPolicy.RetentionType.SIZE).retentionParam(100L).build();
    String streamName = "test";
    final StreamConfiguration configuration = StreamConfiguration.builder().scope(SCOPE).streamName(streamName).scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
    streamStorePartialMock.createStream(SCOPE, streamName, configuration, System.currentTimeMillis(), null, executor).get();
    streamStorePartialMock.setState(SCOPE, streamName, State.ACTIVE, null, executor).get();
    assertNotEquals(0, consumer.getCurrentSegments(SCOPE, streamName).get().size());
    WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
    streamMetadataTasks.setRequestEventWriter(requestEventWriter);
    // region size based retention on stream cuts on epoch 0
    // region no previous streamcut
    // first retention iteration
    // streamcut1: 19 bytes (0/9, 1/10)
    long recordingTime1 = System.currentTimeMillis();
    Map<Integer, Long> map1 = new HashMap<>();
    map1.put(0, 9L);
    map1.put(1, 10L);
    long size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, streamName, map1, null, executor).join();
    assertEquals(size, 19);
    StreamCutRecord streamCut1 = new StreamCutRecord(recordingTime1, size, map1);
    doReturn(CompletableFuture.completedFuture(streamCut1)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any());
    streamMetadataTasks.retention(SCOPE, streamName, retentionPolicy, recordingTime1, null, "").get();
    // verify that one streamCut is generated and added.
    List<StreamCutRecord> list = streamStorePartialMock.getStreamCutsFromRetentionSet(SCOPE, streamName, null, executor).get();
    assertTrue(list.contains(streamCut1));
    // endregion
    // region stream cut exists but latest - previous < retention.size
    // second retention iteration
    // streamcut2: 100 bytes (0/50, 1/50)
    Map<Integer, Long> map2 = new HashMap<>();
    map2.put(0, 50L);
    map2.put(1, 50L);
    long recordingTime2 = recordingTime1 + Duration.ofMinutes(Config.MINIMUM_RETENTION_FREQUENCY_IN_MINUTES).toMillis() + 1;
    size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, streamName, map2, null, executor).join();
    assertEquals(size, 100L);
    StreamCutRecord streamCut2 = new StreamCutRecord(recordingTime2, size, map2);
    doReturn(CompletableFuture.completedFuture(streamCut2)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), anyString());
    streamMetadataTasks.retention(SCOPE, streamName, retentionPolicy, recordingTime2, null, "").get();
    list = streamStorePartialMock.getStreamCutsFromRetentionSet(SCOPE, streamName, null, executor).get();
    StreamProperty<StreamTruncationRecord> truncProp = streamStorePartialMock.getTruncationProperty(SCOPE, streamName, true, null, executor).get();
    // verify that two stream cuts are in the retention set; streamCut2 was added
    // verify that truncation did not happen
    assertTrue(list.contains(streamCut1));
    assertTrue(list.contains(streamCut2));
    assertFalse(truncProp.isUpdating());
    // endregion
    // region latest - previous > retention.size
    // third retention iteration
    // streamcut3: 120 bytes (0/60, 1/60)
    Map<Integer, Long> map3 = new HashMap<>();
    map3.put(0, 60L);
    map3.put(1, 60L);
    size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, streamName, map3, null, executor).join();
    assertEquals(size, 120L);
    long recordingTime3 = recordingTime2 + Duration.ofMinutes(Config.MINIMUM_RETENTION_FREQUENCY_IN_MINUTES).toMillis() + 1;
    StreamCutRecord streamCut3 = new StreamCutRecord(recordingTime3, size, map3);
    doReturn(CompletableFuture.completedFuture(streamCut3)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), anyString());
    streamMetadataTasks.retention(SCOPE, streamName, retentionPolicy, recordingTime3, null, "").get();
    // verify two stream cuts are in the retention set: cut 2 and cut 3.
    // verify that truncation has happened.
    list = streamStorePartialMock.getStreamCutsFromRetentionSet(SCOPE, streamName, null, executor).get();
    truncProp = streamStorePartialMock.getTruncationProperty(SCOPE, streamName, true, null, executor).get();
    assertFalse(list.contains(streamCut1));
    assertTrue(list.contains(streamCut2));
    assertTrue(list.contains(streamCut3));
    assertTrue(truncProp.isUpdating());
    assertTrue(truncProp.getProperty().getStreamCut().get(0) == 9L && truncProp.getProperty().getStreamCut().get(1) == 10L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    truncProp = streamStorePartialMock.getTruncationProperty(SCOPE, streamName, true, null, executor).get();
    assertFalse(truncProp.isUpdating());
    // endregion
    // endregion
    // region test retention over multiple epochs
    // scale1 --> seal segments 0 and 1 and create segments 2 and 3; sealed sizes (0/70, 1/70)
    List<AbstractMap.SimpleEntry<Double, Double>> newRanges = new ArrayList<>();
    newRanges.add(new AbstractMap.SimpleEntry<>(0.0, 0.5));
    newRanges.add(new AbstractMap.SimpleEntry<>(0.5, 1.0));
    Map<Integer, Long> sealedSegmentsWithSize = new HashMap<>();
    sealedSegmentsWithSize.put(0, 70L);
    sealedSegmentsWithSize.put(1, 70L);
    scale(SCOPE, streamName, sealedSegmentsWithSize, newRanges);
    // region latest streamcut on new epoch but latest (newepoch) - previous (oldepoch) < retention.size
    // 4th retention iteration
    // streamcut4: 199 bytes (2/29, 3/30)
    Map<Integer, Long> map4 = new HashMap<>();
    map4.put(2, 29L);
    map4.put(3, 30L);
    size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, streamName, map4, null, executor).join();
    assertEquals(size, 199L);
    long recordingTime4 = recordingTime3 + Duration.ofMinutes(Config.MINIMUM_RETENTION_FREQUENCY_IN_MINUTES).toMillis() + 1;
    StreamCutRecord streamCut4 = new StreamCutRecord(recordingTime4, size, map4);
    doReturn(CompletableFuture.completedFuture(streamCut4)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), anyString());
    streamMetadataTasks.retention(SCOPE, streamName, retentionPolicy, recordingTime4, null, "").get();
    list = streamStorePartialMock.getStreamCutsFromRetentionSet(SCOPE, streamName, null, executor).get();
    truncProp = streamStorePartialMock.getTruncationProperty(SCOPE, streamName, true, null, executor).get();
    assertFalse(list.contains(streamCut1));
    assertTrue(list.contains(streamCut2));
    assertTrue(list.contains(streamCut3));
    assertTrue(list.contains(streamCut4));
    assertFalse(truncProp.isUpdating());
    // endregion
    // region latest streamcut on new epoch but latest (newepoch) - previous (oldepoch) > retention.size
    // 5th retention iteration
    // streamcut5: 221 bytes (2/41, 3/40)
    Map<Integer, Long> map5 = new HashMap<>();
    map5.put(2, 41L);
    map5.put(3, 40L);
    size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, streamName, map5, null, executor).join();
    assertEquals(size, 221L);
    long recordingTime5 = recordingTime4 + Duration.ofMinutes(Config.MINIMUM_RETENTION_FREQUENCY_IN_MINUTES).toMillis() + 1;
    StreamCutRecord streamCut5 = new StreamCutRecord(recordingTime5, size, map5);
    doReturn(CompletableFuture.completedFuture(streamCut5)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), anyString());
    streamMetadataTasks.retention(SCOPE, streamName, retentionPolicy, recordingTime5, null, "").get();
    list = streamStorePartialMock.getStreamCutsFromRetentionSet(SCOPE, streamName, null, executor).get();
    truncProp = streamStorePartialMock.getTruncationProperty(SCOPE, streamName, true, null, executor).get();
    assertFalse(list.contains(streamCut1));
    assertFalse(list.contains(streamCut2));
    assertFalse(list.contains(streamCut3));
    assertTrue(list.contains(streamCut4));
    assertTrue(list.contains(streamCut5));
    assertTrue(truncProp.isUpdating());
    assertTrue(truncProp.getProperty().getStreamCut().get(0) == 60L && truncProp.getProperty().getStreamCut().get(1) == 60L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    truncProp = streamStorePartialMock.getTruncationProperty(SCOPE, streamName, true, null, executor).get();
    assertFalse(truncProp.isUpdating());
    // endregion
    // region test retention with external manual truncation
    // scale2 --> split segment 2 into segments 4 and 5; sealed size for segment 2 = 50
    newRanges = new ArrayList<>();
    newRanges.add(new AbstractMap.SimpleEntry<>(0.0, 0.25));
    newRanges.add(new AbstractMap.SimpleEntry<>(0.25, 0.5));
    sealedSegmentsWithSize = new HashMap<>();
    sealedSegmentsWithSize.put(2, 50L);
    scale(SCOPE, streamName, sealedSegmentsWithSize, newRanges);
    // region add streamcut on new epoch such that latest - oldest < retention.size
    // streamcut6: 290 bytes (3/40, 4/30, 5/30)
    // verify that no new truncation happens.
    Map<Integer, Long> map6 = new HashMap<>();
    map6.put(3, 40L);
    map6.put(4, 30L);
    map6.put(5, 30L);
    size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, streamName, map6, null, executor).join();
    assertEquals(size, 290L);
    long recordingTime6 = recordingTime5 + Duration.ofMinutes(Config.MINIMUM_RETENTION_FREQUENCY_IN_MINUTES).toMillis() + 1;
    StreamCutRecord streamCut6 = new StreamCutRecord(recordingTime6, size, map6);
    doReturn(CompletableFuture.completedFuture(streamCut6)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), anyString());
    streamMetadataTasks.retention(SCOPE, streamName, retentionPolicy, recordingTime6, null, "").get();
    list = streamStorePartialMock.getStreamCutsFromRetentionSet(SCOPE, streamName, null, executor).get();
    truncProp = streamStorePartialMock.getTruncationProperty(SCOPE, streamName, true, null, executor).get();
    assertFalse(list.contains(streamCut1));
    assertFalse(list.contains(streamCut2));
    assertFalse(list.contains(streamCut3));
    assertTrue(list.contains(streamCut4));
    assertTrue(list.contains(streamCut5));
    assertTrue(list.contains(streamCut6));
    assertFalse(truncProp.isUpdating());
    // endregion
    // manually truncate at streamCutManual: (1/65, 4/10, 5/10)
    Map<Integer, Long> streamCutManual = new HashMap<>();
    streamCutManual.put(1, 65L);
    streamCutManual.put(4, 10L);
    streamCutManual.put(5, 10L);
    CompletableFuture<UpdateStreamStatus.Status> future = streamMetadataTasks.truncateStream(SCOPE, streamName, streamCutManual, null);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    assertTrue(Futures.await(future));
    assertEquals(future.join(), UpdateStreamStatus.Status.SUCCESS);
    // streamcut7: 340 bytes (3/50, 4/50, 5/50)
    Map<Integer, Long> map7 = new HashMap<>();
    map7.put(3, 50L);
    map7.put(4, 50L);
    map7.put(5, 50L);
    size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, streamName, map7, null, executor).join();
    assertEquals(size, 340L);
    long recordingTime7 = recordingTime6 + Duration.ofMinutes(Config.MINIMUM_RETENTION_FREQUENCY_IN_MINUTES).toMillis() + 1;
    StreamCutRecord streamCut7 = new StreamCutRecord(recordingTime7, size, map7);
    doReturn(CompletableFuture.completedFuture(streamCut7)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), anyString());
    // verify no new truncation: streamCut5 would be chosen but is discarded because it is not strictly ahead of the truncation record
    streamMetadataTasks.retention(SCOPE, streamName, retentionPolicy, recordingTime7, null, "").join();
    list = streamStorePartialMock.getStreamCutsFromRetentionSet(SCOPE, streamName, null, executor).get();
    truncProp = streamStorePartialMock.getTruncationProperty(SCOPE, streamName, true, null, executor).get();
    assertFalse(list.contains(streamCut1));
    assertFalse(list.contains(streamCut2));
    assertFalse(list.contains(streamCut3));
    assertTrue(list.contains(streamCut4));
    assertTrue(list.contains(streamCut5));
    assertTrue(list.contains(streamCut6));
    assertTrue(list.contains(streamCut7));
    assertFalse(truncProp.isUpdating());
    // streamcut8: 400 bytes (3/70, 4/70, 5/70)
    Map<Integer, Long> map8 = new HashMap<>();
    map8.put(3, 70L);
    map8.put(4, 70L);
    map8.put(5, 70L);
    size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, streamName, map8, null, executor).join();
    assertEquals(size, 400L);
    long recordingTime8 = recordingTime7 + Duration.ofMinutes(Config.MINIMUM_RETENTION_FREQUENCY_IN_MINUTES).toMillis() + 1;
    StreamCutRecord streamCut8 = new StreamCutRecord(recordingTime8, size, map8);
    doReturn(CompletableFuture.completedFuture(streamCut8)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), anyString());
    streamMetadataTasks.retention(SCOPE, streamName, retentionPolicy, recordingTime8, null, "").get();
    list = streamStorePartialMock.getStreamCutsFromRetentionSet(SCOPE, streamName, null, executor).get();
    truncProp = streamStorePartialMock.getTruncationProperty(SCOPE, streamName, true, null, executor).get();
    // verify truncation happens at streamcut6
    assertFalse(list.contains(streamCut1));
    assertFalse(list.contains(streamCut2));
    assertFalse(list.contains(streamCut3));
    assertFalse(list.contains(streamCut4));
    assertFalse(list.contains(streamCut5));
    assertFalse(list.contains(streamCut6));
    assertTrue(list.contains(streamCut7));
    assertTrue(truncProp.isUpdating());
    assertTrue(truncProp.getProperty().getStreamCut().get(3) == 40L && truncProp.getProperty().getStreamCut().get(4) == 30L && truncProp.getProperty().getStreamCut().get(5) == 30L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    truncProp = streamStorePartialMock.getTruncationProperty(SCOPE, streamName, true, null, executor).get();
    assertFalse(truncProp.isUpdating());
    // endregion
    // endregion
}
Also used:
ScaleStreamStatus (io.pravega.controller.stream.api.grpc.v1.Controller.ScaleResponse.ScaleStreamStatus)
UpdateStreamStatus (io.pravega.controller.stream.api.grpc.v1.Controller.UpdateStreamStatus)
ScalingPolicy (io.pravega.client.stream.ScalingPolicy)
StreamTruncationRecord (io.pravega.controller.store.stream.tables.StreamTruncationRecord)
HashMap (java.util.HashMap)
ArrayList (java.util.ArrayList)
ArgumentMatchers.anyString (org.mockito.ArgumentMatchers.anyString)
RetentionPolicy (io.pravega.client.stream.RetentionPolicy)
AbstractMap (java.util.AbstractMap)
StreamConfiguration (io.pravega.client.stream.StreamConfiguration)
ControllerEventStreamWriterMock (io.pravega.controller.mocks.ControllerEventStreamWriterMock)
StreamCutRecord (io.pravega.controller.store.stream.StreamCutRecord)
Test (org.junit.Test)
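
The scenario above reduces to one rule. Below is a minimal, self-contained sketch of that rule, not Pravega's API; CutRecord, chooseTruncationCut, and retentionBytes are hypothetical names, and Java records are used for brevity. Ordered oldest to newest, the retention set is scanned for the most recent cut lying more than retentionParam bytes behind the newest cut; the stream is truncated there, and that cut plus everything older leaves the set.

import java.util.List;
import java.util.Optional;

class SizeBasedRetentionSketch {

    // (recordingTime, sizeTillCut) pair; segment offsets omitted for brevity
    record CutRecord(long recordingTime, long sizeTillCut) { }

    // retentionSet is ordered oldest to newest, as in the test above
    static Optional<CutRecord> chooseTruncationCut(List<CutRecord> retentionSet, long retentionBytes) {
        if (retentionSet.isEmpty()) {
            return Optional.empty();
        }
        long latestSize = retentionSet.get(retentionSet.size() - 1).sizeTillCut();
        Optional<CutRecord> candidate = Optional.empty();
        for (CutRecord record : retentionSet) {
            // keep the newest cut that still leaves more than retentionBytes of data ahead of it
            if (latestSize - record.sizeTillCut() > retentionBytes) {
                candidate = Optional.of(record);
            }
        }
        return candidate;
    }
}

With the test's numbers, the third iteration has latestSize = 120, so streamCut1 (size 19) qualifies (120 - 19 > 100) and truncation starts at offsets (0/9, 1/10), exactly what the assertions check.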

Example 2 with StreamTruncationRecord

Use of io.pravega.controller.store.stream.tables.StreamTruncationRecord in project pravega by pravega.

From class StreamMetadataTasksTest, method truncateStreamTest.

@Test(timeout = 30000)
public void truncateStreamTest() throws Exception {
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration = StreamConfiguration.builder().scope(SCOPE).streamName("test").scalingPolicy(policy).build();
    streamStorePartialMock.createStream(SCOPE, "test", configuration, System.currentTimeMillis(), null, executor).get();
    streamStorePartialMock.setState(SCOPE, "test", State.ACTIVE, null, executor).get();
    assertNotEquals(0, consumer.getCurrentSegments(SCOPE, "test").get().size());
    WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
    streamMetadataTasks.setRequestEventWriter(requestEventWriter);
    List<AbstractMap.SimpleEntry<Double, Double>> newRanges = new ArrayList<>();
    newRanges.add(new AbstractMap.SimpleEntry<>(0.5, 0.75));
    newRanges.add(new AbstractMap.SimpleEntry<>(0.75, 1.0));
    ScaleResponse scaleOpResult = streamMetadataTasks.manualScale(SCOPE, "test", Collections.singletonList(1), newRanges, 30, null).get();
    assertTrue(scaleOpResult.getStatus().equals(ScaleStreamStatus.STARTED));
    ScaleOperationTask scaleTask = new ScaleOperationTask(streamMetadataTasks, streamStorePartialMock, executor);
    assertTrue(Futures.await(scaleTask.execute((ScaleOpEvent) requestEventWriter.eventQueue.take())));
    // start truncation
    StreamProperty<StreamTruncationRecord> truncProp = streamStorePartialMock.getTruncationProperty(SCOPE, "test", true, null, executor).join();
    assertFalse(truncProp.isUpdating());
    // 1. happy day test
    // the update should succeed
    Map<Integer, Long> streamCut = new HashMap<>();
    streamCut.put(0, 1L);
    streamCut.put(1, 11L);
    CompletableFuture<UpdateStreamStatus.Status> truncateFuture = streamMetadataTasks.truncateStream(SCOPE, "test", streamCut, null);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    assertEquals(UpdateStreamStatus.Status.SUCCESS, truncateFuture.join());
    truncProp = streamStorePartialMock.getTruncationProperty(SCOPE, "test", true, null, executor).join();
    assertTrue(truncProp.getProperty().getStreamCut().equals(streamCut));
    // 2. change state to scaling
    streamStorePartialMock.setState(SCOPE, "test", State.SCALING, null, executor).get();
    // the truncate request still posts the event and sets the truncation property, but processing it should fail while the state is SCALING
    Map<Integer, Long> streamCut2 = new HashMap<>();
    streamCut2.put(0, 1L);
    streamCut2.put(2, 1L);
    streamCut2.put(3, 1L);
    streamMetadataTasks.truncateStream(SCOPE, "test", streamCut2, null);
    AtomicBoolean loop = new AtomicBoolean(false);
    Futures.loop(() -> !loop.get(), () -> streamStorePartialMock.getTruncationProperty(SCOPE, "test", true, null, executor).thenApply(StreamProperty::isUpdating).thenAccept(loop::set), executor).join();
    // event posted, first step performed. now pick the event for processing
    TruncateStreamTask truncateStreamTask = new TruncateStreamTask(streamMetadataTasks, streamStorePartialMock, executor);
    TruncateStreamEvent taken = (TruncateStreamEvent) requestEventWriter.eventQueue.take();
    AssertExtensions.assertThrows("", truncateStreamTask.execute(taken), e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
    streamStorePartialMock.setState(SCOPE, "test", State.ACTIVE, null, executor).get();
    // now, with state ACTIVE, process the same event; it should succeed.
    assertTrue(Futures.await(truncateStreamTask.execute(taken)));
    // 3. multiple back to back updates.
    Map<Integer, Long> streamCut3 = new HashMap<>();
    streamCut3.put(0, 12L);
    streamCut3.put(2, 12L);
    streamCut3.put(3, 12L);
    CompletableFuture<UpdateStreamStatus.Status> truncateOp1 = streamMetadataTasks.truncateStream(SCOPE, "test", streamCut3, null);
    // ensure that the previous truncate request has posted the event and set the status to updating;
    // only then issue the second truncate request
    AtomicBoolean loop2 = new AtomicBoolean(false);
    Futures.loop(() -> !loop2.get(), () -> streamStorePartialMock.getTruncationProperty(SCOPE, "test", true, null, executor).thenApply(StreamProperty::isUpdating).thenAccept(loop2::set), executor).join();
    truncProp = streamStorePartialMock.getTruncationProperty(SCOPE, "test", true, null, executor).join();
    assertTrue(truncProp.getProperty().getStreamCut().equals(streamCut3) && truncProp.isUpdating());
    // post the second truncate request; it should fail immediately since the previous one is already in progress.
    Map<Integer, Long> streamCut4 = new HashMap<>();
    streamCut4.put(0, 14L);
    streamCut4.put(2, 14L);
    streamCut4.put(3, 14L);
    CompletableFuture<UpdateStreamStatus.Status> truncateOpFuture2 = streamMetadataTasks.truncateStream(SCOPE, "test", streamCut4, null);
    assertEquals(UpdateStreamStatus.Status.FAILURE, truncateOpFuture2.join());
    // process event
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    // verify that the first truncate request also completes with success.
    assertEquals(UpdateStreamStatus.Status.SUCCESS, truncateOp1.join());
    truncProp = streamStorePartialMock.getTruncationProperty(SCOPE, "test", true, null, executor).join();
    assertTrue(truncProp.getProperty().getStreamCut().equals(streamCut3) && !truncProp.isUpdating());
}
Also used:
StreamTruncationRecord (io.pravega.controller.store.stream.tables.StreamTruncationRecord)
HashMap (java.util.HashMap)
ArrayList (java.util.ArrayList)
AbstractMap (java.util.AbstractMap)
TruncateStreamEvent (io.pravega.shared.controller.event.TruncateStreamEvent)
StreamConfiguration (io.pravega.client.stream.StreamConfiguration)
ControllerEventStreamWriterMock (io.pravega.controller.mocks.ControllerEventStreamWriterMock)
ScaleStreamStatus (io.pravega.controller.stream.api.grpc.v1.Controller.ScaleResponse.ScaleStreamStatus)
UpdateStreamStatus (io.pravega.controller.stream.api.grpc.v1.Controller.UpdateStreamStatus)
ScalingPolicy (io.pravega.client.stream.ScalingPolicy)
StartScaleResponse (io.pravega.controller.store.stream.StartScaleResponse)
ScaleResponse (io.pravega.controller.stream.api.grpc.v1.Controller.ScaleResponse)
ScaleOperationTask (io.pravega.controller.server.eventProcessor.requesthandlers.ScaleOperationTask)
StoreException (io.pravega.controller.store.stream.StoreException)
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean)
TruncateStreamTask (io.pravega.controller.server.eventProcessor.requesthandlers.TruncateStreamTask)
Test (org.junit.Test)
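
The back-to-back section of this test pins down first-wins semantics: a second truncate request fails fast while one is in flight. Below is a minimal sketch of such a guard using a compare-and-set flag; the names are hypothetical, and the real controller persists the updating flag in the truncation record in the metadata store rather than holding it in memory.

import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;

class TruncationGuardSketch {

    enum Status { SUCCESS, FAILURE }

    // stand-in for the persisted "updating" bit on the truncation record
    private final AtomicBoolean updating = new AtomicBoolean(false);

    Status startTruncation(Map<Integer, Long> streamCut) {
        // compareAndSet admits a single winner; the loser fails immediately,
        // mirroring truncateOpFuture2 completing with Status.FAILURE above
        if (!updating.compareAndSet(false, true)) {
            return Status.FAILURE;
        }
        // ... post the TruncateStreamEvent and persist the requested stream cut ...
        return Status.SUCCESS;
    }

    void completeTruncation() {
        updating.set(false);
    }
}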

Example 3 with StreamTruncationRecord

Use of io.pravega.controller.store.stream.tables.StreamTruncationRecord in project pravega by pravega.

From class StreamMetadataTasksTest, method timeBasedRetentionStreamTest.

@Test(timeout = 30000)
public void timeBasedRetentionStreamTest() throws Exception {
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final RetentionPolicy retentionPolicy = RetentionPolicy.builder().retentionType(RetentionPolicy.RetentionType.TIME).retentionParam(Duration.ofMinutes(60).toMillis()).build();
    final StreamConfiguration configuration = StreamConfiguration.builder().scope(SCOPE).streamName("test").scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
    streamStorePartialMock.createStream(SCOPE, "test", configuration, System.currentTimeMillis(), null, executor).get();
    streamStorePartialMock.setState(SCOPE, "test", State.ACTIVE, null, executor).get();
    assertNotEquals(0, consumer.getCurrentSegments(SCOPE, "test").get().size());
    WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
    streamMetadataTasks.setRequestEventWriter(requestEventWriter);
    long recordingTime1 = System.currentTimeMillis();
    Map<Integer, Long> map1 = new HashMap<>();
    map1.put(0, 1L);
    map1.put(1, 1L);
    StreamCutRecord streamCut1 = new StreamCutRecord(recordingTime1, Long.MIN_VALUE, map1);
    // mock only the isTransactionOngoing call
    doReturn(CompletableFuture.completedFuture(streamCut1)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any());
    streamMetadataTasks.retention(SCOPE, "test", retentionPolicy, recordingTime1, null, "").get();
    // verify that one streamCut is generated and added.
    List<StreamCutRecord> list = streamStorePartialMock.getStreamCutsFromRetentionSet(SCOPE, "test", null, executor).get();
    assertTrue(list.contains(streamCut1));
    Map<Integer, Long> map2 = new HashMap<>();
    map2.put(0, 10L);
    map2.put(1, 10L);
    long recordingTime2 = recordingTime1 + Duration.ofMinutes(5).toMillis();
    StreamCutRecord streamCut2 = new StreamCutRecord(recordingTime2, Long.MIN_VALUE, map2);
    // mock only the isTransactionOngoing call
    doReturn(CompletableFuture.completedFuture(streamCut2)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any());
    streamMetadataTasks.retention(SCOPE, "test", retentionPolicy, recordingTime2, null, "").get();
    list = streamStorePartialMock.getStreamCutsFromRetentionSet(SCOPE, "test", null, executor).get();
    StreamProperty<StreamTruncationRecord> truncProp = streamStorePartialMock.getTruncationProperty(SCOPE, "test", true, null, executor).get();
    // verify that only one stream cut is in retention set. streamCut2 is not added
    // verify that truncation did not happen
    assertTrue(list.contains(streamCut1));
    assertFalse(list.contains(streamCut2));
    assertFalse(truncProp.isUpdating());
    Map<Integer, Long> map3 = new HashMap<>();
    map3.put(0, 20L);
    map3.put(1, 20L);
    long recordingTime3 = recordingTime1 + Duration.ofMinutes(Config.MINIMUM_RETENTION_FREQUENCY_IN_MINUTES).toMillis() + 1;
    StreamCutRecord streamCut3 = new StreamCutRecord(recordingTime3, Long.MIN_VALUE, map3);
    // mock only the isTransactionOngoing call
    doReturn(CompletableFuture.completedFuture(streamCut3)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any());
    streamMetadataTasks.retention(SCOPE, "test", retentionPolicy, recordingTime3, null, "").get();
    // verify two stream cuts are in the retention set: cut 1 and cut 3.
    // verify that truncation has not happened.
    list = streamStorePartialMock.getStreamCutsFromRetentionSet(SCOPE, "test", null, executor).get();
    truncProp = streamStorePartialMock.getTruncationProperty(SCOPE, "test", true, null, executor).get();
    assertTrue(list.contains(streamCut1));
    assertFalse(list.contains(streamCut2));
    assertTrue(list.contains(streamCut3));
    assertFalse(truncProp.isUpdating());
    Map<Integer, Long> map4 = new HashMap<>();
    map4.put(0, 20L);
    map4.put(1, 20L);
    long recordingTime4 = recordingTime1 + retentionPolicy.getRetentionParam() + 2;
    StreamCutRecord streamCut4 = new StreamCutRecord(recordingTime4, Long.MIN_VALUE, map4);
    doReturn(CompletableFuture.completedFuture(streamCut4)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any());
    streamMetadataTasks.retention(SCOPE, "test", retentionPolicy, recordingTime4, null, "").get();
    // verify that only two stream cuts are in the retention set: streamcut 3 and 4
    // verify that truncation has started; streamCut1 is removed from the retention set as it has been used for truncation
    list = streamStorePartialMock.getStreamCutsFromRetentionSet(SCOPE, "test", null, executor).get();
    truncProp = streamStorePartialMock.getTruncationProperty(SCOPE, "test", true, null, executor).get();
    assertFalse(list.contains(streamCut1));
    assertFalse(list.contains(streamCut2));
    assertTrue(list.contains(streamCut3));
    assertTrue(list.contains(streamCut4));
    assertTrue(truncProp.isUpdating());
    assertTrue(truncProp.getProperty().getStreamCut().get(0) == 1L && truncProp.getProperty().getStreamCut().get(1) == 1L);
}
Also used:
ScalingPolicy (io.pravega.client.stream.ScalingPolicy)
StreamTruncationRecord (io.pravega.controller.store.stream.tables.StreamTruncationRecord)
HashMap (java.util.HashMap)
RetentionPolicy (io.pravega.client.stream.RetentionPolicy)
StreamConfiguration (io.pravega.client.stream.StreamConfiguration)
ControllerEventStreamWriterMock (io.pravega.controller.mocks.ControllerEventStreamWriterMock)
StreamCutRecord (io.pravega.controller.store.stream.StreamCutRecord)
Test (org.junit.Test)
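
Two rules drive this test; the sketch below states them with hypothetical names (CutRecord, shouldRecord, chooseTruncationCut), not Pravega's API. A new cut is recorded only if the newest cut in the retention set is at least Config.MINIMUM_RETENTION_FREQUENCY_IN_MINUTES old, which is why streamCut2, taken five minutes after streamCut1, never enters the set; truncation then targets the most recent cut that has aged past the retention period.

import java.util.List;
import java.util.Optional;

class TimeBasedRetentionSketch {

    record CutRecord(long recordingTime) { }

    // retentionSet is ordered oldest to newest
    static boolean shouldRecord(List<CutRecord> retentionSet, long now, long minFrequencyMillis) {
        return retentionSet.isEmpty()
                || now - retentionSet.get(retentionSet.size() - 1).recordingTime() >= minFrequencyMillis;
    }

    static Optional<CutRecord> chooseTruncationCut(List<CutRecord> retentionSet, long now, long retentionMillis) {
        Optional<CutRecord> candidate = Optional.empty();
        for (CutRecord record : retentionSet) {
            // keep the newest cut that is already older than the retention period
            if (now - record.recordingTime() > retentionMillis) {
                candidate = Optional.of(record);
            }
        }
        return candidate;
    }
}

At recordingTime4 = recordingTime1 + retentionPolicy.getRetentionParam() + 2, only streamCut1 has aged past the 60-minute window, so truncation starts at offsets (0/1, 1/1) and streamCut1 leaves the retention set, matching the final assertions.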

Example 4 with StreamTruncationRecord

Use of io.pravega.controller.store.stream.tables.StreamTruncationRecord in project pravega by pravega.

From class StreamMetadataStoreTest, method truncationTest.

@Test
public void truncationTest() throws Exception {
    final String scope = "ScopeTruncate";
    final String stream = "ScopeTruncate";
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration = StreamConfiguration.builder().scope(scope).streamName(stream).scalingPolicy(policy).build();
    long start = System.currentTimeMillis();
    store.createScope(scope).get();
    store.createStream(scope, stream, configuration, start, null, executor).get();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    Map<Integer, Long> truncation = new HashMap<>();
    truncation.put(0, 0L);
    truncation.put(1, 0L);
    assertTrue(Futures.await(store.startTruncation(scope, stream, truncation, null, executor)));
    StreamProperty<StreamTruncationRecord> truncationProperty = store.getTruncationProperty(scope, stream, true, null, executor).join();
    assertTrue(truncationProperty.isUpdating());
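    // a second startTruncation while the first one is still updating must fail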
    Map<Integer, Long> truncation2 = new HashMap<>();
    truncation2.put(0, 0L);
    truncation2.put(1, 0L);
    assertFalse(Futures.await(store.startTruncation(scope, stream, truncation2, null, executor)));
    assertTrue(Futures.await(store.completeTruncation(scope, stream, null, executor)));
    truncationProperty = store.getTruncationProperty(scope, stream, true, null, executor).join();
    assertEquals(truncation, truncationProperty.getProperty().getStreamCut());
    assertTrue(truncationProperty.getProperty().getCutEpochMap().size() == 2);
    Map<Integer, Long> truncation3 = new HashMap<>();
    truncation3.put(0, 0L);
    truncation3.put(1, 0L);
    assertTrue(Futures.await(store.startTruncation(scope, stream, truncation3, null, executor)));
    assertTrue(Futures.await(store.completeUpdateConfiguration(scope, stream, null, executor)));
}
Also used:
ScalingPolicy (io.pravega.client.stream.ScalingPolicy)
StreamTruncationRecord (io.pravega.controller.store.stream.tables.StreamTruncationRecord)
HashMap (java.util.HashMap)
StreamConfiguration (io.pravega.client.stream.StreamConfiguration)
Test (org.junit.Test)

Example 5 with StreamTruncationRecord

Use of io.pravega.controller.store.stream.tables.StreamTruncationRecord in project pravega by pravega.

From class TableHelperTest, method truncationTest.

@Test(timeout = 10000)
public void truncationTest() {
    final List<Integer> startSegments = Lists.newArrayList(0, 1);
    // epoch 0
    long timestamp = System.currentTimeMillis();
    byte[] segmentTable = createSegmentTable(2, timestamp);
    byte[] historyTable = TableHelper.createHistoryTable(timestamp, startSegments);
    byte[] indexTable = TableHelper.createIndexTable(timestamp, 0);
    List<Integer> activeSegments = TableHelper.getActiveSegments(historyTable);
    assertEquals(activeSegments, startSegments);
    // epoch 1
    List<Integer> newSegments1 = Lists.newArrayList(0, 2, 3);
    List<AbstractMap.SimpleEntry<Double, Double>> newRanges = new ArrayList<>();
    newRanges.add(new AbstractMap.SimpleEntry<Double, Double>(0.5, 0.75));
    newRanges.add(new AbstractMap.SimpleEntry<Double, Double>(0.75, 1.0));
    segmentTable = updateSegmentTable(segmentTable, newRanges, timestamp + 1);
    historyTable = TableHelper.addPartialRecordToHistoryTable(historyTable, newSegments1);
    HistoryRecord partial = HistoryRecord.readLatestRecord(historyTable, false).get();
    historyTable = TableHelper.completePartialRecordInHistoryTable(historyTable, partial, timestamp + 1);
    indexTable = TableHelper.updateIndexTable(indexTable, timestamp + 1, partial.getOffset());
    // epoch 2
    List<Integer> newSegments2 = Lists.newArrayList(0, 2, 4, 5);
    newRanges = new ArrayList<>();
    newRanges.add(new AbstractMap.SimpleEntry<Double, Double>(0.75, (0.75 + 1.0) / 2));
    newRanges.add(new AbstractMap.SimpleEntry<Double, Double>((0.75 + 1.0) / 2, 1.0));
    segmentTable = updateSegmentTable(segmentTable, newRanges, timestamp + 2);
    historyTable = TableHelper.addPartialRecordToHistoryTable(historyTable, newSegments2);
    partial = HistoryRecord.readLatestRecord(historyTable, false).get();
    historyTable = TableHelper.completePartialRecordInHistoryTable(historyTable, partial, timestamp + 2);
    indexTable = TableHelper.updateIndexTable(indexTable, timestamp + 2, partial.getOffset());
    // epoch 3
    List<Integer> newSegments3 = Lists.newArrayList(0, 4, 5, 6, 7);
    newRanges = new ArrayList<>();
    newRanges.add(new AbstractMap.SimpleEntry<Double, Double>(0.5, (0.75 + 0.5) / 2));
    newRanges.add(new AbstractMap.SimpleEntry<Double, Double>((0.75 + 0.5) / 2, 0.75));
    segmentTable = updateSegmentTable(segmentTable, newRanges, timestamp + 3);
    historyTable = TableHelper.addPartialRecordToHistoryTable(historyTable, newSegments3);
    partial = HistoryRecord.readLatestRecord(historyTable, false).get();
    historyTable = TableHelper.completePartialRecordInHistoryTable(historyTable, partial, timestamp + 3);
    indexTable = TableHelper.updateIndexTable(indexTable, timestamp + 3, partial.getOffset());
    // epoch 4
    List<Integer> newSegments4 = Lists.newArrayList(4, 5, 6, 7, 8, 9);
    newRanges = new ArrayList<>();
    newRanges.add(new AbstractMap.SimpleEntry<Double, Double>(0.0, (0.0 + 0.5) / 2));
    newRanges.add(new AbstractMap.SimpleEntry<Double, Double>((0.0 + 0.5) / 2, 0.5));
    segmentTable = updateSegmentTable(segmentTable, newRanges, timestamp + 4);
    historyTable = TableHelper.addPartialRecordToHistoryTable(historyTable, newSegments4);
    partial = HistoryRecord.readLatestRecord(historyTable, false).get();
    historyTable = TableHelper.completePartialRecordInHistoryTable(historyTable, partial, timestamp + 4);
    indexTable = TableHelper.updateIndexTable(indexTable, timestamp + 4, partial.getOffset());
    // happy day
    Map<Integer, Long> streamCut1 = new HashMap<>();
    streamCut1.put(0, 1L);
    streamCut1.put(1, 1L);
    StreamTruncationRecord truncationRecord = TableHelper.computeTruncationRecord(indexTable, historyTable, segmentTable, streamCut1, StreamTruncationRecord.EMPTY);
    assertTrue(truncationRecord.getToDelete().isEmpty());
    assertTrue(truncationRecord.getStreamCut().equals(streamCut1));
    assertTrue(truncationRecord.getCutEpochMap().get(0) == 0 && truncationRecord.getCutEpochMap().get(1) == 0);
    truncationRecord = truncationRecord.mergeDeleted();
    Map<Integer, Long> streamCut2 = new HashMap<>();
    streamCut2.put(0, 1L);
    streamCut2.put(2, 1L);
    streamCut2.put(4, 1L);
    streamCut2.put(5, 1L);
    truncationRecord = TableHelper.computeTruncationRecord(indexTable, historyTable, segmentTable, streamCut2, truncationRecord);
    assertTrue(truncationRecord.getToDelete().size() == 2 && truncationRecord.getToDelete().contains(1) && truncationRecord.getToDelete().contains(3));
    assertTrue(truncationRecord.getStreamCut().equals(streamCut2));
    assertTrue(truncationRecord.getCutEpochMap().get(0) == 2 && truncationRecord.getCutEpochMap().get(2) == 2 && truncationRecord.getCutEpochMap().get(4) == 2 && truncationRecord.getCutEpochMap().get(5) == 2);
    truncationRecord = truncationRecord.mergeDeleted();
    Map<Integer, Long> streamCut3 = new HashMap<>();
    streamCut3.put(2, 10L);
    streamCut3.put(4, 10L);
    streamCut3.put(5, 10L);
    streamCut3.put(8, 10L);
    streamCut3.put(9, 10L);
    truncationRecord = TableHelper.computeTruncationRecord(indexTable, historyTable, segmentTable, streamCut3, truncationRecord);
    assertTrue(truncationRecord.getToDelete().size() == 1 && truncationRecord.getToDelete().contains(0));
    assertTrue(truncationRecord.getStreamCut().equals(streamCut3));
    assertTrue(truncationRecord.getCutEpochMap().get(2) == 2 && truncationRecord.getCutEpochMap().get(4) == 4 && truncationRecord.getCutEpochMap().get(5) == 4 && truncationRecord.getCutEpochMap().get(8) == 4 && truncationRecord.getCutEpochMap().get(9) == 4);
    truncationRecord = truncationRecord.mergeDeleted();
    // behind previous
    Map<Integer, Long> streamCut4 = new HashMap<>();
    streamCut4.put(2, 1L);
    streamCut4.put(4, 1L);
    streamCut4.put(5, 1L);
    streamCut4.put(8, 1L);
    streamCut4.put(9, 1L);
    byte[] finalIndexTable = indexTable;
    byte[] finalHistoryTable = historyTable;
    byte[] finalSegmentTable = segmentTable;
    StreamTruncationRecord finalTruncationRecord = truncationRecord;
    AssertExtensions.assertThrows("", () -> TableHelper.computeTruncationRecord(finalIndexTable, finalHistoryTable, finalSegmentTable, streamCut4, finalTruncationRecord), e -> e instanceof IllegalArgumentException);
    Map<Integer, Long> streamCut5 = new HashMap<>();
    streamCut5.put(2, 10L);
    streamCut5.put(4, 10L);
    streamCut5.put(5, 10L);
    // segment 0 has already been deleted by the previous truncation
    streamCut5.put(0, 10L);
    AssertExtensions.assertThrows("", () -> TableHelper.computeTruncationRecord(finalIndexTable, finalHistoryTable, finalSegmentTable, streamCut5, finalTruncationRecord), e -> e instanceof IllegalArgumentException);
}
Also used:
StreamTruncationRecord (io.pravega.controller.store.stream.tables.StreamTruncationRecord)
HashMap (java.util.HashMap)
ArrayList (java.util.ArrayList)
HistoryRecord (io.pravega.controller.store.stream.tables.HistoryRecord)
AbstractMap (java.util.AbstractMap)
Test (org.junit.Test)
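
The two assertThrows cases at the end encode the invariant that a stream cut may only move forward. The sketch below shows a simplified form of that validation, comparing only segments common to both cuts; it is an assumption-level illustration, since the real TableHelper.computeTruncationRecord walks the index, history, and segment tables to compare cuts across epochs.

import java.util.Map;
import java.util.Set;

class StreamCutValidationSketch {

    static void validate(Map<Integer, Long> newCut,
                         Map<Integer, Long> previousCut,
                         Set<Integer> deletedSegments) {
        for (Map.Entry<Integer, Long> entry : newCut.entrySet()) {
            // a cut may not reference a segment that truncation already deleted
            if (deletedSegments.contains(entry.getKey())) {
                throw new IllegalArgumentException("segment " + entry.getKey() + " is already truncated away");
            }
            // for segments common to both cuts, the offset may not move backwards
            Long previousOffset = previousCut.get(entry.getKey());
            if (previousOffset != null && entry.getValue() < previousOffset) {
                throw new IllegalArgumentException("stream cut is behind the previous truncation point");
            }
        }
    }
}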

Aggregations

StreamTruncationRecord (io.pravega.controller.store.stream.tables.StreamTruncationRecord): 6
HashMap (java.util.HashMap): 5
Test (org.junit.Test): 5
ScalingPolicy (io.pravega.client.stream.ScalingPolicy): 4
StreamConfiguration (io.pravega.client.stream.StreamConfiguration): 4
ControllerEventStreamWriterMock (io.pravega.controller.mocks.ControllerEventStreamWriterMock): 3
AbstractMap (java.util.AbstractMap): 3
ArrayList (java.util.ArrayList): 3
RetentionPolicy (io.pravega.client.stream.RetentionPolicy): 2
StreamCutRecord (io.pravega.controller.store.stream.StreamCutRecord): 2
ScaleStreamStatus (io.pravega.controller.stream.api.grpc.v1.Controller.ScaleResponse.ScaleStreamStatus): 2
UpdateStreamStatus (io.pravega.controller.stream.api.grpc.v1.Controller.UpdateStreamStatus): 2
ScaleOperationTask (io.pravega.controller.server.eventProcessor.requesthandlers.ScaleOperationTask): 1
TruncateStreamTask (io.pravega.controller.server.eventProcessor.requesthandlers.TruncateStreamTask): 1
StartScaleResponse (io.pravega.controller.store.stream.StartScaleResponse): 1
StoreException (io.pravega.controller.store.stream.StoreException): 1
HistoryRecord (io.pravega.controller.store.stream.tables.HistoryRecord): 1
ScaleResponse (io.pravega.controller.stream.api.grpc.v1.Controller.ScaleResponse): 1
TruncateStreamEvent (io.pravega.shared.controller.event.TruncateStreamEvent): 1
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 1