Use of io.pravega.controller.store.stream.records.StreamConfigurationRecord in project pravega by pravega.
The class ScaleRequestHandlerTest, method testScaleRequestWithMinimumSegment.
@Test(timeout = 30000)
public void testScaleRequestWithMinimumSegment() throws ExecutionException, InterruptedException {
AutoScaleTask requestHandler = new AutoScaleTask(streamMetadataTasks, streamStore, executor);
ScaleOperationTask scaleRequestHandler = new ScaleOperationTask(streamMetadataTasks, streamStore, executor);
StreamRequestHandler multiplexer = new StreamRequestHandler(requestHandler, scaleRequestHandler, null, null, null, null, null, null, null, streamStore, null, executor);
EventWriterMock writer = new EventWriterMock();
streamMetadataTasks.setRequestEventWriter(writer);
String stream = "mystream";
StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(1, 2, 5)).build();
streamMetadataTasks.createStream(scope, stream, config, System.currentTimeMillis(), 0L).get();
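// note: the stream is created with 5 segments here, since minNumSegments in the policy above is 5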
// change stream configuration to min segment count = 4
config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(1, 2, 4)).build();
streamStore.startUpdateConfiguration(scope, stream, config, null, executor).join();
VersionedMetadata<StreamConfigurationRecord> configRecord = streamStore.getConfigurationRecord(scope, stream, null, executor).join();
streamStore.completeUpdateConfiguration(scope, stream, configRecord, null, executor).join();
// process first auto scale down event. it should only mark the segment as cold
multiplexer.process(new AutoScaleEvent(scope, stream, 1L, AutoScaleEvent.DOWN, System.currentTimeMillis(), 0, false, System.currentTimeMillis()), () -> false).join();
assertTrue(writer.queue.isEmpty());
assertTrue(streamStore.isCold(scope, stream, 1L, null, executor).join());
// process second auto scale down event. since segment 3 is not an immediate neighbour of segment 1, it should only mark the segment as cold
multiplexer.process(new AutoScaleEvent(scope, stream, 3L, AutoScaleEvent.DOWN, System.currentTimeMillis(), 0, false, System.currentTimeMillis()), () -> false).join();
assertTrue(streamStore.isCold(scope, stream, 3L, null, executor).join());
// no scale event should be posted
assertTrue(writer.queue.isEmpty());
// process third auto scale down event. This should result in a scale op event being posted to merge segments 0, 1
multiplexer.process(new AutoScaleEvent(scope, stream, 0L, AutoScaleEvent.DOWN, System.currentTimeMillis(), 0, false, System.currentTimeMillis()), () -> false).join();
assertTrue(streamStore.isCold(scope, stream, 0L, null, executor).join());
// verify that a new event has been posted
assertEquals(1, writer.queue.size());
ControllerEvent event = writer.queue.take();
assertTrue(event instanceof ScaleOpEvent);
ScaleOpEvent scaleDownEvent1 = (ScaleOpEvent) event;
assertEquals(1, scaleDownEvent1.getNewRanges().size());
assertEquals(2, scaleDownEvent1.getSegmentsToSeal().size());
assertTrue(scaleDownEvent1.getSegmentsToSeal().contains(0L));
assertTrue(scaleDownEvent1.getSegmentsToSeal().contains(1L));
// process fourth auto scale down event. This should result in a scale op event being posted to merge segments 3, 4
multiplexer.process(new AutoScaleEvent(scope, stream, 4L, AutoScaleEvent.DOWN, System.currentTimeMillis(), 0, false, System.currentTimeMillis()), () -> false).join();
assertTrue(streamStore.isCold(scope, stream, 4L, null, executor).join());
// verify that a new event has been posted
assertEquals(1, writer.queue.size());
event = writer.queue.take();
assertTrue(event instanceof ScaleOpEvent);
ScaleOpEvent scaleDownEvent2 = (ScaleOpEvent) event;
assertEquals(1, scaleDownEvent2.getNewRanges().size());
assertEquals(2, scaleDownEvent2.getSegmentsToSeal().size());
assertTrue(scaleDownEvent2.getSegmentsToSeal().contains(3L));
assertTrue(scaleDownEvent2.getSegmentsToSeal().contains(4L));
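// both scale-op events have now been taken off the mock writer's queue; they are processed explicitly below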
// process the first scale down event. this should submit and execute the scale, taking the stream down to 4 segments
multiplexer.process(scaleDownEvent1, () -> false).join();
EpochRecord activeEpoch = streamStore.getActiveEpoch(scope, stream, null, true, executor).join();
List<StreamSegmentRecord> segments = activeEpoch.getSegments();
assertEquals(1, activeEpoch.getEpoch());
assertEquals(4, segments.size());
assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 2));
assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 3));
assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 4));
assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 5));
// process the second scale down event. the merge would leave only 3 segments, below the configured minimum of 4, so no scale should be performed
multiplexer.process(scaleDownEvent2, () -> false).join();
// verify that no scale has happened
activeEpoch = streamStore.getActiveEpoch(scope, stream, null, true, executor).join();
segments = activeEpoch.getSegments();
assertEquals(1, activeEpoch.getEpoch());
assertEquals(4, segments.size());
assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 2));
assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 3));
assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 4));
assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 5));
}
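The guard this test exercises is small enough to state directly. Below is a minimal sketch, not Pravega's actual controller code: a scale-down is only admitted if the resulting number of active segments stays at or above the policy's minimum, which is why the first merge (5 -> 4 segments, minimum 4) completes while the second (4 -> 3) is rejected.

    // Hedged sketch; the helper name and shape are illustrative, not from Pravega.
    static boolean scaleDownAllowed(int activeSegments, int segmentsToSeal,
                                    int newSegments, int policyMinSegments) {
        // number of active segments the stream would have after the proposed merge
        int resulting = activeSegments - segmentsToSeal + newSegments;
        return resulting >= policyMinSegments;
    }
    // scaleDownAllowed(5, 2, 1, 4) -> true  (first merge: epoch 0 -> 1)
    // scaleDownAllowed(4, 2, 1, 4) -> false (second merge: rejected, no new epoch)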
Use of io.pravega.controller.store.stream.records.StreamConfigurationRecord in project pravega by pravega.
The class ScaleRequestHandlerTest, method setup.
@Before
public void setup() throws Exception {
StreamMetrics.initialize();
TransactionMetrics.initialize();
zkServer = new TestingServerStarter().start();
zkServer.start();
zkClient = CuratorFrameworkFactory.newClient(zkServer.getConnectString(), new ExponentialBackoffRetry(20, 1, 50));
zkClient.start();
String hostId;
try {
// On each controller process restart, it gets a fresh hostId,
// which is a combination of the host address and a random GUID.
hostId = InetAddress.getLocalHost().getHostAddress() + UUID.randomUUID().toString();
} catch (UnknownHostException e) {
hostId = UUID.randomUUID().toString();
}
streamStore = spy(getStore());
bucketStore = StreamStoreFactory.createZKBucketStore(zkClient, executor);
taskMetadataStore = TaskStoreFactory.createZKStore(zkClient, executor);
connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
clientFactory = mock(EventStreamClientFactory.class);
SegmentHelper segmentHelper = SegmentHelperMock.getSegmentHelperMock();
streamMetadataTasks = new StreamMetadataTasks(streamStore, bucketStore, taskMetadataStore, segmentHelper, executor, hostId, GrpcAuthHelper.getDisabledAuthHelper());
streamMetadataTasks.initializeStreamWriters(clientFactory, Config.SCALE_STREAM_NAME);
streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelper, executor, hostId, GrpcAuthHelper.getDisabledAuthHelper());
long createTimestamp = System.currentTimeMillis();
// create the scope and the stream
streamStore.createScope(scope, null, executor).get();
StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(1, 2, 3)).build();
streamMetadataTasks.createStream(scope, stream, config, createTimestamp, 0L).get();
// set minimum number of segments to 1 so that we can also test scale downs
config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(1, 2, 1)).build();
streamStore.startUpdateConfiguration(scope, stream, config, null, executor).join();
VersionedMetadata<StreamConfigurationRecord> configRecord = streamStore.getConfigurationRecord(scope, stream, null, executor).join();
streamStore.completeUpdateConfiguration(scope, stream, configRecord, null, executor).join();
}
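Note the three-step configuration-update sequence that recurs throughout these tests: startUpdateConfiguration stages the new configuration, getConfigurationRecord reads back the versioned record, and completeUpdateConfiguration commits it. Extracted as a helper it reads as follows; this is a readability sketch, not a method that exists in the test class:

    // Hypothetical helper wrapping the update lifecycle used in setup() and the tests.
    private void updateStreamConfig(String scope, String stream, StreamConfiguration config) {
        streamStore.startUpdateConfiguration(scope, stream, config, null, executor).join();
        VersionedMetadata<StreamConfigurationRecord> record =
                streamStore.getConfigurationRecord(scope, stream, null, executor).join();
        streamStore.completeUpdateConfiguration(scope, stream, record, null, executor).join();
    }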
Use of io.pravega.controller.store.stream.records.StreamConfigurationRecord in project pravega by pravega.
The class StreamMetadataTasksTest, method consumptionBasedRetentionWithNoBounds.
@Test(timeout = 30000)
public void consumptionBasedRetentionWithNoBounds() throws Exception {
final ScalingPolicy policy = ScalingPolicy.fixed(2);
final RetentionPolicy retentionPolicy = RetentionPolicy.byTime(Duration.ofMillis(0L), Duration.ofMillis(Long.MAX_VALUE));
String stream1 = "consumptionSize3";
StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
streamStorePartialMock.createStream(SCOPE, stream1, configuration, System.currentTimeMillis(), null, executor).get();
streamStorePartialMock.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
configuration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).retentionPolicy(retentionPolicy).build();
streamStorePartialMock.startUpdateConfiguration(SCOPE, stream1, configuration, null, executor).join();
VersionedMetadata<StreamConfigurationRecord> configRecord = streamStorePartialMock.getConfigurationRecord(SCOPE, stream1, null, executor).join();
streamStorePartialMock.completeUpdateConfiguration(SCOPE, stream1, configRecord, null, executor).join();
final Segment seg0 = new Segment(SCOPE, stream1, 0L);
final Segment seg1 = new Segment(SCOPE, stream1, 1L);
ImmutableMap<Segment, Long> startStreamCut = ImmutableMap.of(seg0, 0L, seg1, 0L);
Map<Stream, StreamCut> startSC = ImmutableMap.of(Stream.of(SCOPE, stream1), new StreamCutImpl(Stream.of(SCOPE, stream1), startStreamCut));
ImmutableMap<Segment, Long> endStreamCut = ImmutableMap.of(seg0, 2000L, seg1, 3000L);
Map<Stream, StreamCut> endSC = ImmutableMap.of(Stream.of(SCOPE, stream1), new StreamCutImpl(Stream.of(SCOPE, stream1), endStreamCut));
ReaderGroupConfig consumpRGConfig = ReaderGroupConfig.builder().automaticCheckpointIntervalMillis(30000L).groupRefreshTimeMillis(20000L).maxOutstandingCheckpointRequest(2).retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT).startingStreamCuts(startSC).endingStreamCuts(endSC).build();
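// note: a retention type of AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT makes this reader group
// a subscriber, so the stream cuts it publishes bound the consumption-based retention below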
consumpRGConfig = ReaderGroupConfig.cloneConfig(consumpRGConfig, UUID.randomUUID(), 0L);
doReturn(CompletableFuture.completedFuture(Controller.CreateStreamStatus.Status.SUCCESS)).when(streamMetadataTasks).createRGStream(anyString(), anyString(), any(), anyLong(), anyInt(), anyLong());
WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
streamMetadataTasks.setRequestEventWriter(requestEventWriter);
String subscriber1 = "subscriber1";
CompletableFuture<Controller.CreateReaderGroupResponse> createStatus = streamMetadataTasks.createReaderGroup(SCOPE, subscriber1, consumpRGConfig, System.currentTimeMillis(), 0L);
assertTrue(Futures.await(processEvent(requestEventWriter)));
Controller.CreateReaderGroupResponse createResponse1 = createStatus.join();
assertEquals(Controller.CreateReaderGroupResponse.Status.SUCCESS, createResponse1.getStatus());
assertEquals(0L, createResponse1.getConfig().getGeneration());
assertFalse(ReaderGroupConfig.DEFAULT_UUID.toString().equals(createResponse1.getConfig().getReaderGroupId()));
String subscriber2 = "subscriber2";
createStatus = streamMetadataTasks.createReaderGroup(SCOPE, subscriber2, consumpRGConfig, System.currentTimeMillis(), 0L);
assertTrue(Futures.await(processEvent(requestEventWriter)));
Controller.CreateReaderGroupResponse createResponse2 = createStatus.join();
assertEquals(Controller.CreateReaderGroupResponse.Status.SUCCESS, createResponse2.getStatus());
assertEquals(0L, createResponse2.getConfig().getGeneration());
assertFalse(ReaderGroupConfig.DEFAULT_UUID.toString().equals(createResponse2.getConfig().getReaderGroupId()));
final String subscriber1Name = NameUtils.getScopedReaderGroupName(SCOPE, subscriber1);
final String subscriber2Name = NameUtils.getScopedReaderGroupName(SCOPE, subscriber2);
// example (one column per epoch, 0..5; each segment shown in its creation epoch):
// | s0 | s2 |    |    | s7 |
// |    |    | s4 | s6 | s8 | s10
// | s1 | s3 | s5 |    | s9 |
// valid stream cuts: { s0/off, s9/off, s2/-1, s8/-1}, { s1/off, s2/-1 }
// lower bound = { s0/off, s1/off }
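// the long segment ids below pack the creation epoch together with the segment number
// (see NameUtils.computeSegmentId); e.g. 'two' is segment number 2 created in epoch 1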
long two = NameUtils.computeSegmentId(2, 1);
long three = NameUtils.computeSegmentId(3, 1);
long four = NameUtils.computeSegmentId(4, 2);
long five = NameUtils.computeSegmentId(5, 2);
long six = NameUtils.computeSegmentId(6, 3);
long seven = NameUtils.computeSegmentId(7, 4);
long eight = NameUtils.computeSegmentId(8, 4);
long nine = NameUtils.computeSegmentId(9, 4);
long ten = NameUtils.computeSegmentId(10, 5);
// 0, 1 -> 2, 3 with different split
scale(SCOPE, stream1, ImmutableMap.of(0L, 1L, 1L, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 0.6), new AbstractMap.SimpleEntry<>(0.6, 1.0)));
// s3 -> 4, 5
scale(SCOPE, stream1, ImmutableMap.of(three, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.6, 0.8), new AbstractMap.SimpleEntry<>(0.8, 1.0)));
// 4,5 -> 6
scale(SCOPE, stream1, ImmutableMap.of(four, 1L, five, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.6, 1.0)));
// 2, 6 -> 7, 8, 9
scale(SCOPE, stream1, ImmutableMap.of(two, 1L, six, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 0.3), new AbstractMap.SimpleEntry<>(0.3, 0.6), new AbstractMap.SimpleEntry<>(0.6, 1.0)));
// 7, 8, 9 -> 10
scale(SCOPE, stream1, ImmutableMap.of(seven, 1L, eight, 1L, nine, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)));
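// the stream is now in epoch 5, with the single segment s10 covering the whole key space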
assertNotEquals(0, consumer.getCurrentSegments(SCOPE, stream1, 0L).get().size());
streamMetadataTasks.setRetentionFrequencyMillis(1L);
// invalid streamcut should be rejected
UpdateSubscriberStatus.Status status = streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, createResponse1.getConfig().getReaderGroupId(), createResponse1.getConfig().getGeneration(), ImmutableMap.of(0L, 1L, three, 1L), 0L).join();
assertEquals(UpdateSubscriberStatus.Status.STREAM_CUT_NOT_VALID, status);
status = streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, createResponse1.getConfig().getReaderGroupId(), createResponse1.getConfig().getGeneration(), ImmutableMap.of(0L, 1L, two, -1L, eight, -1L, nine, 1L), 0L).join();
status = streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber2Name, createResponse2.getConfig().getReaderGroupId(), createResponse2.getConfig().getGeneration(), ImmutableMap.of(1L, 1L, two, -1L), 0L).join();
Map<Long, Long> map1 = new HashMap<>();
map1.put(ten, 2L);
long size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, stream1, map1, Optional.empty(), null, executor).join();
doReturn(CompletableFuture.completedFuture(new StreamCutRecord(1L, size, ImmutableMap.copyOf(map1)))).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
// call retention and verify that retention policy applies
streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, 1L, null, "").join();
// now retention set has one stream cut 10/2
// subscriber lowerbound is 0/1, 1/1.. truncation should happen at lowerbound
VersionedMetadata<StreamTruncationRecord> truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
assertEquals(1L, truncationRecord.getObject().getStreamCut().get(0L).longValue());
assertEquals(1L, truncationRecord.getObject().getStreamCut().get(1L).longValue());
assertTrue(truncationRecord.getObject().isUpdating());
streamStorePartialMock.completeTruncation(SCOPE, stream1, truncationRecord, null, executor).join();
}
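The long segment ids used above come from NameUtils.computeSegmentId(segmentNumber, epoch). A sketch of the packing, assuming Pravega's convention of keeping the creation epoch in the upper 32 bits (worth verifying against NameUtils itself):

    // Illustrative only: how a segment number and its creation epoch form one 64-bit id.
    static long segmentId(int segmentNumber, int epoch) {
        return ((long) epoch << 32) | (segmentNumber & 0xFFFFFFFFL);
    }
    // e.g. segmentId(2, 1) yields the value bound to 'two' in the test above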
Use of io.pravega.controller.store.stream.records.StreamConfigurationRecord in project pravega by pravega.
The class StreamMetadataTasksTest, method checkUpdateCompleteTest.
@Test(timeout = 10000)
public void checkUpdateCompleteTest() throws ExecutionException, InterruptedException {
final ScalingPolicy policy = ScalingPolicy.fixed(1);
final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).build();
String test = "testUpdate";
streamStorePartialMock.createStream(SCOPE, test, configuration, System.currentTimeMillis(), null, executor).get();
streamStorePartialMock.setState(SCOPE, test, State.ACTIVE, null, executor).get();
streamMetadataTasks.setRequestEventWriter(new EventStreamWriterMock<>());
// region update
final StreamConfiguration configuration2 = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(2)).build();
streamMetadataTasks.updateStream(SCOPE, test, configuration2, 0L);
// wait till the staged configuration update becomes visible (configuration record marked as updating)
Supplier<Boolean> configUpdated = () -> !streamStorePartialMock.getConfigurationRecord(SCOPE, test, null, executor).join().getObject().isUpdating();
Futures.loop(configUpdated, () -> Futures.delayedFuture(Duration.ofMillis(100), executor), executor).join();
streamStorePartialMock.setState(SCOPE, test, State.UPDATING, null, executor).join();
assertFalse(streamMetadataTasks.isUpdated(SCOPE, test, configuration2, null).get());
VersionedMetadata<StreamConfigurationRecord> configurationRecord = streamStorePartialMock.getConfigurationRecord(SCOPE, test, null, executor).join();
assertTrue(configurationRecord.getObject().isUpdating());
streamStorePartialMock.completeUpdateConfiguration(SCOPE, test, configurationRecord, null, executor).join();
assertFalse(streamMetadataTasks.isUpdated(SCOPE, test, configuration2, null).get());
streamStorePartialMock.setState(SCOPE, test, State.ACTIVE, null, executor).join();
assertTrue(streamMetadataTasks.isUpdated(SCOPE, test, configuration2, null).get());
// start next update with different configuration.
final StreamConfiguration configuration3 = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
streamMetadataTasks.updateStream(SCOPE, test, configuration3, 0L);
Futures.loop(configUpdated, () -> Futures.delayedFuture(Duration.ofMillis(100), executor), executor).join();
streamStorePartialMock.setState(SCOPE, test, State.UPDATING, null, executor).join();
// isUpdated should still report true for the previous configuration, which was fully applied
assertTrue(streamMetadataTasks.isUpdated(SCOPE, test, configuration2, null).get());
assertFalse(streamMetadataTasks.isUpdated(SCOPE, test, configuration3, null).get());
// test update on a sealed stream
String testStream = "testUpdateSealed";
streamStorePartialMock.createStream(SCOPE, testStream, configuration, System.currentTimeMillis(), null, executor).get();
streamStorePartialMock.setState(SCOPE, testStream, State.ACTIVE, null, executor).get();
streamMetadataTasks.setRequestEventWriter(new EventStreamWriterMock<>());
final StreamConfiguration configuration4 = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(4)).build();
streamMetadataTasks.updateStream(SCOPE, testStream, configuration4, 0L);
// wait till the staged configuration update becomes visible (configuration record marked as updating)
configUpdated = () -> !streamStorePartialMock.getConfigurationRecord(SCOPE, testStream, null, executor).join().getObject().isUpdating();
Futures.loop(configUpdated, () -> Futures.delayedFuture(Duration.ofMillis(100), executor), executor).join();
configurationRecord = streamStorePartialMock.getConfigurationRecord(SCOPE, testStream, null, executor).join();
assertTrue(configurationRecord.getObject().isUpdating());
streamStorePartialMock.completeUpdateConfiguration(SCOPE, testStream, configurationRecord, null, executor).join();
streamStorePartialMock.setState(SCOPE, testStream, State.SEALED, null, executor).join();
assertFutureThrows("Should throw UnsupportedOperationException", streamMetadataTasks.isUpdated(SCOPE, testStream, configuration4, null), e -> UnsupportedOperationException.class.isAssignableFrom(e.getClass()));
// end region
}
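The Futures.loop construction above is a non-blocking poll: it re-reads the configuration record every 100 ms and exits once the staged update becomes visible (isUpdating() turns true). A plain blocking equivalent, written as a hypothetical helper for clarity:

    // Hedged sketch; a blocking-loop equivalent of the Futures.loop polling above.
    private void awaitConfigUpdateVisible(String scope, String stream) throws InterruptedException {
        while (!streamStorePartialMock.getConfigurationRecord(scope, stream, null, executor)
                .join().getObject().isUpdating()) {
            Thread.sleep(100);
        }
    }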
Use of io.pravega.controller.store.stream.records.StreamConfigurationRecord in project pravega by pravega.
The class StreamMetadataTasksTest, method consumptionBasedRetentionWithScale2.
@Test(timeout = 30000)
public void consumptionBasedRetentionWithScale2() throws Exception {
final ScalingPolicy policy = ScalingPolicy.fixed(2);
final RetentionPolicy retentionPolicy = RetentionPolicy.bySizeBytes(0L, 1000L);
String stream1 = "consumptionSize2";
StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
streamStorePartialMock.createStream(SCOPE, stream1, configuration, System.currentTimeMillis(), null, executor).get();
streamStorePartialMock.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
configuration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).retentionPolicy(retentionPolicy).build();
streamStorePartialMock.startUpdateConfiguration(SCOPE, stream1, configuration, null, executor).join();
VersionedMetadata<StreamConfigurationRecord> configRecord = streamStorePartialMock.getConfigurationRecord(SCOPE, stream1, null, executor).join();
streamStorePartialMock.completeUpdateConfiguration(SCOPE, stream1, configRecord, null, executor).join();
final Segment seg0 = new Segment(SCOPE, stream1, 0L);
final Segment seg1 = new Segment(SCOPE, stream1, 1L);
ImmutableMap<Segment, Long> startStreamCut = ImmutableMap.of(seg0, 0L, seg1, 0L);
Map<Stream, StreamCut> startSC = ImmutableMap.of(Stream.of(SCOPE, stream1), new StreamCutImpl(Stream.of(SCOPE, stream1), startStreamCut));
ImmutableMap<Segment, Long> endStreamCut = ImmutableMap.of(seg0, 2000L, seg1, 3000L);
Map<Stream, StreamCut> endSC = ImmutableMap.of(Stream.of(SCOPE, stream1), new StreamCutImpl(Stream.of(SCOPE, stream1), endStreamCut));
ReaderGroupConfig consumpRGConfig = ReaderGroupConfig.builder().automaticCheckpointIntervalMillis(30000L).groupRefreshTimeMillis(20000L).maxOutstandingCheckpointRequest(2).retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT).startingStreamCuts(startSC).endingStreamCuts(endSC).build();
doReturn(CompletableFuture.completedFuture(Controller.CreateStreamStatus.Status.SUCCESS)).when(streamMetadataTasks).createRGStream(anyString(), anyString(), any(), anyLong(), anyInt(), anyLong());
WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
streamMetadataTasks.setRequestEventWriter(requestEventWriter);
String subscriber1 = "subscriber1";
CompletableFuture<Controller.CreateReaderGroupResponse> createStatus = streamMetadataTasks.createReaderGroup(SCOPE, subscriber1, consumpRGConfig, System.currentTimeMillis(), 0L);
assertTrue(Futures.await(processEvent(requestEventWriter)));
Controller.CreateReaderGroupResponse createResponse1 = createStatus.join();
assertEquals(Controller.CreateReaderGroupResponse.Status.SUCCESS, createResponse1.getStatus());
String subscriber2 = "subscriber2";
createStatus = streamMetadataTasks.createReaderGroup(SCOPE, subscriber2, consumpRGConfig, System.currentTimeMillis(), 0L);
assertTrue(Futures.await(processEvent(requestEventWriter)));
Controller.CreateReaderGroupResponse createResponse2 = createStatus.join();
assertEquals(Controller.CreateReaderGroupResponse.Status.SUCCESS, createResponse2.getStatus());
final String subscriber1Name = NameUtils.getScopedReaderGroupName(SCOPE, subscriber1);
final String subscriber2Name = NameUtils.getScopedReaderGroupName(SCOPE, subscriber2);
// example (one column per epoch, 0..5; each segment shown in its creation epoch):
// | s0 | s2 |    |    | s7 |
// |    |    | s4 | s6 | s8 | s10
// | s1 | s3 | s5 |    | s9 |
// valid stream cuts: { s0/off, s9/off, s2/-1, s8/-1}, { s1/off, s2/-1 }
// lower bound = { s0/off, s1/off }
long two = NameUtils.computeSegmentId(2, 1);
long three = NameUtils.computeSegmentId(3, 1);
long four = NameUtils.computeSegmentId(4, 2);
long five = NameUtils.computeSegmentId(5, 2);
long six = NameUtils.computeSegmentId(6, 3);
long seven = NameUtils.computeSegmentId(7, 4);
long eight = NameUtils.computeSegmentId(8, 4);
long nine = NameUtils.computeSegmentId(9, 4);
long ten = NameUtils.computeSegmentId(10, 5);
// 0, 1 -> 2, 3 with different split
scale(SCOPE, stream1, ImmutableMap.of(0L, 1L, 1L, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 0.6), new AbstractMap.SimpleEntry<>(0.6, 1.0)));
// s3 -> 4, 5
scale(SCOPE, stream1, ImmutableMap.of(three, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.6, 0.8), new AbstractMap.SimpleEntry<>(0.8, 1.0)));
// 4,5 -> 6
scale(SCOPE, stream1, ImmutableMap.of(four, 1L, five, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.6, 1.0)));
// 2, 6 -> 7, 8, 9
scale(SCOPE, stream1, ImmutableMap.of(two, 1L, six, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 0.3), new AbstractMap.SimpleEntry<>(0.3, 0.6), new AbstractMap.SimpleEntry<>(0.6, 1.0)));
// 7, 8, 9 -> 10
scale(SCOPE, stream1, ImmutableMap.of(seven, 1L, eight, 1L, nine, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)));
assertNotEquals(0, consumer.getCurrentSegments(SCOPE, stream1, 0L).get().size());
streamMetadataTasks.setRetentionFrequencyMillis(1L);
// invalid streamcut should be rejected
UpdateSubscriberStatus.Status status = streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, createResponse1.getConfig().getReaderGroupId(), createResponse1.getConfig().getGeneration(), ImmutableMap.of(0L, 1L, three, 1L), 0L).join();
assertEquals(UpdateSubscriberStatus.Status.STREAM_CUT_NOT_VALID, status);
status = streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, createResponse1.getConfig().getReaderGroupId(), createResponse1.getConfig().getGeneration(), ImmutableMap.of(0L, 1L, two, -1L, eight, -1L, nine, 1L), 0L).join();
status = streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber2Name, createResponse2.getConfig().getReaderGroupId(), createResponse2.getConfig().getGeneration(), ImmutableMap.of(1L, 1L, two, -1L), 0L).join();
Map<Long, Long> map1 = new HashMap<>();
map1.put(ten, 2L);
long size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, stream1, map1, Optional.empty(), null, executor).join();
doReturn(CompletableFuture.completedFuture(new StreamCutRecord(1L, size, ImmutableMap.copyOf(map1)))).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
// call retention and verify that retention policy applies
streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, 1L, null, "").join();
// now retention set has one stream cut 10/2
// subscriber lowerbound is 0/1, 1/1.. truncation should happen at lowerbound
VersionedMetadata<StreamTruncationRecord> truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
assertEquals(1L, truncationRecord.getObject().getStreamCut().get(0L).longValue());
assertEquals(1L, truncationRecord.getObject().getStreamCut().get(1L).longValue());
assertTrue(truncationRecord.getObject().isUpdating());
streamStorePartialMock.completeTruncation(SCOPE, stream1, truncationRecord, null, executor).join();
}
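The final assertions in both consumption-based retention tests check the same rule: the truncation point is the subscribers' lower bound, provided that bound respects the retention policy's limits. A compact sketch of the decision with hypothetical names (the real logic lives in the controller's retention workflow; java.util.Map assumed):

    // Hedged sketch: choose the truncation stream cut under consumption-based retention.
    static Map<Long, Long> chooseTruncationCut(Map<Long, Long> subscriberLowerBound,
                                               boolean lowerBoundWithinPolicyLimits,
                                               Map<Long, Long> policyCut) {
        // the subscribers' lower bound wins when the policy's min/max limits allow it,
        // which is why truncation above lands at {0L: 1L, 1L: 1L} rather than at 10/2
        return lowerBoundWithinPolicyLimits ? subscriberLowerBound : policyCut;
    }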