Example usage of io.pravega.client.stream.StreamCut in project pravega by pravega: method createReaderGroup of class ControllerServiceImplTest.
/**
 * Creates a reader group through the gRPC controller service and asserts the call succeeds.
 * The group is configured with explicit starting/ending stream cuts over segments 0 and 1
 * of the given stream, then stamped with the supplied reader-group id at generation 0.
 *
 * @param scope  scope containing the stream
 * @param stream stream the reader group reads from
 * @param rgName name of the reader group to create
 * @param rgId   id to stamp onto the reader group config
 */
protected void createReaderGroup(String scope, String stream, String rgName, UUID rgId) {
    final Stream targetStream = Stream.of(scope, stream);
    final Segment segment0 = new Segment(scope, stream, 0L);
    final Segment segment1 = new Segment(scope, stream, 1L);
    // Starting cut: both segments at offset 10.
    Map<Stream, StreamCut> startingCuts = ImmutableMap.of(
            targetStream, new StreamCutImpl(targetStream, ImmutableMap.of(segment0, 10L, segment1, 10L)));
    // Ending cut: segment 0 at offset 200, segment 1 at offset 300.
    Map<Stream, StreamCut> endingCuts = ImmutableMap.of(
            targetStream, new StreamCutImpl(targetStream, ImmutableMap.of(segment0, 200L, segment1, 300L)));
    ReaderGroupConfig config = ReaderGroupConfig.builder()
            .automaticCheckpointIntervalMillis(30000L)
            .groupRefreshTimeMillis(20000L)
            .maxOutstandingCheckpointRequest(2)
            .retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT)
            .startingStreamCuts(startingCuts)
            .endingStreamCuts(endingCuts)
            .build();
    // Attach the caller-supplied reader-group id with generation 0.
    config = ReaderGroupConfig.cloneConfig(config, rgId, 0L);
    ResultObserver<CreateReaderGroupResponse> result = new ResultObserver<>();
    this.controllerService.createReaderGroup(ModelHelper.decode(scope, rgName, config), result);
    CreateReaderGroupResponse response = result.get();
    assertEquals("Create Reader Group", CreateReaderGroupResponse.Status.SUCCESS, response.getStatus());
}
Example usage of io.pravega.client.stream.StreamCut in project pravega by pravega: method testStreamSeek of class StreamSeekTest.
/**
 * Verifies that a reader group can be reset ("seeked") to previously captured stream cuts,
 * both to a cut taken before a stream scale-up and to one taken after it, and that reads
 * resume at exactly the expected events in each case.
 */
@Test(timeout = 50000)
public void testStreamSeek() throws Exception {
createScope(SCOPE);
createStream(STREAM1);
createStream(STREAM2);
@Cleanup EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE, ClientConfig.builder().controllerURI(controllerUri).build());
@Cleanup EventStreamWriter<String> writer1 = clientFactory.createEventWriter(STREAM1, serializer, EventWriterConfig.builder().build());
@Cleanup ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, controllerUri);
// groupRefreshTimeMillis(0) makes the readers pick up group state changes immediately.
groupManager.createReaderGroup("group", ReaderGroupConfig.builder().disableAutomaticCheckpoints().groupRefreshTimeMillis(0).stream(Stream.of(SCOPE, STREAM1)).stream(Stream.of(SCOPE, STREAM2)).build());
@Cleanup ReaderGroup readerGroup = groupManager.getReaderGroup("group");
// Prep the stream with data.
// 1.Write two events with event size of 30
writer1.writeEvent(keyGenerator.get(), getEventData.apply(1)).get();
writer1.writeEvent(keyGenerator.get(), getEventData.apply(2)).get();
// 2.Scale stream
Map<Double, Double> newKeyRanges = new HashMap<>();
newKeyRanges.put(0.0, 0.33);
newKeyRanges.put(0.33, 0.66);
newKeyRanges.put(0.66, 1.0);
scaleStream(STREAM1, newKeyRanges);
// 3.Write three events with event size of 30
writer1.writeEvent(keyGenerator.get(), getEventData.apply(3)).get();
writer1.writeEvent(keyGenerator.get(), getEventData.apply(4)).get();
writer1.writeEvent(keyGenerator.get(), getEventData.apply(5)).get();
// Create a reader
@Cleanup EventStreamReader<String> reader = clientFactory.createReader("readerId", "group", serializer, ReaderConfig.builder().build());
// Offset of a streamCut is always set to zero.
// Stream cut 1: taken before any events are consumed, i.e. the very start of the stream.
Map<Stream, StreamCut> streamCut1 = readerGroup.getStreamCuts();
readAndVerify(reader, 1, 2);
// Sees the segments are empty prior to scaling
assertNull(reader.readNextEvent(100).getEvent());
// Checkpoint to move past the scale
readerGroup.initiateCheckpoint("cp1", executorService());
// Old segments are released and new ones can be read
readAndVerify(reader, 3, 4, 5);
// Stream cut 2: taken after all five events were consumed (post-scale position).
Map<Stream, StreamCut> streamCut2 = readerGroup.getStreamCuts();
// reset the readers to offset 0 (pre-scale cut); the existing reader becomes stale.
readerGroup.resetReaderGroup(ReaderGroupConfig.builder().startFromStreamCuts(streamCut1).build());
verifyReinitializationRequiredException(reader);
@Cleanup EventStreamReader<String> reader1 = clientFactory.createReader("readerId", "group", serializer, ReaderConfig.builder().build());
// verify that we are at streamCut1
readAndVerify(reader1, 1, 2);
// reset readers to post scale offset 0; again the old reader must be re-created.
readerGroup.resetReaderGroup(ReaderGroupConfig.builder().startFromStreamCuts(streamCut2).build());
verifyReinitializationRequiredException(reader1);
@Cleanup EventStreamReader<String> reader2 = clientFactory.createReader("readerId", "group", serializer, ReaderConfig.builder().build());
// verify that we are at streamCut2
readAndVerify(reader2, 3, 4, 5);
}
Example usage of io.pravega.client.stream.StreamCut in project pravega by pravega: method testReaderGroupCuts of class StreamCutsTest.
/**
 * Verifies that the stream cuts reported by a reader group track the stream's segment set
 * through a sequence of scale operations: 1 segment -> 2 -> 1 -> 4. After each scale and
 * checkpoint, the cut is validated against the expected qualified segment names.
 */
@Test(timeout = 40000)
public void testReaderGroupCuts() throws Exception {
StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(10, 2, 1)).build();
Controller controller = controllerWrapper.getController();
controllerWrapper.getControllerService().createScope("test", 0L).get();
controller.createStream("test", "test", config).get();
@Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
@Cleanup ClientFactoryImpl clientFactory = new ClientFactoryImpl("test", controller, connectionFactory);
@Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter("test", new JavaSerializer<>(), EventWriterConfig.builder().build());
writer.writeEvent("0", "fpj was here").get();
writer.writeEvent("0", "fpj was here again").get();
@Cleanup ReaderGroupManager groupManager = new ReaderGroupManagerImpl("test", controller, clientFactory);
groupManager.createReaderGroup("cuts", ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream("test/test").groupRefreshTimeMillis(0).build());
@Cleanup ReaderGroup readerGroup = groupManager.getReaderGroup("cuts");
@Cleanup EventStreamReader<String> reader = clientFactory.createReader("readerId", "cuts", new JavaSerializer<>(), ReaderConfig.builder().initialAllocationDelay(0).build());
EventRead<String> firstEvent = reader.readNextEvent(5000);
assertNotNull(firstEvent.getEvent());
assertEquals("fpj was here", firstEvent.getEvent());
readerGroup.initiateCheckpoint("cp1", executor);
EventRead<String> cpEvent = reader.readNextEvent(5000);
assertEquals("cp1", cpEvent.getCheckpointName());
EventRead<String> secondEvent = reader.readNextEvent(5000);
assertNotNull(secondEvent.getEvent());
assertEquals("fpj was here again", secondEvent.getEvent());
// Before any scaling, the cut should reference only the single initial segment 0.
Map<Stream, StreamCut> cuts = readerGroup.getStreamCuts();
validateCuts(readerGroup, cuts, Collections.singleton(getQualifiedStreamSegmentName("test", "test", 0L)));
// Scale the stream to verify that we get more segments in the cut.
Stream stream = Stream.of("test", "test");
Map<Double, Double> map = new HashMap<>();
map.put(0.0, 0.5);
map.put(0.5, 1.0);
Boolean result = controller.scaleStream(stream, Collections.singletonList(0L), map, executor).getFuture().get();
assertTrue(result);
log.info("Finished 1st scaling");
writer.writeEvent("0", "fpj was here again0").get();
writer.writeEvent("1", "fpj was here again1").get();
EventRead<String> eosEvent = reader.readNextEvent(100);
// Reader does not yet see the data because there has been no CP
assertNull(eosEvent.getEvent());
CompletableFuture<Checkpoint> checkpoint = readerGroup.initiateCheckpoint("cp2", executor);
cpEvent = reader.readNextEvent(100);
EventRead<String> event0 = reader.readNextEvent(100);
EventRead<String> event1 = reader.readNextEvent(100);
cuts = checkpoint.get(5, TimeUnit.SECONDS).asImpl().getPositions();
// Validate the reader did not release the segments before the checkpoint.
// This is important because it means that once the checkpoint is initiated no segments change readers.
Set<String> segmentNames = ImmutableSet.of(getQualifiedStreamSegmentName("test", "test", computeSegmentId(0, 0)));
validateCuts(readerGroup, cuts, segmentNames);
// A generated cut taken after the checkpoint should now cover both post-scale segments (1 and 2, epoch 1).
CompletableFuture<Map<Stream, StreamCut>> futureCuts = readerGroup.generateStreamCuts(executor);
EventRead<String> emptyEvent = reader.readNextEvent(100);
cuts = futureCuts.get();
segmentNames = ImmutableSet.of(getQualifiedStreamSegmentName("test", "test", computeSegmentId(1, 1)), getQualifiedStreamSegmentName("test", "test", computeSegmentId(2, 1)));
validateCuts(readerGroup, cuts, segmentNames);
// Scale down to verify that the number drops back.
map = new HashMap<>();
map.put(0.0, 1.0);
ArrayList<Long> toSeal = new ArrayList<>();
toSeal.add(computeSegmentId(1, 1));
toSeal.add(computeSegmentId(2, 1));
result = controller.scaleStream(stream, Collections.unmodifiableList(toSeal), map, executor).getFuture().get();
assertTrue(result);
log.info("Finished 2nd scaling");
writer.writeEvent("0", "fpj was here again2").get();
// Reader sees the segment is empty
emptyEvent = reader.readNextEvent(100);
assertNull(emptyEvent.getEvent());
checkpoint = readerGroup.initiateCheckpoint("cp3", executor);
cpEvent = reader.readNextEvent(100);
assertEquals("cp3", cpEvent.getCheckpointName());
// Reader releases segments here
event0 = reader.readNextEvent(5000);
assertTrue(event0.getEvent().endsWith("2"));
// After the scale-down the cut should reference only the merged segment 3 (epoch 2).
cuts = readerGroup.getStreamCuts();
long three = computeSegmentId(3, 2);
validateCuts(readerGroup, cuts, Collections.singleton(getQualifiedStreamSegmentName("test", "test", three)));
// Scale up to 4 segments again.
map = new HashMap<>();
map.put(0.0, 0.25);
map.put(0.25, 0.5);
map.put(0.5, 0.75);
map.put(0.75, 1.0);
result = controller.scaleStream(stream, Collections.singletonList(three), map, executor).getFuture().get();
assertTrue(result);
log.info("Finished 3rd scaling");
writer.writeEvent("0", "fpj was here again3").get();
// Reader sees the segment is empty
emptyEvent = reader.readNextEvent(100);
assertNull(emptyEvent.getEvent());
readerGroup.initiateCheckpoint("cp4", executor);
cpEvent = reader.readNextEvent(1000);
assertEquals("cp4", cpEvent.getCheckpointName());
// Reader releases segments here
event0 = reader.readNextEvent(5000);
assertNotNull(event0.getEvent());
// The final cut should cover all four post-scale segments (4-7, epoch 3).
cuts = readerGroup.getStreamCuts();
segmentNames = new HashSet<>();
long four = computeSegmentId(4, 3);
long five = computeSegmentId(5, 3);
long six = computeSegmentId(6, 3);
long seven = computeSegmentId(7, 3);
segmentNames.add(getQualifiedStreamSegmentName("test", "test", four));
segmentNames.add(getQualifiedStreamSegmentName("test", "test", five));
segmentNames.add(getQualifiedStreamSegmentName("test", "test", six));
segmentNames.add(getQualifiedStreamSegmentName("test", "test", seven));
validateCuts(readerGroup, cuts, Collections.unmodifiableSet(segmentNames));
}
Example usage of io.pravega.client.stream.StreamCut in project pravega by pravega: method updateSubscriberStreamCutTest of class ControllerServiceTest.
/**
 * Verifies that a subscriber reader group's truncation stream cut can be updated through
 * the controller: creates a subscriber reader group, checks it is listed as the stream's
 * only subscriber, then submits two successive stream-cut updates and asserts both succeed.
 *
 * @param controller controller client used for all operations
 * @param scope      scope containing the stream
 * @param stream     stream whose subscriber cut is updated
 */
private static void updateSubscriberStreamCutTest(Controller controller, final String scope, final String stream) throws InterruptedException, ExecutionException {
    final Stream scopedStream = Stream.of(scope, stream);
    final Segment segment0 = new Segment(scope, stream, 0L);
    final Segment segment1 = new Segment(scope, stream, 1L);
    // Register the first subscriber: a reader group with explicit start/end cuts
    // and checkpoint-based automatic retention.
    Map<Stream, StreamCut> startCuts = ImmutableMap.of(
            scopedStream, new StreamCutImpl(scopedStream, ImmutableMap.of(segment0, 10L, segment1, 10L)));
    Map<Stream, StreamCut> endCuts = ImmutableMap.of(
            scopedStream, new StreamCutImpl(scopedStream, ImmutableMap.of(segment0, 200L, segment1, 300L)));
    final ReaderGroupConfig rgConfig = ReaderGroupConfig.builder()
            .automaticCheckpointIntervalMillis(30000L)
            .groupRefreshTimeMillis(20000L)
            .maxOutstandingCheckpointRequest(2)
            .retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT)
            .startingStreamCuts(startCuts)
            .endingStreamCuts(endCuts)
            .build();
    final String rg1 = "rg1";
    ReaderGroupConfig createConfig = controller.createReaderGroup(scope, rg1, rgConfig).get();
    // The controller must assign a real reader-group id and start at generation 0.
    assertFalse(ReaderGroupConfig.DEFAULT_UUID.equals(createConfig.getReaderGroupId()));
    assertEquals(0L, createConfig.getGeneration());
    // The new group must appear as the stream's sole subscriber.
    List<String> subs = controller.listSubscribers(scope, stream).get();
    assertEquals(1, subs.size());
    String subscriber1 = NameUtils.getScopedReaderGroupName(scope, rg1);
    assertEquals(subscriber1, subs.get(0));
    // First stream-cut update for the subscriber.
    StreamCut streamCut = new StreamCutImpl(scopedStream, ImmutableMap.of(segment0, 1L, segment1, 11L));
    assertTrue(controller.updateSubscriberStreamCut(scope, stream, subscriber1, createConfig.getReaderGroupId(), createConfig.getGeneration(), streamCut).get());
    // A subsequent cut with larger offsets must also be accepted.
    StreamCut streamCutNew = new StreamCutImpl(scopedStream, ImmutableMap.of(segment0, 2L, segment1, 22L));
    assertTrue(controller.updateSubscriberStreamCut(scope, stream, subscriber1, createConfig.getReaderGroupId(), createConfig.getGeneration(), streamCutNew).get());
}
Aggregations