Use of io.pravega.client.stream.impl.StreamCutImpl in the pravega project (pravega/pravega):
method createReaderGroupTest of class ControllerServiceWithStreamTest.
@Test(timeout = 5000)
public void createReaderGroupTest() throws Exception {
    final String streamName = "stream1";
    final ScalingPolicy fixedPolicy = ScalingPolicy.fixed(2);
    final StreamConfiguration streamConfig = StreamConfiguration.builder().scalingPolicy(fixedPolicy).build();
    // Timestamp used as the stream's creation time.
    final long creationTime = System.currentTimeMillis();

    // Create the scope and verify it succeeded.
    Controller.CreateScopeStatus scopeResult = consumer.createScope(SCOPE, 0L).join();
    assertEquals(Controller.CreateScopeStatus.Status.SUCCESS, scopeResult.getStatus());

    // Create the stream and verify it succeeded.
    Controller.CreateStreamStatus streamResult = consumer.createStream(SCOPE, streamName, streamConfig, creationTime, 0L).get();
    assertEquals(Controller.CreateStreamStatus.Status.SUCCESS, streamResult.getStatus());

    // Build starting and ending stream cuts over the stream's two segments.
    final Segment segmentZero = new Segment(SCOPE, "stream1", 0L);
    final Segment segmentOne = new Segment(SCOPE, "stream1", 1L);
    ImmutableMap<Segment, Long> startOffsets = ImmutableMap.of(segmentZero, 10L, segmentOne, 10L);
    Map<Stream, StreamCut> startCuts = ImmutableMap.of(Stream.of(SCOPE, "stream1"), new StreamCutImpl(Stream.of(SCOPE, "stream1"), startOffsets));
    ImmutableMap<Segment, Long> endOffsets = ImmutableMap.of(segmentZero, 200L, segmentOne, 300L);
    Map<Stream, StreamCut> endCuts = ImmutableMap.of(Stream.of(SCOPE, "stream1"), new StreamCutImpl(Stream.of(SCOPE, "stream1"), endOffsets));

    // Reader group bounded by the cuts above, with automatic retention.
    ReaderGroupConfig groupConfig = ReaderGroupConfig.builder()
            .automaticCheckpointIntervalMillis(30000L)
            .groupRefreshTimeMillis(20000L)
            .maxOutstandingCheckpointRequest(2)
            .retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT)
            .startingStreamCuts(startCuts)
            .endingStreamCuts(endCuts)
            .build();

    Controller.CreateReaderGroupResponse createResponse = consumer.createReaderGroup(SCOPE, "rg1", groupConfig, System.currentTimeMillis(), 0L).get();
    assertEquals(Controller.CreateReaderGroupResponse.Status.SUCCESS, createResponse.getStatus());
    // The store should have been hit exactly once for the creation.
    verify(streamStore, times(1)).createReaderGroup(anyString(), anyString(), any(), anyLong(), any(), any());

    // Creating the same reader group again still reports SUCCESS...
    createResponse = consumer.createReaderGroup(SCOPE, "rg1", groupConfig, System.currentTimeMillis(), 0L).get();
    assertEquals(Controller.CreateReaderGroupResponse.Status.SUCCESS, createResponse.getStatus());
    // ...but the store-level create must NOT be invoked a second time.
    verify(streamStore, times(1)).createReaderGroup(anyString(), anyString(), any(), anyLong(), any(), any());
}
Use of io.pravega.client.stream.impl.StreamCutImpl in the pravega project (pravega/pravega):
method testReaderGroupManualRetention of class EndToEndCBRTest.
@Test(timeout = 60000)
public void testReaderGroupManualRetention() throws Exception {
    String scope = "test";
    String streamName = "testReaderGroupManualRetention";
    String groupName = "testReaderGroupManualRetention-group";
    // Single-segment stream with size-based retention so truncation can kick in.
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).retentionPolicy(RetentionPolicy.bySizeBytes(10, Long.MAX_VALUE)).build();
    LocalController controller = (LocalController) PRAVEGA.getLocalController();
    controller.createScope(scope).get();
    controller.createStream(scope, streamName, config).get();
    Stream stream = Stream.of(scope, streamName);
    @Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
    // Write two events to the stream.
    @Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, serializer, EventWriterConfig.builder().build());
    writer.writeEvent("1", "e1").join();
    writer.writeEvent("2", "e2").join();
    // Create a reader group configured for MANUAL retention: the stream is only
    // truncated at stream cuts the user explicitly publishes.
    @Cleanup ReaderGroupManager groupManager = new ReaderGroupManagerImpl(scope, controller, clientFactory);
    groupManager.createReaderGroup(groupName, ReaderGroupConfig.builder().disableAutomaticCheckpoints().retentionType(ReaderGroupConfig.StreamDataRetention.MANUAL_RELEASE_AT_USER_STREAMCUT).stream(stream).build());
    // Create a reader driven by a manual clock so readNextEvent timing is deterministic.
    AtomicLong clock = new AtomicLong();
    @Cleanup EventStreamReader<String> reader = clientFactory.createReader("reader1", groupName, serializer, ReaderConfig.builder().build(), clock::get, clock::get);
    clock.addAndGet(CLOCK_ADVANCE_INTERVAL);
    EventRead<String> read = reader.readNextEvent(60000);
    assertEquals("e1", read.getEvent());
    clock.addAndGet(CLOCK_ADVANCE_INTERVAL);
    read = reader.readNextEvent(60000);
    assertEquals("e2", read.getEvent());
    // Manually publish a retention stream cut at offset 17 of segment 0
    // (i.e. past the first event), releasing the data before it.
    ReaderGroup readerGroup = groupManager.getReaderGroup(groupName);
    Map<Segment, Long> segmentMap = new HashMap<>();
    segmentMap.put(new Segment(scope, streamName, 0), 17L);
    Map<Stream, StreamCut> scResult2 = new HashMap<>();
    scResult2.put(stream, new StreamCutImpl(stream, segmentMap));
    readerGroup.updateRetentionStreamCut(scResult2);
    // Wait (up to 30s) for the retention cycle to actually truncate the stream:
    // the head segment offset must move past 0.
    AssertExtensions.assertEventuallyEquals(true, () -> controller.getSegmentsAtTime(stream, 0L).join().values().stream().anyMatch(off -> off > 0), 30 * 1000L);
    // A fresh reader group starting at the (now truncated) head should see only
    // the second event.
    String group2 = groupName + "2";
    groupManager.createReaderGroup(group2, ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(NameUtils.getScopedStreamName(scope, streamName)).build());
    // FIX: annotate with @Cleanup so the reader is closed like every other
    // client resource in this test (it previously leaked).
    @Cleanup EventStreamReader<String> reader2 = clientFactory.createReader("reader2", group2, serializer, ReaderConfig.builder().build());
    EventRead<String> eventRead2 = reader2.readNextEvent(10000);
    assertEquals("e2", eventRead2.getEvent());
}
Use of io.pravega.client.stream.impl.StreamCutImpl in the pravega project (pravega/pravega):
method testGenerateStreamCuts of class EndToEndReaderGroupTest.
@Test(timeout = 30000)
public void testGenerateStreamCuts() throws Exception {
    String streamName = "testGenerateStreamCuts";
    final Stream stream = Stream.of(SCOPE, streamName);
    final String group = "testGenerateStreamCuts-group";
    createScope(SCOPE);
    createStream(SCOPE, streamName, ScalingPolicy.fixed(1));
    @Cleanup EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE, ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, serializer, EventWriterConfig.builder().build());
    // Prep the stream with data.
    // 1. Write 4 events, each of size 30 bytes.
    writer.writeEvent(randomKeyGenerator.get(), getEventData.apply(1)).join();
    writer.writeEvent(randomKeyGenerator.get(), getEventData.apply(2)).join();
    writer.writeEvent(randomKeyGenerator.get(), getEventData.apply(3)).join();
    writer.writeEvent(randomKeyGenerator.get(), getEventData.apply(4)).join();
    @Cleanup ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, PRAVEGA.getControllerURI());
    groupManager.createReaderGroup(group, ReaderGroupConfig.builder().disableAutomaticCheckpoints().groupRefreshTimeMillis(1000).stream(stream).build());
    ReaderGroup readerGroup = groupManager.getReaderGroup(group);
    // Create a reader and consume the first event.
    @Cleanup EventStreamReader<String> reader = clientFactory.createReader("readerId", group, serializer, ReaderConfig.builder().build());
    readAndVerify(reader, 1);
    @Cleanup("shutdown") InlineExecutor backgroundExecutor = new InlineExecutor();
    CompletableFuture<Map<Stream, StreamCut>> sc = readerGroup.generateStreamCuts(backgroundExecutor);
    // The reader group state will be updated after 1 second.
    TimeUnit.SECONDS.sleep(1);
    // This read drives the reader to observe the stream-cut request; the returned
    // event itself is not asserted on.
    EventRead<String> data = reader.readNextEvent(15000);
    // Wait until the streamCut is obtained.
    assertTrue(Futures.await(sc));
    // One event of size 30 was read, so segment 0's expected offset is 30L.
    Map<Segment, Long> expectedOffsetMap = ImmutableMap.of(getSegment(streamName, 0, 0), 30L);
    Map<Stream, StreamCut> scMap = sc.join();
    assertEquals("StreamCut for a single stream expected", 1, scMap.size());
    // FIX: corrected assertion-message typo "pointing ot offset".
    assertEquals("StreamCut pointing at offset 30L expected", new StreamCutImpl(stream, expectedOffsetMap), scMap.get(stream));
}
Use of io.pravega.client.stream.impl.StreamCutImpl in the pravega project (pravega/pravega):
method testReaderOfflineWithSilentCheckpoint of class EndToEndReaderGroupTest.
@Test(timeout = 30000)
public void testReaderOfflineWithSilentCheckpoint() throws Exception {
    String streamName = "testReaderOfflineWithSilentCheckpoint";
    final Stream stream = Stream.of(SCOPE, streamName);
    final String group = "testReaderOfflineWithSilentCheckpoint-group";
    @Cleanup("shutdown") InlineExecutor backgroundExecutor = new InlineExecutor();
    createScope(SCOPE);
    createStream(SCOPE, streamName, ScalingPolicy.fixed(1));
    @Cleanup EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE, ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, serializer, EventWriterConfig.builder().build());
    // Prep the stream with data.
    // 1. Write 4 events, each of size 30 bytes.
    writer.writeEvent(randomKeyGenerator.get(), getEventData.apply(1)).join();
    writer.writeEvent(randomKeyGenerator.get(), getEventData.apply(2)).join();
    writer.writeEvent(randomKeyGenerator.get(), getEventData.apply(3)).join();
    writer.writeEvent(randomKeyGenerator.get(), getEventData.apply(4)).join();
    @Cleanup ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, PRAVEGA.getControllerURI());
    groupManager.createReaderGroup(group, ReaderGroupConfig.builder().disableAutomaticCheckpoints().groupRefreshTimeMillis(1000).stream(stream).build());
    ReaderGroup readerGroup = groupManager.getReaderGroup(group);
    // Create a reader.
    @Cleanup EventStreamReader<String> reader = clientFactory.createReader("readerId", group, serializer, ReaderConfig.builder().build());
    // 2. Read an event.
    readAndVerify(reader, 1);
    // 3. Trigger a checkpoint and verify it is completed.
    CompletableFuture<Checkpoint> checkpoint = readerGroup.initiateCheckpoint("chk1", backgroundExecutor);
    // The reader group state will be updated after 1 second.
    TimeUnit.SECONDS.sleep(1);
    EventRead<String> data = reader.readNextEvent(15000);
    assertTrue(data.isCheckpoint());
    readAndVerify(reader, 2);
    assertTrue("Checkpointing should complete successfully", Futures.await(checkpoint));
    // 4. GenerateStreamCuts and validate the offset of stream cut.
    CompletableFuture<Map<Stream, StreamCut>> sc = readerGroup.generateStreamCuts(backgroundExecutor);
    // The reader group state will be updated after 1 second.
    TimeUnit.SECONDS.sleep(1);
    data = reader.readNextEvent(15000);
    assertTrue("StreamCut generation should complete successfully", Futures.await(sc));
    // Two events of size 30 were read, so segment 0's expected offset is 60L.
    Map<Segment, Long> expectedOffsetMap = ImmutableMap.of(getSegment(streamName, 0, 0), 60L);
    Map<Stream, StreamCut> scMap = sc.join();
    assertEquals("StreamCut for a single stream expected", 1, scMap.size());
    // FIX: message previously claimed "offset 30L" while asserting 60L, and had
    // the typo "pointing ot"; corrected to match the expected value above.
    assertEquals("StreamCut pointing at offset 60L expected", new StreamCutImpl(stream, expectedOffsetMap), scMap.get(stream));
    // 5. Invoke readerOffline with last position as null. The newer readers should start reading
    // from the last checkpointed position.
    readerGroup.readerOffline("readerId", null);
    @Cleanup EventStreamReader<String> reader1 = clientFactory.createReader("readerId", group, serializer, ReaderConfig.builder().build());
    readAndVerify(reader1, 2);
}
Use of io.pravega.client.stream.impl.StreamCutImpl in the pravega project (pravega/pravega):
method testGetStreamCutSuccessors of class ControllerImplTest.
@Test
public void testGetStreamCutSuccessors() throws Exception {
    // A stream cut over scope1/stream1 with no segment offsets recorded.
    StreamCut startingCut = new StreamCutImpl(new StreamImpl("scope1", "stream1"), Collections.emptyMap());
    // Ask the controller client for the segments that succeed this cut.
    CompletableFuture<StreamSegmentSuccessors> successorsFuture = controllerClient.getSuccessors(startingCut);
    assertEquals(2, successorsFuture.get().getSegments().size());
}
Aggregations