Use of io.pravega.client.stream.StreamCut in project pravega by pravega.
Class EndToEndReaderGroupTest, method testGenerateStreamCutsWithScaling.
@Test(timeout = 40000)
public void testGenerateStreamCutsWithScaling() throws Exception {
    String streamName = "testGenerateStreamCutsWithScaling";
    final Stream stream = Stream.of(SCOPE, streamName);
    final String group = "testGenerateStreamCutsWithScaling-group";
    createScope(SCOPE);
    createStream(SCOPE, streamName, ScalingPolicy.fixed(2));
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE,
            ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, serializer,
            EventWriterConfig.builder().build());
    // Prep the stream with data.
    // 1. Write 2 events with an event size of 30 to segment 0.
    writer.writeEvent(keyGenerator.apply("0.1"), getEventData.apply(0)).join();
    writer.writeEvent(keyGenerator.apply("0.1"), getEventData.apply(0)).join();
    // 2. Write 2 events with an event size of 30 to segment 1.
    writer.writeEvent(keyGenerator.apply("0.9"), getEventData.apply(1)).join();
    writer.writeEvent(keyGenerator.apply("0.9"), getEventData.apply(1)).join();
    // 3. Manually scale the stream: seal segments 0 and 1 and replace them with
    //    segments 2 (0.0-0.25), 3 (0.25-0.5) and 4 (0.5-1.0).
    Map<Double, Double> newKeyRanges = new HashMap<>();
    newKeyRanges.put(0.0, 0.25);
    newKeyRanges.put(0.25, 0.5);
    newKeyRanges.put(0.5, 1.0);
    scaleStream(streamName, newKeyRanges);
    // 4. Write an event to segment 2.
    writer.writeEvent(keyGenerator.apply("0.1"), getEventData.apply(2));
    // 5. Write an event to segment 3.
    writer.writeEvent(keyGenerator.apply("0.3"), getEventData.apply(3));
    // 6. Write an event to segment 4 (key 0.9 now routes to the 0.5-1.0 range).
    writer.writeEvent(keyGenerator.apply("0.9"), getEventData.apply(1));
    @Cleanup
    ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, PRAVEGA.getControllerURI());
    groupManager.createReaderGroup(group, ReaderGroupConfig.builder()
            .disableAutomaticCheckpoints()
            .groupRefreshTimeMillis(200)
            .stream(stream)
            .build());
    ReaderGroup readerGroup = groupManager.getReaderGroup(group);
    // 7. Create two readers.
    @Cleanup
    EventStreamReader<String> reader1 = clientFactory.createReader("reader1", group, serializer,
            ReaderConfig.builder().build());
    @Cleanup
    EventStreamReader<String> reader2 = clientFactory.createReader("reader2", group, serializer,
            ReaderConfig.builder().build());
    // 8. Read 1 event from each reader.
    String reader1Event = reader1.readNextEvent(15000).getEvent();
    String reader2Event = reader2.readNextEvent(15000).getEvent();
    // 9. Read the remaining pre-scale events from segments 0 and 1.
    if (reader1Event.equalsIgnoreCase(getEventData.apply(0))) {
        assertEquals(getEventData.apply(0), reader1.readNextEvent(15000).getEvent());
        assertEquals(getEventData.apply(1), reader2Event);
        readAndVerify(reader2, 1);
    } else {
        assertEquals(getEventData.apply(1), reader1.readNextEvent(15000).getEvent());
        assertEquals(getEventData.apply(0), reader2Event);
        readAndVerify(reader2, 0);
    }
    // The readers have reached the end of the sealed segments; no further events
    // are returned until a checkpoint lets them advance to the successor segments.
    EventRead<String> data = reader2.readNextEvent(100);
    assertNull(data.getEvent());
    data = reader1.readNextEvent(100);
    assertNull(data.getEvent());
    @Cleanup("shutdown")
    InlineExecutor backgroundExecutor = new InlineExecutor();
    readerGroup.initiateCheckpoint("cp1", backgroundExecutor);
    data = reader1.readNextEvent(5000);
    assertEquals("cp1", data.getCheckpointName());
    data = reader2.readNextEvent(5000);
    assertEquals("cp1", data.getCheckpointName());
    // After the checkpoint, the new segments are available to read.
    reader1Event = reader1.readNextEvent(5000).getEvent();
    assertNotNull(reader1Event);
    reader2Event = reader2.readNextEvent(5000).getEvent();
    assertNotNull(reader2Event);
    // 10. Generate StreamCuts.
    CompletableFuture<Map<Stream, StreamCut>> sc = readerGroup.generateStreamCuts(backgroundExecutor);
    // The reader group state refreshes every 200 ms; give the readers a second
    // to publish their positions.
    TimeUnit.SECONDS.sleep(1);
    reader1Event = reader1.readNextEvent(500).getEvent();
    reader2Event = reader2.readNextEvent(500).getEvent();
    // 11. Wait for the StreamCut and validate it.
    assertTrue(Futures.await(sc));
    Set<Segment> expectedSegments = ImmutableSet.<Segment>builder()
            .add(getSegment(streamName, 4, 1)) // 1 event read from segment 4.
            .add(getSegment(streamName, 2, 1)) // 1 event read from segment 2 or 3.
            .add(getSegment(streamName, 3, 1))
            .build();
    Map<Stream, StreamCut> scMap = sc.join();
    assertEquals("StreamCut for a single stream expected", 1, scMap.size());
    assertEquals(expectedSegments, scMap.get(stream).asImpl().getPositions().keySet());
}
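The Map<Stream, StreamCut> returned by generateStreamCuts can later be fed back as a starting position. Below is a minimal sketch, not part of the test above, of rewinding the same reader group to the generated cut; it reuses the readerGroup, stream and scMap variables from the test and relies on the public ReaderGroup.resetReaderGroup and ReaderGroupConfig.builder().stream(stream, startCut) APIs.
// Sketch: rewind the reader group to the StreamCut generated above.
StreamCut startCut = scMap.get(stream);
readerGroup.resetReaderGroup(ReaderGroupConfig.builder()
        .stream(stream, startCut) // start reading again from the generated cut
        .build());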
Use of io.pravega.client.stream.StreamCut in project pravega by pravega.
Class ReaderGroupImpl, method getUnreadBytes.
private long getUnreadBytes(Map<Stream, Map<Segment, Long>> positions, SegmentMetadataClientFactory metaFactory) {
    log.debug("Compute unread bytes from position {}", positions);
    long totalLength = 0;
    for (Entry<Stream, Map<Segment, Long>> streamPosition : positions.entrySet()) {
        StreamCut position = new StreamCutImpl(streamPosition.getKey(), streamPosition.getValue());
        totalLength += getRemainingBytes(metaFactory, position);
    }
    return totalLength;
}
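getRemainingBytes is not shown in this snippet. A simplified sketch of what it could look like follows; it only counts the bytes left in the segments named by the cut and ignores the successor segments that the real implementation also accounts for after a scale. The createSegmentMetadataClient signature, the tokenProvider field, and the SegmentInfo accessors are assumptions for illustration, not verbatim Pravega internals.
// Sketch: unread bytes per segment = current write offset - reader offset.
private long getRemainingBytes(SegmentMetadataClientFactory metaFactory, StreamCut position) {
    long remaining = 0;
    for (Entry<Segment, Long> entry : position.asImpl().getPositions().entrySet()) {
        @Cleanup
        SegmentMetadataClient client = metaFactory.createSegmentMetadataClient(entry.getKey(), tokenProvider);
        remaining += client.getSegmentInfo().getWriteOffset() - entry.getValue();
    }
    return remaining;
}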
Use of io.pravega.client.stream.StreamCut in project pravega by pravega.
Class ReaderGroupImpl, method resetReadersToCheckpoint.
@SuppressWarnings("deprecation")
@Override
public void resetReadersToCheckpoint(Checkpoint checkpoint) {
    @Cleanup
    StateSynchronizer<ReaderGroupState> synchronizer = createSynchronizer();
    synchronizer.updateState(state -> {
        ReaderGroupConfig config = state.getConfig();
        Map<Segment, Long> positions = new HashMap<>();
        for (StreamCut cut : checkpoint.asImpl().getPositions().values()) {
            positions.putAll(cut.asImpl().getPositions());
        }
        return Collections.singletonList(new ReaderGroupStateInit(config, positions));
    });
}
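As the @SuppressWarnings hints, resetReadersToCheckpoint is deprecated. A hedged usage sketch of the replacement path: take a checkpoint, then rewind the group with ReaderGroupConfig.startFromCheckpoint, both public client APIs; the readerGroup and executor variables are assumed to exist in the caller.
// Sketch: checkpoint the group, then rewind it to that checkpoint.
CompletableFuture<Checkpoint> cpFuture = readerGroup.initiateCheckpoint("cp1", executor);
Checkpoint cp = cpFuture.join();
readerGroup.resetReaderGroup(ReaderGroupConfig.builder()
        .startFromCheckpoint(cp)
        .build());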
Use of io.pravega.client.stream.StreamCut in project pravega by pravega.
Class ControllerImplTest, method testCutpointSuccessors.
@Test
public void testCutpointSuccessors() throws Exception {
    String scope = "scope1";
    String stream = "stream1";
    Stream s = new StreamImpl(scope, stream);
    Map<Segment, Long> segments = new HashMap<>();
    segments.put(new Segment(scope, stream, 0), 4L);
    segments.put(new Segment(scope, stream, 1), 6L);
    StreamCut cut = new StreamCutImpl(s, segments);
    Set<Segment> successors = controllerClient.getSuccessors(cut).get().getSegments();
    assertEquals(ImmutableSet.of(new Segment(scope, stream, 0), new Segment(scope, stream, 1),
            new Segment(scope, stream, 2), new Segment(scope, stream, 3),
            new Segment(scope, stream, 4), new Segment(scope, stream, 5),
            new Segment(scope, stream, 6), new Segment(scope, stream, 7)), successors);
}
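A StreamCut built this way usually has to outlive the client that created it. The public StreamCut API exposes a compact binary form (toBytes/fromBytes over java.nio.ByteBuffer) for exactly that; a small sketch, reusing the cut variable from the test above:
// Sketch: round-trip a StreamCut through its binary representation.
ByteBuffer serialized = cut.toBytes();
StreamCut restored = StreamCut.fromBytes(serialized);
assertEquals(cut, restored);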
Use of io.pravega.client.stream.StreamCut in project pravega by pravega.
Class ControllerImplTest, method testGetSegmentsWithValidStreamCuts.
@Test
public void testGetSegmentsWithValidStreamCuts() throws Exception {
    String scope = "scope1";
    String stream = "stream1";
    Stream s = new StreamImpl(scope, stream);
    Map<Segment, Long> startSegments = new HashMap<>();
    startSegments.put(new Segment(scope, stream, 0), 4L);
    startSegments.put(new Segment(scope, stream, 1), 6L);
    StreamCut cut = new StreamCutImpl(s, startSegments);
    Map<Segment, Long> endSegments = new HashMap<>();
    endSegments.put(new Segment(scope, stream, 6), 10L);
    endSegments.put(new Segment(scope, stream, 7), 10L);
    StreamCut endSC = new StreamCutImpl(s, endSegments);
    Set<Segment> result = controllerClient.getSegments(cut, endSC).get().getSegments();
    assertEquals(ImmutableSet.of(new Segment(scope, stream, 0), new Segment(scope, stream, 1),
            new Segment(scope, stream, 2), new Segment(scope, stream, 3),
            new Segment(scope, stream, 4), new Segment(scope, stream, 5),
            new Segment(scope, stream, 6), new Segment(scope, stream, 7)), result);
}
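The same start/end pair can also bound a reader group, so readers only see the events between the two cuts. A minimal sketch, assuming a ReaderGroupManager named groupManager for the scope and reusing the cut and endSC variables from the test:
// Sketch: a reader group bounded to the events between the two StreamCuts.
groupManager.createReaderGroup("boundedGroup", ReaderGroupConfig.builder()
        .stream(Stream.of(scope, stream), cut, endSC)
        .build());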