use of io.pravega.client.admin.ReaderGroupManager in project pravega by pravega.
From class EndToEndTruncationTest, method testParallelSegmentOffsetTruncation.
/**
 * This test verifies that truncation works when specifying an offset that applies to multiple segments. To this end,
 * the test first writes a set of events to a Stream (with multiple segments) and truncates it at a specified offset
 * (truncatedEvents). The test asserts that readers get a TruncatedDataException after truncation and then read
 * (only) the remaining events that have not been truncated.
 */
@Test(timeout = 600000)
public void testParallelSegmentOffsetTruncation() {
    final String scope = "truncationTests";
    final String streamName = "testParallelSegmentOffsetTruncation";
    final int parallelism = 2;
    final int totalEvents = 100;
    final int truncatedEvents = 25;
    StreamConfiguration streamConf = StreamConfiguration.builder()
            .scalingPolicy(ScalingPolicy.fixed(parallelism))
            .build();
    @Cleanup
    StreamManager streamManager = StreamManager.create(PRAVEGA.getControllerURI());
    @Cleanup
    ReaderGroupManager groupManager = ReaderGroupManager.withScope(scope, PRAVEGA.getControllerURI());
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope,
            ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    streamManager.createScope(scope);

    // Test truncation in a new and in a re-created stream.
    for (int i = 0; i < 2; i++) {
        final String readerGroupName = "RGTestParallelSegmentOffsetTruncation" + i;
        streamManager.createStream(scope, streamName, streamConf);
        groupManager.createReaderGroup(readerGroupName, ReaderGroupConfig.builder()
                .disableAutomaticCheckpoints()
                .stream(Stream.of(scope, streamName))
                .build());
        @Cleanup
        ReaderGroup readerGroup = groupManager.getReaderGroup(readerGroupName);

        // Write events to the Stream.
        writeEvents(clientFactory, streamName, totalEvents);

        // Instantiate readers to consume from the Stream up to truncatedEvents.
        List<CompletableFuture<Integer>> futures =
                ReadWriteUtils.readEvents(clientFactory, readerGroupName, parallelism, truncatedEvents);
        Futures.allOf(futures).join();
        int eventsReadBeforeTruncation = futures.stream()
                .map(CompletableFuture::join)
                .reduce(Integer::sum)
                .get();

        // Perform truncation on the stream at the StreamCut captured by a checkpoint.
        Checkpoint cp = readerGroup.initiateCheckpoint("myCheckpoint" + i, executorService()).join();
        StreamCut streamCut = cp.asImpl().getPositions().values().iterator().next();
        assertTrue(streamManager.truncateStream(scope, streamName, streamCut));

        // Just after the truncation, trying to read the whole stream should raise a TruncatedDataException.
        final String newGroupName = readerGroupName + "new";
        groupManager.createReaderGroup(newGroupName,
                ReaderGroupConfig.builder().stream(Stream.of(scope, streamName)).build());
        futures = readEvents(clientFactory, newGroupName, parallelism);
        Futures.allOf(futures).join();
        assertEquals("Expected read events: ", totalEvents - eventsReadBeforeTruncation,
                (int) futures.stream().map(CompletableFuture::join).reduce((a, b) -> a + b).get());
        assertTrue(streamManager.sealStream(scope, streamName));
        assertTrue(streamManager.deleteStream(scope, streamName));
    }
}
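The pattern this test exercises can be reduced to three calls: take a Checkpoint on the reader group, extract the StreamCut from its recorded positions, and hand that StreamCut to StreamManager#truncateStream. The following is a minimal standalone sketch of that flow, not part of the test above; it assumes the scope and stream already exist, that the group's readers are online and reading so the checkpoint can complete, and it uses placeholder names and a placeholder controller URI.

import io.pravega.client.admin.ReaderGroupManager;
import io.pravega.client.admin.StreamManager;
import io.pravega.client.stream.Checkpoint;
import io.pravega.client.stream.ReaderGroup;
import io.pravega.client.stream.ReaderGroupConfig;
import io.pravega.client.stream.Stream;
import io.pravega.client.stream.StreamCut;
import java.net.URI;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;

public class TruncateAtCheckpointSketch {
    public static void main(String[] args) {
        URI controller = URI.create("tcp://localhost:9090"); // placeholder controller URI
        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        try (StreamManager streamManager = StreamManager.create(controller);
             ReaderGroupManager groupManager = ReaderGroupManager.withScope("myScope", controller)) {
            groupManager.createReaderGroup("myGroup",
                    ReaderGroupConfig.builder().stream(Stream.of("myScope", "myStream")).build());
            ReaderGroup readerGroup = groupManager.getReaderGroup("myGroup");
            // A checkpoint records the readers' positions; its StreamCut marks the truncation point.
            // (Assumes the group's readers are actively reading so the checkpoint can complete.)
            Checkpoint cp = readerGroup.initiateCheckpoint("truncationPoint", executor).join();
            StreamCut cut = cp.asImpl().getPositions().get(Stream.of("myScope", "myStream"));
            // Everything before the StreamCut becomes unreadable; readers positioned earlier
            // will receive a TruncatedDataException on their next read.
            streamManager.truncateStream("myScope", "myStream", cut);
        } finally {
            executor.shutdown();
        }
    }
}

After truncation succeeds, a reader group positioned before the StreamCut gets a TruncatedDataException, which is exactly what the second reader group in the test observes before reading the surviving events.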
use of io.pravega.client.admin.ReaderGroupManager in project pravega by pravega.
From class EndToEndReaderGroupTest, method testLaggingResetReaderGroup.
@Test(timeout = 30000)
public void testLaggingResetReaderGroup() throws Exception {
    StreamConfiguration config = getStreamConfig();
    LocalController controller = (LocalController) PRAVEGA.getLocalController();
    controller.createScope("test").get();
    controller.createStream("test", "testLaggingResetReaderGroup", config).get();
    controller.createStream("test", "testLaggingResetReaderGroup2", config).get();
    @Cleanup
    ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(
            ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup
    ClientFactoryImpl clientFactory = new ClientFactoryImpl("test", controller, connectionFactory);
    @Cleanup
    ReaderGroupManager groupManager = new ReaderGroupManagerImpl("test", controller, clientFactory);
    UUID rgId = UUID.randomUUID();
    ReaderGroupConfig rgConf = ReaderGroupConfig.builder()
            .disableAutomaticCheckpoints()
            .stream("test/testLaggingResetReaderGroup")
            .retentionType(ReaderGroupConfig.StreamDataRetention.NONE)
            .build();
    rgConf = ReaderGroupConfig.cloneConfig(rgConf, rgId, 0L);
    // Create a ReaderGroup.
    groupManager.createReaderGroup("testLaggingResetReaderGroup-group", rgConf);
    ReaderGroupConfig updateConf = ReaderGroupConfig.builder()
            .disableAutomaticCheckpoints()
            .stream("test/testLaggingResetReaderGroup2")
            .retentionType(ReaderGroupConfig.StreamDataRetention.NONE)
            .build();
    updateConf = ReaderGroupConfig.cloneConfig(updateConf, rgId, 0L);
    // Update from the Controller end.
    controller.updateReaderGroup("test", "testLaggingResetReaderGroup-group", updateConf).join();
    ReaderGroup group = groupManager.getReaderGroup("testLaggingResetReaderGroup-group");
    // Reset from the client end.
    group.resetReaderGroup(ReaderGroupConfig.builder()
            .disableAutomaticCheckpoints()
            .stream("test/testLaggingResetReaderGroup")
            .build());
}
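The client-side call the test ends with, ReaderGroup#resetReaderGroup, is the usual way to repoint an existing group at a different stream or starting position. Below is a minimal sketch of that call on its own, outside the test harness; scope, stream, and group names are placeholders and the group is assumed to exist already.

import io.pravega.client.admin.ReaderGroupManager;
import io.pravega.client.stream.ReaderGroup;
import io.pravega.client.stream.ReaderGroupConfig;
import io.pravega.client.stream.Stream;
import java.net.URI;

public class ResetReaderGroupSketch {
    public static void main(String[] args) {
        URI controller = URI.create("tcp://localhost:9090"); // placeholder controller URI
        try (ReaderGroupManager groupManager = ReaderGroupManager.withScope("myScope", controller)) {
            ReaderGroup group = groupManager.getReaderGroup("myGroup");
            // Repoint the group at another stream; its readers restart from the
            // starting positions defined by the new configuration.
            group.resetReaderGroup(ReaderGroupConfig.builder()
                    .disableAutomaticCheckpoints()
                    .stream(Stream.of("myScope", "myOtherStream"))
                    .build());
        }
    }
}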
use of io.pravega.client.admin.ReaderGroupManager in project pravega by pravega.
From class EndToEndReaderGroupTest, method testGenerateStreamCutsWithScaling.
@Test(timeout = 40000)
public void testGenerateStreamCutsWithScaling() throws Exception {
    String streamName = "testGenerateStreamCutsWithScaling";
    final Stream stream = Stream.of(SCOPE, streamName);
    final String group = "testGenerateStreamCutsWithScaling-group";
    createScope(SCOPE);
    createStream(SCOPE, streamName, ScalingPolicy.fixed(2));
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE,
            ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, serializer,
            EventWriterConfig.builder().build());

    // Prep the stream with data.
    // 1. Write 2 events with event size of 30 to Segment 0.
    writer.writeEvent(keyGenerator.apply("0.1"), getEventData.apply(0)).join();
    writer.writeEvent(keyGenerator.apply("0.1"), getEventData.apply(0)).join();
    // 2. Write 2 events with event size of 30 to Segment 1.
    writer.writeEvent(keyGenerator.apply("0.9"), getEventData.apply(1)).join();
    writer.writeEvent(keyGenerator.apply("0.9"), getEventData.apply(1)).join();

    // 3. Manually scale the stream: split Segment 0 into Segment 2 and Segment 3.
    Map<Double, Double> newKeyRanges = new HashMap<>();
    newKeyRanges.put(0.0, 0.25);
    newKeyRanges.put(0.25, 0.5);
    newKeyRanges.put(0.5, 1.0);
    scaleStream(streamName, newKeyRanges);

    // 4. Write events to Segment 2.
    writer.writeEvent(keyGenerator.apply("0.1"), getEventData.apply(2));
    // 5. Write events to Segment 3.
    writer.writeEvent(keyGenerator.apply("0.3"), getEventData.apply(3));
    // 6. Write events to Segment 1.
    writer.writeEvent(keyGenerator.apply("0.9"), getEventData.apply(1));

    @Cleanup
    ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, PRAVEGA.getControllerURI());
    groupManager.createReaderGroup(group, ReaderGroupConfig.builder()
            .disableAutomaticCheckpoints()
            .groupRefreshTimeMillis(200)
            .stream(stream)
            .build());
    ReaderGroup readerGroup = groupManager.getReaderGroup(group);

    // 7. Create two readers.
    @Cleanup
    EventStreamReader<String> reader1 = clientFactory.createReader("reader1", group, serializer,
            ReaderConfig.builder().build());
    @Cleanup
    EventStreamReader<String> reader2 = clientFactory.createReader("reader2", group, serializer,
            ReaderConfig.builder().build());

    // 8. Read 1 event from both the readers.
    String reader1Event = reader1.readNextEvent(15000).getEvent();
    String reader2Event = reader2.readNextEvent(15000).getEvent();

    // 9. Read the remaining events from Segment 0 and Segment 1.
    if (reader1Event.equalsIgnoreCase(getEventData.apply(0))) {
        assertEquals(getEventData.apply(0), reader1.readNextEvent(15000).getEvent());
        assertEquals(getEventData.apply(1), reader2Event);
        readAndVerify(reader2, 1);
    } else {
        assertEquals(getEventData.apply(1), reader1.readNextEvent(15000).getEvent());
        assertEquals(getEventData.apply(0), reader2Event);
        readAndVerify(reader2, 0);
    }

    // Readers see the empty segments.
    EventRead<String> data = reader2.readNextEvent(100);
    assertNull(data.getEvent());
    data = reader1.readNextEvent(100);
    assertNull(data.getEvent());

    @Cleanup("shutdown")
    InlineExecutor backgroundExecutor = new InlineExecutor();
    readerGroup.initiateCheckpoint("cp1", backgroundExecutor);
    data = reader1.readNextEvent(5000);
    assertEquals("cp1", data.getCheckpointName());
    data = reader2.readNextEvent(5000);
    assertEquals("cp1", data.getCheckpointName());

    // New segments are available to read.
    reader1Event = reader1.readNextEvent(5000).getEvent();
    assertNotNull(reader1Event);
    reader2Event = reader2.readNextEvent(5000).getEvent();
    assertNotNull(reader2Event);

    // 10. Generate StreamCuts.
    CompletableFuture<Map<Stream, StreamCut>> sc = readerGroup.generateStreamCuts(backgroundExecutor);
    // The reader group state will be updated after 1 second.
    TimeUnit.SECONDS.sleep(1);
    reader1Event = reader1.readNextEvent(500).getEvent();
    reader2Event = reader2.readNextEvent(500).getEvent();

    // 11. Validate the generated StreamCut; wait until it is obtained.
    assertTrue(Futures.await(sc));
    Set<Segment> expectedSegments = ImmutableSet.<Segment>builder()
            .add(getSegment(streamName, 4, 1))  // 1 event read from segment 1.
            .add(getSegment(streamName, 2, 1))  // 1 event read from segment 2 or 3.
            .add(getSegment(streamName, 3, 1))
            .build();
    Map<Stream, StreamCut> scMap = sc.join();
    assertEquals("StreamCut for a single stream expected", 1, scMap.size());
    assertEquals(expectedSegments, scMap.get(stream).asImpl().getPositions().keySet());
}
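The API at the center of this test is ReaderGroup#generateStreamCuts, which asynchronously collects the current positions of the group's readers into a map of Stream to StreamCut. A minimal sketch of that call on its own is shown below; it is not part of the test, the names and controller URI are placeholders, the stream and group are assumed to exist, and the group's readers are assumed to be actively calling readNextEvent so their positions can be gathered and the future can complete.

import io.pravega.client.admin.ReaderGroupManager;
import io.pravega.client.stream.ReaderGroup;
import io.pravega.client.stream.ReaderGroupConfig;
import io.pravega.client.stream.Stream;
import io.pravega.client.stream.StreamCut;
import java.net.URI;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;

public class GenerateStreamCutsSketch {
    public static void main(String[] args) {
        URI controller = URI.create("tcp://localhost:9090"); // placeholder controller URI
        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        try (ReaderGroupManager groupManager = ReaderGroupManager.withScope("myScope", controller)) {
            groupManager.createReaderGroup("myGroup",
                    ReaderGroupConfig.builder().stream(Stream.of("myScope", "myStream")).build());
            ReaderGroup readerGroup = groupManager.getReaderGroup("myGroup");
            // Ask the group to record its readers' current positions. The future completes once the
            // positions are published, so readers should keep reading while this is pending.
            CompletableFuture<Map<Stream, StreamCut>> cuts = readerGroup.generateStreamCuts(executor);
            Map<Stream, StreamCut> positions = cuts.join();
            positions.forEach((s, cut) -> System.out.println(s.getScopedName() + " -> " + cut));
        } finally {
            executor.shutdown();
        }
    }
}

The resulting StreamCuts can later be used as starting or ending positions in a ReaderGroupConfig, or as a truncation point as in the first example on this page.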
use of io.pravega.client.admin.ReaderGroupManager in project pravega by pravega.
From class EndToEndReaderGroupTest, method testReaderOffline.
@Test(timeout = 30000)
public void testReaderOffline() throws Exception {
    StreamConfiguration config = getStreamConfig();
    LocalController controller = (LocalController) PRAVEGA.getLocalController();
    String scopeName = "test";
    String streamName = "testReaderOffline";
    controller.createScope(scopeName).get();
    controller.createStream(scopeName, streamName, config).get();
    @Cleanup
    ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(
            ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup
    ClientFactoryImpl clientFactory = new ClientFactoryImpl(scopeName, controller, connectionFactory);
    @Cleanup
    ReaderGroupManager groupManager = new ReaderGroupManagerImpl(scopeName, controller, clientFactory);
    String groupName = "testReaderOffline-group";
    groupManager.createReaderGroup(groupName, ReaderGroupConfig.builder()
            .disableAutomaticCheckpoints()
            .stream(scopeName + "/" + streamName)
            .build());
    final ReaderGroup readerGroup = groupManager.getReaderGroup(groupName);

    // Create a reader.
    @Cleanup
    EventStreamReader<String> reader1 = clientFactory.createReader("reader1", groupName,
            new JavaSerializer<>(), ReaderConfig.builder().build());
    EventRead<String> eventRead = reader1.readNextEvent(100);
    assertNull("Event read should be null since no events are written", eventRead.getEvent());

    @Cleanup
    EventStreamReader<String> reader2 = clientFactory.createReader("reader2", groupName,
            new JavaSerializer<>(), ReaderConfig.builder().build());
    // Declare reader1 offline so its segments can be reassigned to the remaining readers.
    readerGroup.readerOffline("reader1", null);

    // Write events into the stream.
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName,
            new JavaSerializer<>(), EventWriterConfig.builder().build());
    writer.writeEvent("0", "data1").get();
    writer.writeEvent("0", "data2").get();
    eventRead = reader2.readNextEvent(10000);
    assertEquals("data1", eventRead.getEvent());
}
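The key call here is ReaderGroup#readerOffline, which is how an application tells the group that a reader has stopped (for example, its process crashed) so the segments it owned can be redistributed to the remaining readers. A minimal sketch of that operational step follows; the names and controller URI are placeholders, the group is assumed to exist, and passing null indicates the reader's exact last position is not known.

import io.pravega.client.admin.ReaderGroupManager;
import io.pravega.client.stream.ReaderGroup;
import java.net.URI;

public class ReaderOfflineSketch {
    public static void main(String[] args) {
        URI controller = URI.create("tcp://localhost:9090"); // placeholder controller URI
        try (ReaderGroupManager groupManager = ReaderGroupManager.withScope("myScope", controller)) {
            ReaderGroup group = groupManager.getReaderGroup("myGroup");
            // Declare the failed reader offline; null means its exact last position is unknown,
            // so the group falls back to the position it last recorded for that reader.
            group.readerOffline("crashedReaderId", null);
        }
    }
}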
use of io.pravega.client.admin.ReaderGroupManager in project pravega by pravega.
From class EndToEndReaderGroupTest, method testMultiScopeReaderGroup.
@Test(timeout = 30000)
public void testMultiScopeReaderGroup() throws Exception {
    LocalController controller = (LocalController) PRAVEGA.getLocalController();
    // Configure two streams with the same name in different scopes.
    String defaultScope = "test";
    String scopeA = "scopeA";
    String scopeB = "scopeB";
    String streamName = "testMultiScopeReaderGroup";

    // Create scopes.
    controller.createScope(defaultScope).join();
    controller.createScope(scopeA).join();
    controller.createScope(scopeB).join();

    // Create streams.
    controller.createStream(scopeA, streamName, getStreamConfig()).join();
    controller.createStream(scopeB, streamName, getStreamConfig()).join();

    // Create ReaderGroup and reader.
    @Cleanup
    ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(
            ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup
    ClientFactoryImpl clientFactory = new ClientFactoryImpl(defaultScope, controller, connectionFactory);
    @Cleanup
    ReaderGroupManager groupManager = new ReaderGroupManagerImpl(defaultScope, controller, clientFactory);
    String groupName = "testMultiScopeReaderGroup-group";
    groupManager.createReaderGroup(groupName, ReaderGroupConfig.builder()
            .disableAutomaticCheckpoints()
            .stream(Stream.of(scopeA, streamName))
            .stream(Stream.of(scopeB, streamName))
            .build());
    ReaderGroup readerGroup = groupManager.getReaderGroup(groupName);
    @Cleanup
    EventStreamReader<String> reader1 = clientFactory.createReader("reader1", groupName,
            new JavaSerializer<>(), ReaderConfig.builder().build());

    // Read from the empty streams.
    EventRead<String> eventRead = reader1.readNextEvent(100);
    assertNull("Event read should be null since no events are written", eventRead.getEvent());

    // Write to the scopeA stream.
    writeTestEvent(scopeA, streamName, 0);
    eventRead = reader1.readNextEvent(10000);
    assertEquals("0", eventRead.getEvent());

    // Write to the scopeB stream.
    writeTestEvent(scopeB, streamName, 1);
    eventRead = reader1.readNextEvent(10000);
    assertEquals("1", eventRead.getEvent());

    // Verify ReaderGroup.getStreamNames().
    Set<String> managedStreams = readerGroup.getStreamNames();
    assertTrue(managedStreams.contains(Stream.of(scopeA, streamName).getScopedName()));
    assertTrue(managedStreams.contains(Stream.of(scopeB, streamName).getScopedName()));
}
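As the test shows, a reader group is not limited to the scope its manager was created in: the ReaderGroupConfig can list fully scoped streams from several scopes, and a single reader then receives events from all of them. Below is a minimal sketch of such a configuration, separate from the test; the scopes, stream names, group name, and controller URI are placeholders and the streams are assumed to exist with data.

import io.pravega.client.ClientConfig;
import io.pravega.client.EventStreamClientFactory;
import io.pravega.client.admin.ReaderGroupManager;
import io.pravega.client.stream.EventStreamReader;
import io.pravega.client.stream.ReaderConfig;
import io.pravega.client.stream.ReaderGroupConfig;
import io.pravega.client.stream.Stream;
import io.pravega.client.stream.impl.JavaSerializer;
import java.net.URI;

public class MultiScopeReaderGroupSketch {
    public static void main(String[] args) {
        URI controller = URI.create("tcp://localhost:9090"); // placeholder controller URI
        ClientConfig clientConfig = ClientConfig.builder().controllerURI(controller).build();
        try (ReaderGroupManager groupManager = ReaderGroupManager.withScope("homeScope", controller);
             EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope("homeScope", clientConfig)) {
            // One group subscribed to same-named streams living in two different scopes.
            groupManager.createReaderGroup("multiScopeGroup", ReaderGroupConfig.builder()
                    .stream(Stream.of("scopeA", "myStream"))
                    .stream(Stream.of("scopeB", "myStream"))
                    .build());
            try (EventStreamReader<String> reader = clientFactory.createReader("reader1", "multiScopeGroup",
                    new JavaSerializer<>(), ReaderConfig.builder().build())) {
                // Events from both streams are delivered through the same reader.
                System.out.println(reader.readNextEvent(1000).getEvent());
            }
        }
    }
}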