Use of io.pravega.client.stream.StreamCut in project pravega by pravega.
Source: class BatchClientTest, method testBatchClientWithStreamTruncationPostGetSegments.
@Test(expected = TruncatedDataException.class, timeout = 50000)
public void testBatchClientWithStreamTruncationPostGetSegments() throws InterruptedException, ExecutionException {
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE, clientConfig);
    createTestStreamWithEvents(clientFactory);
    @Cleanup
    BatchClientFactory batchClient = BatchClientFactory.withScope(SCOPE, clientConfig);
    // 1. Fetch segments (before truncation).
    ArrayList<SegmentRange> segmentsPostTruncation = Lists.newArrayList(
            batchClient.getSegments(Stream.of(SCOPE, STREAM), StreamCut.UNBOUNDED, StreamCut.UNBOUNDED).getIterator());
    // 2. Create a StreamCut at the end of segment 0 (offset = 3 * 30 = 90).
    StreamCut streamCut90L = new StreamCutImpl(Stream.of(SCOPE, STREAM),
            ImmutableMap.of(new Segment(SCOPE, STREAM, 0), 90L));
    // 3. Truncate the stream.
    assertTrue("truncate stream", controllerWrapper.getController().truncateStream(SCOPE, STREAM, streamCut90L).join());
    // 4. Use a SegmentRange obtained before truncation.
    SegmentRange s0 = segmentsPostTruncation.stream()
            .filter(segmentRange -> segmentRange.getSegmentId() == 0L).findFirst().get();
    // 5. Reading the truncated (now non-existent) data should raise TruncatedDataException.
    List<String> eventList = new ArrayList<>();
    @Cleanup
    SegmentIterator<String> segmentIterator = batchClient.readSegment(s0, serializer);
    eventList.addAll(Lists.newArrayList(segmentIterator));
}
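The failure mode this test exercises generalizes to any batch consumer that caches SegmentRanges: a concurrent truncation can invalidate them at any time. A minimal retry sketch, using only the public BatchClientFactory API; the class and method names below are illustrative, not part of Pravega:

import io.pravega.client.BatchClientFactory;
import io.pravega.client.batch.SegmentIterator;
import io.pravega.client.batch.SegmentRange;
import io.pravega.client.stream.Serializer;
import io.pravega.client.stream.Stream;
import io.pravega.client.stream.StreamCut;
import io.pravega.client.stream.TruncatedDataException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

public final class TruncationAwareBatchRead {

    // Read every event in the stream; if a concurrent truncation invalidates the
    // segment ranges we are holding, fetch fresh ranges once and retry.
    public static List<String> readAll(BatchClientFactory batchClient, Stream stream, Serializer<String> serializer) {
        try {
            return readOnce(batchClient, stream, serializer);
        } catch (TruncatedDataException e) {
            return readOnce(batchClient, stream, serializer);
        }
    }

    private static List<String> readOnce(BatchClientFactory batchClient, Stream stream, Serializer<String> serializer) {
        List<String> events = new ArrayList<>();
        // Fetch the current (post-truncation, if any) segment ranges.
        Iterator<SegmentRange> ranges = batchClient
                .getSegments(stream, StreamCut.UNBOUNDED, StreamCut.UNBOUNDED).getIterator();
        while (ranges.hasNext()) {
            try (SegmentIterator<String> segmentIterator = batchClient.readSegment(ranges.next(), serializer)) {
                segmentIterator.forEachRemaining(events::add);
            }
        }
        return events;
    }
}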
Use of io.pravega.client.stream.StreamCut in project pravega by pravega.
Source: class WatermarkingTest, method watermarkingTests.
@Test
public void watermarkingTests() throws Exception {
    final ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);
    @Cleanup
    ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(clientConfig);
    ControllerImpl controller = new ControllerImpl(ControllerImplConfig.builder().clientConfig(clientConfig).build(),
            connectionFactory.getInternalExecutor());
    // Create two writers.
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE, clientConfig);
    JavaSerializer<Long> javaSerializer = new JavaSerializer<>();
    @Cleanup
    EventStreamWriter<Long> writer1 = clientFactory.createEventWriter(STREAM, javaSerializer, EventWriterConfig.builder().build());
    @Cleanup
    EventStreamWriter<Long> writer2 = clientFactory.createEventWriter(STREAM, javaSerializer, EventWriterConfig.builder().build());
    AtomicBoolean stopFlag = new AtomicBoolean(false);
    // Write events.
    writeEvents(writer1, stopFlag);
    writeEvents(writer2, stopFlag);
    // Scale the stream several times so that we get complex positions.
    Stream streamObj = Stream.of(SCOPE, STREAM);
    scale(controller, streamObj);
    @Cleanup
    ClientFactoryImpl syncClientFactory = new ClientFactoryImpl(SCOPE,
            new ControllerImpl(ControllerImplConfig.builder().clientConfig(clientConfig).build(), connectionFactory.getInternalExecutor()),
            connectionFactory);
    String markStream = NameUtils.getMarkStreamForStream(STREAM);
    RevisionedStreamClient<Watermark> watermarkReader = syncClientFactory.createRevisionedStreamClient(markStream,
            new WatermarkSerializer(), SynchronizerConfig.builder().build());
    LinkedBlockingQueue<Watermark> watermarks = new LinkedBlockingQueue<>();
    fetchWatermarks(watermarkReader, watermarks, stopFlag);
    // Wait until at least 2 watermarks are emitted.
    AssertExtensions.assertEventuallyEquals(true, () -> watermarks.size() >= 2, 100000);
    // Scale the controller service down to one instance.
    Futures.getAndHandleExceptions(controllerInstance.scaleService(1), ExecutionException::new);
    // Wait until at least 2 more watermarks are emitted.
    AssertExtensions.assertEventuallyEquals(true, () -> watermarks.size() >= 4, 100000);
    stopFlag.set(true);
    Watermark watermark0 = watermarks.take();
    Watermark watermark1 = watermarks.take();
    Watermark watermark2 = watermarks.take();
    Watermark watermark3 = watermarks.take();
    assertTrue(watermark0.getLowerTimeBound() <= watermark0.getUpperTimeBound());
    assertTrue(watermark1.getLowerTimeBound() <= watermark1.getUpperTimeBound());
    assertTrue(watermark2.getLowerTimeBound() <= watermark2.getUpperTimeBound());
    assertTrue(watermark3.getLowerTimeBound() <= watermark3.getUpperTimeBound());
    // Verify that watermarks are increasing in time.
    assertTrue(watermark0.getLowerTimeBound() < watermark1.getLowerTimeBound());
    assertTrue(watermark1.getLowerTimeBound() < watermark2.getLowerTimeBound());
    assertTrue(watermark2.getLowerTimeBound() < watermark3.getLowerTimeBound());
    // Use watermarks as the lower and upper bounds of a reader group.
    Map<Segment, Long> positionMap0 = watermark0.getStreamCut().entrySet().stream()
            .collect(Collectors.toMap(x -> new Segment(SCOPE, STREAM, x.getKey().getSegmentId()), Map.Entry::getValue));
    StreamCut streamCutStart = new StreamCutImpl(streamObj, positionMap0);
    Map<Stream, StreamCut> start = Collections.singletonMap(streamObj, streamCutStart);
    Map<Segment, Long> positionMap2 = watermark2.getStreamCut().entrySet().stream()
            .collect(Collectors.toMap(x -> new Segment(SCOPE, STREAM, x.getKey().getSegmentId()), Map.Entry::getValue));
    StreamCut streamCutEnd = new StreamCutImpl(streamObj, positionMap2);
    Map<Stream, StreamCut> end = Collections.singletonMap(streamObj, streamCutEnd);
    @Cleanup
    ReaderGroupManager readerGroupManager = new ReaderGroupManagerImpl(SCOPE, controller, syncClientFactory);
    String readerGroup = "rg";
    readerGroupManager.createReaderGroup(readerGroup,
            ReaderGroupConfig.builder().stream(streamObj).startingStreamCuts(start).endingStreamCuts(end).build());
    // Create a reader on the stream.
    @Cleanup
    final EventStreamReader<Long> reader = clientFactory.createReader("myreader", readerGroup, javaSerializer, ReaderConfig.builder().build());
    // Read events from the reader and verify that every event read lies within the bounds.
    EventRead<Long> event = reader.readNextEvent(10000L);
    AtomicReference<TimeWindow> currentTimeWindow = new AtomicReference<>();
    AssertExtensions.assertEventuallyEquals(true, () -> {
        currentTimeWindow.set(reader.getCurrentTimeWindow(streamObj));
        return currentTimeWindow.get() != null && currentTimeWindow.get().getLowerTimeBound() != null
                && currentTimeWindow.get().getUpperTimeBound() != null;
    }, 100000);
    log.info("current time window = {}", currentTimeWindow.get());
    while (event.getEvent() != null) {
        Long time = event.getEvent();
        log.info("event read = {}", time);
        event.getPosition();
        assertTrue(time >= currentTimeWindow.get().getLowerTimeBound());
        event = reader.readNextEvent(10000L);
        if (event.isCheckpoint()) {
            event = reader.readNextEvent(10000L);
        }
    }
}
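The fetchWatermarks helper above is defined elsewhere in the test class. A plausible sketch of its shape, assuming it polls the revisioned mark stream on a background executor and advances a Revision cursor so each watermark is queued once; the class name, polling interval, and return type are invented here:

import io.pravega.client.state.Revision;
import io.pravega.client.state.RevisionedStreamClient;
import io.pravega.shared.watermarks.Watermark;
import java.util.Iterator;
import java.util.Map.Entry;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;

public final class WatermarkPoller {

    public static ScheduledExecutorService fetchWatermarks(RevisionedStreamClient<Watermark> reader,
                                                           LinkedBlockingQueue<Watermark> sink,
                                                           AtomicBoolean stop) {
        // Start reading from the oldest available revision of the mark stream.
        AtomicReference<Revision> cursor = new AtomicReference<>(reader.fetchOldestRevision());
        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        executor.scheduleAtFixedRate(() -> {
            if (stop.get()) {
                return;
            }
            // Queue every watermark written since the last poll, then advance the cursor.
            Iterator<Entry<Revision, Watermark>> marks = reader.readFrom(cursor.get());
            while (marks.hasNext()) {
                Entry<Revision, Watermark> next = marks.next();
                cursor.set(next.getKey());
                sink.add(next.getValue());
            }
        }, 0, 500, TimeUnit.MILLISECONDS);
        return executor;
    }
}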
Use of io.pravega.client.stream.StreamCut in project pravega by pravega.
Source: class EndToEndTruncationTest, method testSimpleOffsetTruncation.
/**
 * This test checks the basic operation of truncation with offsets. The test first writes two events on a Stream
 * (1 segment) and then truncates the Stream after the first event. We verify that a new reader created after
 * truncation reads only the second event written, as the first has been truncated.
 *
 * @throws ReinitializationRequiredException If a checkpoint or reset is performed on the reader group.
 */
@Test(timeout = 30000)
public void testSimpleOffsetTruncation() throws ReinitializationRequiredException {
    final String scope = "truncationTests";
    final String streamName = "testSimpleOffsetTruncation";
    final String readerGroupName = "RGTestSimpleOffsetTruncation";
    StreamConfiguration streamConfiguration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
    @Cleanup
    StreamManager streamManager = StreamManager.create(PRAVEGA.getControllerURI());
    streamManager.createScope(scope);
    streamManager.createStream(scope, streamName, streamConfiguration);
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope,
            ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup
    ReaderGroupManager groupManager = ReaderGroupManager.withScope(scope, PRAVEGA.getControllerURI());
    groupManager.createReaderGroup(readerGroupName,
            ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(scope + "/" + streamName).build());
    @Cleanup
    ReaderGroup readerGroup = groupManager.getReaderGroup(readerGroupName);
    // Write two events to the Stream.
    writeEvents(clientFactory, streamName, 2);
    // Read only the first one.
    @Cleanup
    EventStreamReader<String> reader = clientFactory.createReader(readerGroupName + "1", readerGroupName,
            new UTF8StringSerializer(), ReaderConfig.builder().build());
    assertEquals("0", reader.readNextEvent(5000).getEvent());
    // Close the reader so it is not expected to participate in the checkpoint below.
    reader.close();
    // Create a Checkpoint, get a StreamCut from it and truncate the Stream at that point.
    Checkpoint cp = readerGroup.initiateCheckpoint("myCheckpoint", executorService()).join();
    StreamCut streamCut = cp.asImpl().getPositions().values().iterator().next();
    assertTrue(streamManager.truncateStream(scope, streamName, streamCut));
    // Verify that a new reader reads from event 1 onwards.
    final String newReaderGroupName = readerGroupName + "new";
    groupManager.createReaderGroup(newReaderGroupName, ReaderGroupConfig.builder().stream(Stream.of(scope, streamName)).build());
    @Cleanup
    final EventStreamReader<String> newReader = clientFactory.createReader(newReaderGroupName + "2", newReaderGroupName,
            new UTF8StringSerializer(), ReaderConfig.builder().build());
    assertEquals("Expected read event: ", "1", newReader.readNextEvent(5000).getEvent());
    assertNull(newReader.readNextEvent(5000).getEvent());
}
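The writeEvents helper used above is defined elsewhere in the test class. A plausible sketch, assuming it writes the integers 0..count-1 as UTF-8 strings (which matches the "0" and "1" events the readers assert on); the class name is illustrative:

import io.pravega.client.EventStreamClientFactory;
import io.pravega.client.stream.EventStreamWriter;
import io.pravega.client.stream.EventWriterConfig;
import io.pravega.client.stream.impl.UTF8StringSerializer;

public final class WriteHelper {

    // Write the numbers 0..totalEvents-1 as UTF-8 string events, waiting for each ack.
    public static void writeEvents(EventStreamClientFactory clientFactory, String streamName, int totalEvents) {
        try (EventStreamWriter<String> writer = clientFactory.createEventWriter(
                streamName, new UTF8StringSerializer(), EventWriterConfig.builder().build())) {
            for (int i = 0; i < totalEvents; i++) {
                writer.writeEvent(String.valueOf(i)).join();
            }
        }
    }
}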
Use of io.pravega.client.stream.StreamCut in project pravega by pravega.
Source: class EndToEndTruncationTest, method testParallelSegmentOffsetTruncation.
/**
 * This test verifies that truncation works when specifying an offset that applies to multiple segments. To this end,
 * the test first writes a set of events on a Stream (with multiple segments) and truncates it at a specified offset
 * (truncatedEvents). The test asserts that readers get a TruncatedDataException after truncation and then read
 * only the remaining events that have not been truncated.
 */
@Test(timeout = 600000)
public void testParallelSegmentOffsetTruncation() {
    final String scope = "truncationTests";
    final String streamName = "testParallelSegmentOffsetTruncation";
    final int parallelism = 2;
    final int totalEvents = 100;
    final int truncatedEvents = 25;
    StreamConfiguration streamConf = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(parallelism)).build();
    @Cleanup
    StreamManager streamManager = StreamManager.create(PRAVEGA.getControllerURI());
    @Cleanup
    ReaderGroupManager groupManager = ReaderGroupManager.withScope(scope, PRAVEGA.getControllerURI());
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope,
            ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    streamManager.createScope(scope);
    // Test truncation on both a newly created and a re-created stream.
    for (int i = 0; i < 2; i++) {
        final String readerGroupName = "RGTestParallelSegmentOffsetTruncation" + i;
        streamManager.createStream(scope, streamName, streamConf);
        groupManager.createReaderGroup(readerGroupName,
                ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(Stream.of(scope, streamName)).build());
        @Cleanup
        ReaderGroup readerGroup = groupManager.getReaderGroup(readerGroupName);
        // Write events to the Stream.
        writeEvents(clientFactory, streamName, totalEvents);
        // Instantiate readers to consume from the Stream up to truncatedEvents.
        List<CompletableFuture<Integer>> futures = ReadWriteUtils.readEvents(clientFactory, readerGroupName, parallelism, truncatedEvents);
        Futures.allOf(futures).join();
        int eventsReadBeforeTruncation = futures.stream().map(CompletableFuture::join).reduce(Integer::sum).get();
        // Truncate the stream at the checkpointed position.
        Checkpoint cp = readerGroup.initiateCheckpoint("myCheckpoint" + i, executorService()).join();
        StreamCut streamCut = cp.asImpl().getPositions().values().iterator().next();
        assertTrue(streamManager.truncateStream(scope, streamName, streamCut));
        // Just after the truncation, trying to read the whole stream should raise a TruncatedDataException.
        final String newGroupName = readerGroupName + "new";
        groupManager.createReaderGroup(newGroupName, ReaderGroupConfig.builder().stream(Stream.of(scope, streamName)).build());
        futures = readEvents(clientFactory, newGroupName, parallelism);
        Futures.allOf(futures).join();
        assertEquals("Expected read events: ", totalEvents - eventsReadBeforeTruncation,
                (int) futures.stream().map(CompletableFuture::join).reduce((a, b) -> a + b).get());
        assertTrue(streamManager.sealStream(scope, streamName));
        assertTrue(streamManager.deleteStream(scope, streamName));
    }
}
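ReadWriteUtils.readEvents above is a test utility whose source is not shown. A minimal sketch of a reader loop in the same spirit, showing how a reader can survive the TruncatedDataException this test expects: readNextEvent raises it when the reader's position has been truncated away, and a subsequent call resumes from the next available event. The class and method names are illustrative:

import io.pravega.client.EventStreamClientFactory;
import io.pravega.client.stream.EventRead;
import io.pravega.client.stream.EventStreamReader;
import io.pravega.client.stream.ReaderConfig;
import io.pravega.client.stream.ReinitializationRequiredException;
import io.pravega.client.stream.TruncatedDataException;
import io.pravega.client.stream.impl.UTF8StringSerializer;

public final class ReadHelper {

    // Count every event still readable in the group's streams, tolerating truncation.
    public static int readAllEvents(EventStreamClientFactory clientFactory, String readerGroup, String readerId)
            throws ReinitializationRequiredException {
        int count = 0;
        try (EventStreamReader<String> reader = clientFactory.createReader(
                readerId, readerGroup, new UTF8StringSerializer(), ReaderConfig.builder().build())) {
            while (true) {
                try {
                    EventRead<String> eventRead = reader.readNextEvent(5000);
                    if (eventRead.getEvent() == null && !eventRead.isCheckpoint()) {
                        break; // No more events arrived within the timeout.
                    }
                    if (eventRead.getEvent() != null) {
                        count++;
                    }
                } catch (TruncatedDataException e) {
                    // Position was truncated away; the next call resumes past the truncation point.
                }
            }
        }
        return count;
    }
}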
Use of io.pravega.client.stream.StreamCut in project pravega by pravega.
Source: class EndToEndReaderGroupTest, method testGenerateStreamCutsWithScaling.
@Test(timeout = 40000)
public void testGenerateStreamCutsWithScaling() throws Exception {
    String streamName = "testGenerateStreamCutsWithScaling";
    final Stream stream = Stream.of(SCOPE, streamName);
    final String group = "testGenerateStreamCutsWithScaling-group";
    createScope(SCOPE);
    createStream(SCOPE, streamName, ScalingPolicy.fixed(2));
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE,
            ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, serializer, EventWriterConfig.builder().build());
    // Prep the stream with data.
    // 1. Write 2 events with an event size of 30 to segment 0.
    writer.writeEvent(keyGenerator.apply("0.1"), getEventData.apply(0)).join();
    writer.writeEvent(keyGenerator.apply("0.1"), getEventData.apply(0)).join();
    // 2. Write 2 events with an event size of 30 to segment 1.
    writer.writeEvent(keyGenerator.apply("0.9"), getEventData.apply(1)).join();
    writer.writeEvent(keyGenerator.apply("0.9"), getEventData.apply(1)).join();
    // 3. Manually scale the stream: split segment 0 into segment 2 and segment 3.
    Map<Double, Double> newKeyRanges = new HashMap<>();
    newKeyRanges.put(0.0, 0.25);
    newKeyRanges.put(0.25, 0.5);
    newKeyRanges.put(0.5, 1.0);
    scaleStream(streamName, newKeyRanges);
    // 4. Write events to segment 2.
    writer.writeEvent(keyGenerator.apply("0.1"), getEventData.apply(2));
    // 5. Write events to segment 3.
    writer.writeEvent(keyGenerator.apply("0.3"), getEventData.apply(3));
    // 6. Write events to segment 1.
    writer.writeEvent(keyGenerator.apply("0.9"), getEventData.apply(1));
    @Cleanup
    ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, PRAVEGA.getControllerURI());
    groupManager.createReaderGroup(group,
            ReaderGroupConfig.builder().disableAutomaticCheckpoints().groupRefreshTimeMillis(200).stream(stream).build());
    ReaderGroup readerGroup = groupManager.getReaderGroup(group);
    // 7. Create two readers.
    @Cleanup
    EventStreamReader<String> reader1 = clientFactory.createReader("reader1", group, serializer, ReaderConfig.builder().build());
    @Cleanup
    EventStreamReader<String> reader2 = clientFactory.createReader("reader2", group, serializer, ReaderConfig.builder().build());
    // 8. Read 1 event from each reader.
    String reader1Event = reader1.readNextEvent(15000).getEvent();
    String reader2Event = reader2.readNextEvent(15000).getEvent();
    // 9. Read the remaining events from segments 0 and 1.
    if (reader1Event.equalsIgnoreCase(getEventData.apply(0))) {
        assertEquals(getEventData.apply(0), reader1.readNextEvent(15000).getEvent());
        assertEquals(getEventData.apply(1), reader2Event);
        readAndVerify(reader2, 1);
    } else {
        assertEquals(getEventData.apply(1), reader1.readNextEvent(15000).getEvent());
        assertEquals(getEventData.apply(0), reader2Event);
        readAndVerify(reader2, 0);
    }
    // The readers now see the pre-scale segments as empty.
    EventRead<String> data = reader2.readNextEvent(100);
    assertNull(data.getEvent());
    data = reader1.readNextEvent(100);
    assertNull(data.getEvent());
    @Cleanup("shutdown")
    InlineExecutor backgroundExecutor = new InlineExecutor();
    readerGroup.initiateCheckpoint("cp1", backgroundExecutor);
    data = reader1.readNextEvent(5000);
    assertEquals("cp1", data.getCheckpointName());
    data = reader2.readNextEvent(5000);
    assertEquals("cp1", data.getCheckpointName());
    // After the checkpoint, the post-scale segments become available to read.
    reader1Event = reader1.readNextEvent(5000).getEvent();
    assertNotNull(reader1Event);
    reader2Event = reader2.readNextEvent(5000).getEvent();
    assertNotNull(reader2Event);
    // 10. Generate StreamCuts.
    CompletableFuture<Map<Stream, StreamCut>> sc = readerGroup.generateStreamCuts(backgroundExecutor);
    // The reader group state will be updated after 1 second.
    TimeUnit.SECONDS.sleep(1);
    reader1Event = reader1.readNextEvent(500).getEvent();
    reader2Event = reader2.readNextEvent(500).getEvent();
    // 11. Validate the generated StreamCut once it is obtained.
    assertTrue(Futures.await(sc));
    Set<Segment> expectedSegments = ImmutableSet.<Segment>builder()
            .add(getSegment(streamName, 4, 1))  // 1 event read from segment 1.
            .add(getSegment(streamName, 2, 1))  // 1 event read from segment 2 or 3.
            .add(getSegment(streamName, 3, 1))
            .build();
    Map<Stream, StreamCut> scMap = sc.join();
    assertEquals("StreamCut for a single stream expected", 1, scMap.size());
    assertEquals(expectedSegments, scMap.get(stream).asImpl().getPositions().keySet());
}
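One natural use of the generated cut map, sketched under the assumption that you want to resume processing from exactly that position: feed the cuts back in as the starting StreamCuts of a new reader group. The class and method names are illustrative:

import io.pravega.client.admin.ReaderGroupManager;
import io.pravega.client.stream.ReaderGroupConfig;
import io.pravega.client.stream.Stream;
import io.pravega.client.stream.StreamCut;
import java.util.Map;

public final class ResumeFromStreamCuts {

    // Create a reader group that starts each stream at the position captured
    // by ReaderGroup.generateStreamCuts.
    public static void createResumedGroup(ReaderGroupManager groupManager, String groupName,
                                          Map<Stream, StreamCut> generatedCuts) {
        ReaderGroupConfig.ReaderGroupConfigBuilder builder = ReaderGroupConfig.builder();
        generatedCuts.forEach((stream, cut) -> builder.stream(stream, cut));
        groupManager.createReaderGroup(groupName, builder.build());
    }
}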