Use of io.pravega.client.stream.StreamCut in project pravega by pravega.
Class StreamMetadataTasksTest, method consumptionBasedRetentionSizeLimitWithOverlappingMinTest.
@Test(timeout = 30000)
public void consumptionBasedRetentionSizeLimitWithOverlappingMinTest() throws Exception {
final ScalingPolicy policy = ScalingPolicy.fixed(2);
final RetentionPolicy retentionPolicy = RetentionPolicy.bySizeBytes(2L, 20L);
String stream1 = "consumptionSizeOverlap";
final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
streamStorePartialMock.createStream(SCOPE, stream1, configuration, System.currentTimeMillis(), null, executor).get();
streamStorePartialMock.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
doReturn(CompletableFuture.completedFuture(Controller.CreateStreamStatus.Status.SUCCESS)).when(streamMetadataTasks).createRGStream(anyString(), anyString(), any(), anyLong(), anyInt(), anyLong());
WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
streamMetadataTasks.setRequestEventWriter(requestEventWriter);
streamMetadataTasks.setRetentionFrequencyMillis(1L);
AtomicLong time = new AtomicLong(0L);
streamMetadataTasks.setRetentionClock(time::get);
final Segment seg0 = new Segment(SCOPE, stream1, 0L);
final Segment seg1 = new Segment(SCOPE, stream1, 1L);
ImmutableMap<Segment, Long> startStreamCut = ImmutableMap.of(seg0, 0L, seg1, 0L);
Map<Stream, StreamCut> startSC = ImmutableMap.of(Stream.of(SCOPE, stream1), new StreamCutImpl(Stream.of(SCOPE, stream1), startStreamCut));
ImmutableMap<Segment, Long> endStreamCut = ImmutableMap.of(seg0, 2000L, seg1, 3000L);
Map<Stream, StreamCut> endSC = ImmutableMap.of(Stream.of(SCOPE, stream1), new StreamCutImpl(Stream.of(SCOPE, stream1), endStreamCut));
ReaderGroupConfig consumpRGConfig = ReaderGroupConfig.builder().automaticCheckpointIntervalMillis(30000L).groupRefreshTimeMillis(20000L).maxOutstandingCheckpointRequest(2).retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT).startingStreamCuts(startSC).endingStreamCuts(endSC).build();
consumpRGConfig = ReaderGroupConfig.cloneConfig(consumpRGConfig, UUID.randomUUID(), 0L);
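// cloneConfig stamps the config with a reader-group id and generation (0L); that id is reused
// further down when the subscriber stream-cut is updated via consumpRGConfig.getReaderGroupId().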
doReturn(CompletableFuture.completedFuture(Controller.CreateStreamStatus.Status.SUCCESS)).when(streamMetadataTasks).createRGStream(anyString(), anyString(), any(), anyLong(), anyInt(), anyLong());
String subscriber1 = "subscriber1";
CompletableFuture<Controller.CreateReaderGroupResponse> createStatus = streamMetadataTasks.createReaderGroup(SCOPE, subscriber1, consumpRGConfig, System.currentTimeMillis(), 0L);
assertTrue(Futures.await(processEvent(requestEventWriter)));
assertEquals(Controller.CreateReaderGroupResponse.Status.SUCCESS, createStatus.join().getStatus());
// create a retention set that has 5 values
// retention policy where min = 2, max = 20 (matching bySizeBytes(2L, 20L) above).
// s0: seg0/10, seg1/10 ==> size retained if truncated at = 0
// s1: seg0/10, seg1/8 ==> size retained if truncated at = 2 <== min
// s2: seg0/10, seg1/7 ==> size retained if truncated at = 3
// s3: seg0/0, seg1/6 ==> size retained if truncated at = 14
// s4: seg0/0, seg1/5 ==> size retained if truncated at = 15 <== max
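// Note: the "size retained" figures above equal the newest record's size minus each cut's record size
// (20 - 18 = 2, 20 - 17 = 3, 20 - 6 = 14, 20 - 5 = 15); the corresponding records are added below.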
time.set(10L);
streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(time.get(), 5L, ImmutableMap.of(0L, 0L, 1L, 5L)), null, executor).join();
time.set(20L);
streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(time.get(), 6L, ImmutableMap.of(0L, 0L, 1L, 6L)), null, executor).join();
time.set(30L);
streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(time.get(), 17L, ImmutableMap.of(0L, 10L, 1L, 7L)), null, executor).join();
time.set(40L);
streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(time.get(), 18L, ImmutableMap.of(0L, 10L, 1L, 8L)), null, executor).join();
time.set(50L);
streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(time.get(), 20L, ImmutableMap.of(0L, 10L, 1L, 10L)), null, executor).join();
// subscriber streamcut: slb: seg0/9, seg1/10 ==> size retained if truncated at = 1.
// This is less than min, so we should truncate at the min bound; but the min cut overlaps with the slb,
// so we actually truncate at s3, the streamcut immediately before the slb.
final String subscriber1Name = NameUtils.getScopedReaderGroupName(SCOPE, subscriber1);
streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, consumpRGConfig.getReaderGroupId().toString(), 0L, ImmutableMap.of(0L, 9L, 1L, 10L), 0L).join();
streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, time.get(), null, "").join();
VersionedMetadata<StreamTruncationRecord> truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
assertEquals(truncationRecord.getObject().getStreamCut().get(0L).longValue(), 0L);
assertEquals(truncationRecord.getObject().getStreamCut().get(1L).longValue(), 6L);
assertTrue(truncationRecord.getObject().isUpdating());
streamStorePartialMock.completeTruncation(SCOPE, stream1, truncationRecord, null, executor).join();
}
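For context, a minimal client-side sketch of the consumption-based retention setup that the controller test above exercises, assuming a running cluster, a controllerURI, and that the illustrative names "myScope", "myStream" and "subscriber1" are free to create; only public client APIs that also appear in the snippets on this page (StreamManager, ReaderGroupManager, ReaderGroupConfig) are used.
ClientConfig clientConfig = ClientConfig.builder().controllerURI(controllerURI).build();
StreamManager streamManager = StreamManager.create(clientConfig);
streamManager.createScope("myScope");
// Retain at least 2 bytes and at most 20 bytes, subject to subscriber positions.
streamManager.createStream("myScope", "myStream", StreamConfiguration.builder()
        .scalingPolicy(ScalingPolicy.fixed(2))
        .retentionPolicy(RetentionPolicy.bySizeBytes(2L, 20L))
        .build());
// A subscriber reader group: the positions it publishes become the lower bound for truncation.
ReaderGroupManager rgManager = ReaderGroupManager.withScope("myScope", clientConfig);
rgManager.createReaderGroup("subscriber1", ReaderGroupConfig.builder()
        .stream(Stream.of("myScope", "myStream"))
        .retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT)
        .build());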
Use of io.pravega.client.stream.StreamCut in project pravega by pravega.
Class StreamMetadataTasksTest, method timeBasedRetentionStreamTest.
@Test(timeout = 30000)
public void timeBasedRetentionStreamTest() throws Exception {
final ScalingPolicy policy = ScalingPolicy.fixed(2);
final RetentionPolicy retentionPolicy = RetentionPolicy.builder().retentionType(RetentionPolicy.RetentionType.TIME).retentionParam(Duration.ofMinutes(60).toMillis()).build();
final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
doAnswer(x -> CompletableFuture.completedFuture(Collections.emptyList())).when(streamStorePartialMock).listSubscribers(any(), any(), any(), any());
streamStorePartialMock.createStream(SCOPE, "test", configuration, System.currentTimeMillis(), null, executor).get();
streamStorePartialMock.setState(SCOPE, "test", State.ACTIVE, null, executor).get();
assertNotEquals(0, consumer.getCurrentSegments(SCOPE, "test", 0L).get().size());
WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
streamMetadataTasks.setRequestEventWriter(requestEventWriter);
AtomicLong time = new AtomicLong(System.currentTimeMillis());
streamMetadataTasks.setRetentionClock(time::get);
long recordingTime1 = time.get();
Map<Long, Long> map1 = new HashMap<>();
map1.put(0L, 1L);
map1.put(1L, 1L);
StreamCutRecord streamCut1 = new StreamCutRecord(recordingTime1, Long.MIN_VALUE, ImmutableMap.copyOf(map1));
doReturn(CompletableFuture.completedFuture(streamCut1)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
streamMetadataTasks.retention(SCOPE, "test", retentionPolicy, recordingTime1, null, "").get();
// verify that one streamCut is generated and added.
List<StreamCutRecord> list = streamStorePartialMock.getRetentionSet(SCOPE, "test", null, executor).thenCompose(retentionSet -> {
return Futures.allOfWithResults(retentionSet.getRetentionRecords().stream().map(x -> streamStorePartialMock.getStreamCutRecord(SCOPE, "test", x, null, executor)).collect(Collectors.toList()));
}).join();
assertTrue(list.contains(streamCut1));
Map<Long, Long> map2 = new HashMap<>();
map2.put(0L, 10L);
map2.put(1L, 10L);
long recordingTime2 = recordingTime1 + Duration.ofMinutes(5).toMillis();
StreamCutRecord streamCut2 = new StreamCutRecord(recordingTime2, Long.MIN_VALUE, ImmutableMap.copyOf(map2));
// mock only isTransactionOngoing call.
doReturn(CompletableFuture.completedFuture(streamCut2)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
time.set(recordingTime2);
streamMetadataTasks.retention(SCOPE, "test", retentionPolicy, recordingTime2, null, "").get();
list = streamStorePartialMock.getRetentionSet(SCOPE, "test", null, executor).thenCompose(retentionSet -> {
return Futures.allOfWithResults(retentionSet.getRetentionRecords().stream().map(x -> streamStorePartialMock.getStreamCutRecord(SCOPE, "test", x, null, executor)).collect(Collectors.toList()));
}).join();
StreamTruncationRecord truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, "test", null, executor).get().getObject();
// verify that only one stream cut is in retention set. streamCut2 is not added
// verify that truncation did not happen
assertTrue(list.contains(streamCut1));
assertFalse(list.contains(streamCut2));
assertFalse(truncProp.isUpdating());
Map<Long, Long> map3 = new HashMap<>();
map3.put(0L, 20L);
map3.put(1L, 20L);
long recordingTime3 = recordingTime1 + Duration.ofMinutes(Config.MINIMUM_RETENTION_FREQUENCY_IN_MINUTES).toMillis() + 1;
StreamCutRecord streamCut3 = new StreamCutRecord(recordingTime3, Long.MIN_VALUE, ImmutableMap.copyOf(map3));
// mock only isTransactionOngoing call.
doReturn(CompletableFuture.completedFuture(streamCut3)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
time.set(recordingTime3);
streamMetadataTasks.retention(SCOPE, "test", retentionPolicy, recordingTime3, null, "").get();
// verify two stream cuts are in retention set. Cut 1 and 3.
// verify that truncation has not happened.
list = streamStorePartialMock.getRetentionSet(SCOPE, "test", null, executor).thenCompose(retentionSet -> {
return Futures.allOfWithResults(retentionSet.getRetentionRecords().stream().map(x -> streamStorePartialMock.getStreamCutRecord(SCOPE, "test", x, null, executor)).collect(Collectors.toList()));
}).join();
truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, "test", null, executor).get().getObject();
assertTrue(list.contains(streamCut1));
assertFalse(list.contains(streamCut2));
assertTrue(list.contains(streamCut3));
assertFalse(truncProp.isUpdating());
Map<Long, Long> map4 = new HashMap<>();
map4.put(0L, 20L);
map4.put(1L, 20L);
long recordingTime4 = recordingTime1 + retentionPolicy.getRetentionParam() + 2;
StreamCutRecord streamCut4 = new StreamCutRecord(recordingTime4, Long.MIN_VALUE, ImmutableMap.copyOf(map4));
doReturn(CompletableFuture.completedFuture(streamCut4)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
time.set(recordingTime4);
streamMetadataTasks.retention(SCOPE, "test", retentionPolicy, recordingTime4, null, "").get();
// verify that only two stream cut are in retention set. streamcut 3 and 4
// verify that truncation has started. verify that streamCut1 is removed from retention set as that has been used for truncation
list = streamStorePartialMock.getRetentionSet(SCOPE, "test", null, executor).thenCompose(retentionSet -> {
return Futures.allOfWithResults(retentionSet.getRetentionRecords().stream().map(x -> streamStorePartialMock.getStreamCutRecord(SCOPE, "test", x, null, executor)).collect(Collectors.toList()));
}).join();
truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, "test", null, executor).get().getObject();
assertFalse(list.contains(streamCut1));
assertFalse(list.contains(streamCut2));
assertTrue(list.contains(streamCut3));
assertTrue(list.contains(streamCut4));
assertTrue(truncProp.isUpdating());
assertTrue(truncProp.getStreamCut().get(0L) == 1L && truncProp.getStreamCut().get(1L) == 1L);
doCallRealMethod().when(streamStorePartialMock).listSubscribers(any(), any(), any(), any());
}
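The test builds its time-based policy with the builder API; a sketch of the equivalent convenience factory, assuming RetentionPolicy.byTime is available in the client version in use.
// Builder form used in the test above:
RetentionPolicy byBuilder = RetentionPolicy.builder()
        .retentionType(RetentionPolicy.RetentionType.TIME)
        .retentionParam(Duration.ofMinutes(60).toMillis())
        .build();
// Equivalent convenience factory (assumed available):
RetentionPolicy byFactory = RetentionPolicy.byTime(Duration.ofMinutes(60));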
Use of io.pravega.client.stream.StreamCut in project pravega by pravega.
Class ReaderCheckpointTest, method generateStreamCutsTest.
@Test
public void generateStreamCutsTest() {
controllerURI = fetchControllerURI();
final ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);
StreamManager streamManager = StreamManager.create(clientConfig);
assertTrue("Creating Scope", streamManager.createScope(SCOPE_2));
assertTrue("Creating stream", streamManager.createStream(SCOPE_2, STREAM, streamConfig));
@Cleanup ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(SCOPE_2, clientConfig);
readerGroupManager.createReaderGroup(READER_GROUP_NAME, ReaderGroupConfig.builder().stream(io.pravega.client.stream.Stream.of(SCOPE_2, STREAM)).groupRefreshTimeMillis(GROUP_REFRESH_TIME_MILLIS).build());
@Cleanup ReaderGroup readerGroup = readerGroupManager.getReaderGroup(READER_GROUP_NAME);
int startInclusive = 1;
int endExclusive = 100;
log.info("Write events with range [{},{})", startInclusive, endExclusive);
writeEvents(SCOPE_2, IntStream.range(startInclusive, endExclusive).boxed().collect(Collectors.toList()));
readEventsAndVerify(SCOPE_2, startInclusive, endExclusive);
// Obtain StreamCuts at 100th event.
Map<Stream, StreamCut> cutAt100 = generateStreamCuts(readerGroup);
// Write and read events 100 to 200
startInclusive = 100;
endExclusive = 200;
log.info("Write events with range [{},{})", startInclusive, endExclusive);
writeEvents(SCOPE_2, IntStream.range(startInclusive, endExclusive).boxed().collect(Collectors.toList()));
readEventsAndVerify(SCOPE_2, startInclusive, endExclusive);
// Reset to stream cut pointing to 100th event.
readerGroup.resetReaderGroup(ReaderGroupConfig.builder().startFromStreamCuts(cutAt100).build());
readEventsAndVerify(SCOPE_2, 100, endExclusive);
// Obtain stream cut at 200th event.
Map<Stream, StreamCut> cutAt200 = generateStreamCuts(readerGroup);
// Write and read events 200 to 300.
startInclusive = 200;
endExclusive = 300;
log.info("Write events with range [{},{})", startInclusive, endExclusive);
writeEvents(SCOPE_2, IntStream.range(startInclusive, endExclusive).boxed().collect(Collectors.toList()));
readEventsAndVerify(SCOPE_2, startInclusive, endExclusive);
// Reset back to stream cut pointing to 200th event.
readerGroup.resetReaderGroup(ReaderGroupConfig.builder().startFromStreamCuts(cutAt200).build());
readEventsAndVerify(SCOPE_2, 200, endExclusive);
// Reset back to stream cut pointing to 100th event.
readerGroup.resetReaderGroup(ReaderGroupConfig.builder().startFromStreamCuts(cutAt100).build());
readEventsAndVerify(SCOPE_2, 100, endExclusive);
// clean up
readerGroupManager.deleteReaderGroup(READER_GROUP_NAME);
}
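The test rewinds using the StreamCut objects it just generated; in practice a cut is often persisted and restored later. A minimal sketch, assuming StreamCut.toBytes() and StreamCut.fromBytes(ByteBuffer) from the client API, Guava's ImmutableMap, and the cutAt100 and readerGroup objects from the test above.
// Persist the cut (for example to a checkpoint store) and restore it later.
ByteBuffer serialized = cutAt100.get(Stream.of(SCOPE_2, STREAM)).toBytes();
StreamCut restored = StreamCut.fromBytes(serialized);
// Rewind the reader group to the restored position, exactly as the test does with the live object.
readerGroup.resetReaderGroup(ReaderGroupConfig.builder()
        .startFromStreamCuts(ImmutableMap.of(Stream.of(SCOPE_2, STREAM), restored))
        .build());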
Use of io.pravega.client.stream.StreamCut in project pravega by pravega.
Class SingleSubscriberUpdateRetentionStreamCutTest, method singleSubscriberCBRTest.
@Test
public void singleSubscriberCBRTest() throws Exception {
final ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);
@Cleanup EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE, clientConfig);
@Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter(STREAM, new JavaSerializer<>(), EventWriterConfig.builder().build());
// Write a single event.
log.info("Writing event e1 to {}/{}", SCOPE, STREAM);
writer.writeEvent("e1", SIZE_30_EVENT).join();
@Cleanup ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(SCOPE, clientConfig);
readerGroupManager.createReaderGroup(READER_GROUP, ReaderGroupConfig.builder().retentionType(ReaderGroupConfig.StreamDataRetention.MANUAL_RELEASE_AT_USER_STREAMCUT).disableAutomaticCheckpoints().stream(Stream.of(SCOPE, STREAM)).build());
ReaderGroup readerGroup = readerGroupManager.getReaderGroup(READER_GROUP);
@Cleanup EventStreamReader<String> reader = clientFactory.createReader(READER_GROUP + "-" + 1, READER_GROUP, new JavaSerializer<>(), readerConfig);
// Read one event.
log.info("Reading event e1 from {}/{}", SCOPE, STREAM);
EventRead<String> read = reader.readNextEvent(READ_TIMEOUT);
assertFalse(read.isCheckpoint());
assertEquals("data of size 30", read.getEvent());
// Update the retention stream-cut.
log.info("{} generating stream-cuts for {}/{}", READER_GROUP, SCOPE, STREAM);
CompletableFuture<Map<Stream, StreamCut>> futureCuts = readerGroup.generateStreamCuts(streamCutExecutor);
// Wait for 5 seconds to force reader group state update. This will allow for the silent
// checkpoint event generated as part of generateStreamCuts to be picked and processed.
Exceptions.handleInterrupted(() -> TimeUnit.SECONDS.sleep(5));
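// The next read is expected to return no user event; it exists only to drive the reader-group
// state update so that the futureCuts future can complete.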
EventRead<String> emptyEvent = reader.readNextEvent(READ_TIMEOUT);
assertTrue("Stream-cut generation did not complete", Futures.await(futureCuts, 10_000));
Map<Stream, StreamCut> streamCuts = futureCuts.join();
log.info("{} updating its retention stream-cut to {}", READER_GROUP, streamCuts);
readerGroup.updateRetentionStreamCut(streamCuts);
// Write two more events.
log.info("Writing event e2 to {}/{}", SCOPE, STREAM);
writer.writeEvent("e2", SIZE_30_EVENT).join();
log.info("Writing event e3 to {}/{}", SCOPE, STREAM);
writer.writeEvent("e3", SIZE_30_EVENT).join();
// Check to make sure truncation happened after the first event.
// The timeout is 5 minutes as the retention period is set to 2 minutes. We allow for 2 cycles to fully complete
// and a little longer in order to confirm that the retention has taken place.
AssertExtensions.assertEventuallyEquals("Truncation did not take place at offset 30.", true, () -> controller.getSegmentsAtTime(new StreamImpl(SCOPE, STREAM), 0L).join().values().stream().anyMatch(off -> off >= 30), 1000, 5 * 60 * 1000L);
// Read next event.
log.info("Reading event e2 from {}/{}", SCOPE, STREAM);
read = reader.readNextEvent(READ_TIMEOUT);
assertFalse(read.isCheckpoint());
assertEquals("data of size 30", read.getEvent());
// Update the retention stream-cut.
log.info("{} generating stream-cuts for {}/{}", READER_GROUP, SCOPE, STREAM);
CompletableFuture<Map<Stream, StreamCut>> futureCuts2 = readerGroup.generateStreamCuts(streamCutExecutor);
// Wait for 5 seconds to force reader group state update. This will allow for the silent
// checkpoint event generated as part of generateStreamCuts to be picked and processed.
Exceptions.handleInterrupted(() -> TimeUnit.SECONDS.sleep(5));
EventRead<String> emptyEvent2 = reader.readNextEvent(READ_TIMEOUT);
assertTrue("Stream-cut generation did not complete", Futures.await(futureCuts2, 10_000));
Map<Stream, StreamCut> streamCuts2 = futureCuts2.join();
log.info("{} updating its retention stream-cut to {}", READER_GROUP, streamCuts2);
readerGroup.updateRetentionStreamCut(streamCuts2);
// Check to make sure truncation happened after the second event.
// The timeout is 5 minutes as the retention period is set to 2 minutes. We allow for 2 cycles to fully complete
// and a little longer in order to confirm that the retention has taken place.
AssertExtensions.assertEventuallyEquals("Truncation did not take place at offset 60", true, () -> controller.getSegmentsAtTime(new StreamImpl(SCOPE, STREAM), 0L).join().values().stream().anyMatch(off -> off >= 60), 1000, 5 * 60 * 1000L);
}
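For contrast with the MANUAL_RELEASE_AT_USER_STREAMCUT configuration above, a sketch of the same reader group configured for automatic release, where the last checkpointed position (rather than an explicit updateRetentionStreamCut call) is published as the subscriber's lower bound; this would replace the manual configuration, not coexist with it.
readerGroupManager.createReaderGroup(READER_GROUP, ReaderGroupConfig.builder()
        .retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT)
        // automatic checkpoints are left enabled, since the last checkpoint drives the released position
        .stream(Stream.of(SCOPE, STREAM))
        .build());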
Use of io.pravega.client.stream.StreamCut in project pravega by pravega.
Class BatchClientSimpleTest, method batchClientSimpleTest.
/**
* This test verifies the basic functionality of {@link BatchClientFactory}, including stream metadata checks, segment
* counts, parallel segment reads and reads with offsets using stream cuts.
*/
@Test
@SuppressWarnings("deprecation")
public void batchClientSimpleTest() {
final int totalEvents = RG_PARALLELISM * 100;
final int offsetEvents = RG_PARALLELISM * 20;
final int batchIterations = 4;
final Stream stream = Stream.of(SCOPE, STREAM);
final ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);
@Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(clientConfig);
ControllerImpl controller = new ControllerImpl(ControllerImplConfig.builder().clientConfig(clientConfig).build(), connectionFactory.getInternalExecutor());
@Cleanup ClientFactoryImpl clientFactory = new ClientFactoryImpl(SCOPE, controller, connectionFactory);
@Cleanup BatchClientFactory batchClient = BatchClientFactory.withScope(SCOPE, clientConfig);
log.info("Invoking batchClientSimpleTest test with Controller URI: {}", controllerURI);
@Cleanup ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, clientConfig);
groupManager.createReaderGroup(READER_GROUP, ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(SCOPE + "/" + STREAM).build());
ReaderGroup readerGroup = groupManager.getReaderGroup(READER_GROUP);
log.info("Writing events to stream");
// Write events to the Stream.
writeEvents(clientFactory, STREAM, totalEvents);
// Instantiate readers to consume from Stream up to truncatedEvents.
List<CompletableFuture<Integer>> futures = readEventFutures(clientFactory, READER_GROUP, RG_PARALLELISM, offsetEvents);
Futures.allOf(futures).join();
// Create a stream cut on the specified offset position.
Checkpoint cp = readerGroup.initiateCheckpoint("batchClientCheckpoint", executor).join();
StreamCut streamCut = cp.asImpl().getPositions().values().iterator().next();
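// Checkpoint.asImpl().getPositions() yields a Map<Stream, StreamCut>; with a single stream in the
// reader group, the first value is the cut taken at the checkpointed (offsetEvents) position.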
// Instantiate the batch client and assert it provides correct stream info.
log.debug("Creating batch client.");
StreamInfo streamInfo = streamManager.getStreamInfo(SCOPE, stream.getStreamName());
log.debug("Validating stream metadata fields.");
assertEquals("Expected Stream name: ", STREAM, streamInfo.getStreamName());
assertEquals("Expected Scope name: ", SCOPE, streamInfo.getScope());
// Test that we can read events from parallel segments from an offset onwards.
log.debug("Reading events from stream cut onwards in parallel.");
List<SegmentRange> ranges = Lists.newArrayList(batchClient.getSegments(stream, streamCut, StreamCut.UNBOUNDED).getIterator());
assertEquals("Expected events read: ", totalEvents - offsetEvents, readFromRanges(ranges, batchClient));
// Emulate the behavior of Hadoop client: i) Get tail of Stream, ii) Read from current point until tail, iii) repeat.
log.debug("Reading in batch iterations.");
StreamCut currentTailStreamCut = streamManager.getStreamInfo(SCOPE, stream.getStreamName()).getTailStreamCut();
int readEvents = 0;
for (int i = 0; i < batchIterations; i++) {
writeEvents(clientFactory, STREAM, totalEvents);
// Read all the existing events in parallel segments from the previous tail to the current one.
ranges = Lists.newArrayList(batchClient.getSegments(stream, currentTailStreamCut, StreamCut.UNBOUNDED).getIterator());
assertEquals("Expected number of segments: ", RG_PARALLELISM, ranges.size());
readEvents += readFromRanges(ranges, batchClient);
log.debug("Events read in parallel so far: {}.", readEvents);
currentTailStreamCut = streamManager.getStreamInfo(SCOPE, stream.getStreamName()).getTailStreamCut();
}
assertEquals("Expected events read: .", totalEvents * batchIterations, readEvents);
// Truncate the stream in first place.
log.debug("Truncating stream at event {}.", offsetEvents);
assertTrue(controller.truncateStream(SCOPE, STREAM, streamCut).join());
// Test the batch client when we select to start reading a Stream from a truncation point.
StreamCut initialPosition = streamManager.getStreamInfo(SCOPE, stream.getStreamName()).getHeadStreamCut();
List<SegmentRange> newRanges = Lists.newArrayList(batchClient.getSegments(stream, initialPosition, StreamCut.UNBOUNDED).getIterator());
assertEquals("Expected events read: ", (totalEvents - offsetEvents) + totalEvents * batchIterations, readFromRanges(newRanges, batchClient));
log.debug("Events correctly read from Stream: simple batch client test passed.");
}
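The readFromRanges helper used above is not shown; a minimal sketch of how such a helper could count events with the batch API, assuming the events were written as strings with a JavaSerializer (the actual helper in the test class may differ, for example by reading ranges concurrently).
private int readFromRanges(List<SegmentRange> ranges, BatchClientFactory batchClient) {
    int count = 0;
    for (SegmentRange range : ranges) {
        // readSegment returns a SegmentIterator over the events between the range's start and end offsets.
        try (SegmentIterator<String> iterator = batchClient.readSegment(range, new JavaSerializer<>())) {
            while (iterator.hasNext()) {
                iterator.next();
                count++;
            }
        }
    }
    return count;
}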