Use of io.pravega.test.integration.ReadWriteUtils.readEvents in project pravega by pravega.
From the class EndToEndTruncationTest, method testParallelSegmentOffsetTruncation:
/**
 * This test verifies that truncation works when specifying an offset that applies to multiple segments. To this
 * end, the test first writes a set of events to a Stream (with multiple segments) and truncates it at a specified
 * offset (truncatedEvents). The test asserts that readers get a TruncatedDataException after truncation and then
 * read (only) the remaining events that have not been truncated.
 */
@Test(timeout = 600000)
public void testParallelSegmentOffsetTruncation() {
    final String scope = "truncationTests";
    final String streamName = "testParallelSegmentOffsetTruncation";
    final int parallelism = 2;
    final int totalEvents = 100;
    final int truncatedEvents = 25;
    StreamConfiguration streamConf = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(parallelism)).build();
    @Cleanup StreamManager streamManager = StreamManager.create(PRAVEGA.getControllerURI());
    @Cleanup ReaderGroupManager groupManager = ReaderGroupManager.withScope(scope, PRAVEGA.getControllerURI());
    @Cleanup EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope, ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    streamManager.createScope(scope);
    // Test truncation in both new and re-created streams.
    for (int i = 0; i < 2; i++) {
        final String readerGroupName = "RGTestParallelSegmentOffsetTruncation" + i;
        streamManager.createStream(scope, streamName, streamConf);
        groupManager.createReaderGroup(readerGroupName, ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(Stream.of(scope, streamName)).build());
        @Cleanup ReaderGroup readerGroup = groupManager.getReaderGroup(readerGroupName);
        // Write events to the Stream.
        writeEvents(clientFactory, streamName, totalEvents);
        // Instantiate readers to consume from the Stream up to truncatedEvents.
        List<CompletableFuture<Integer>> futures = ReadWriteUtils.readEvents(clientFactory, readerGroupName, parallelism, truncatedEvents);
        Futures.allOf(futures).join();
        int eventsReadBeforeTruncation = futures.stream().map(CompletableFuture::join).reduce(Integer::sum).get();
        // Truncate the stream at a StreamCut taken from the readers' checkpointed positions.
        Checkpoint cp = readerGroup.initiateCheckpoint("myCheckpoint" + i, executorService()).join();
        StreamCut streamCut = cp.asImpl().getPositions().values().iterator().next();
        assertTrue(streamManager.truncateStream(scope, streamName, streamCut));
        // Just after the truncation, trying to read the whole stream should raise a TruncatedDataException.
        final String newGroupName = readerGroupName + "new";
        groupManager.createReaderGroup(newGroupName, ReaderGroupConfig.builder().stream(Stream.of(scope, streamName)).build());
        futures = readEvents(clientFactory, newGroupName, parallelism);
        Futures.allOf(futures).join();
        assertEquals("Expected read events: ", totalEvents - eventsReadBeforeTruncation, (int) futures.stream().map(CompletableFuture::join).reduce(Integer::sum).get());
        assertTrue(streamManager.sealStream(scope, streamName));
        assertTrue(streamManager.deleteStream(scope, streamName));
    }
}
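The readEvents helper exercised above lives in Pravega's test sources and its body is not shown on this page. What follows is a minimal sketch of what a readEvents-style helper could look like, assuming the shape implied by the call sites (client factory, reader group name, number of parallel readers, per-reader event limit). The class name ReadEventsSketch, the reader-ID scheme, the timeout, and the handling of empty reads are all illustrative assumptions, not Pravega's actual implementation:

import io.pravega.client.EventStreamClientFactory;
import io.pravega.client.stream.EventStreamReader;
import io.pravega.client.stream.ReaderConfig;
import io.pravega.client.stream.TruncatedDataException;
import io.pravega.client.stream.impl.UTF8StringSerializer;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;

public final class ReadEventsSketch {

    // Hypothetical stand-in for ReadWriteUtils.readEvents: spawns `parallelism` readers in the
    // given reader group and returns one future per reader, each completing with the number of
    // String events that reader consumed (up to `limit`).
    public static List<CompletableFuture<Integer>> readEvents(EventStreamClientFactory clientFactory,
                                                              String readerGroup, int parallelism, int limit) {
        List<CompletableFuture<Integer>> futures = new ArrayList<>();
        for (int i = 0; i < parallelism; i++) {
            final String readerId = "reader-" + i; // Reader naming is an assumption.
            futures.add(CompletableFuture.supplyAsync(() -> {
                int count = 0;
                try (EventStreamReader<String> reader = clientFactory.createReader(
                        readerId, readerGroup, new UTF8StringSerializer(), ReaderConfig.builder().build())) {
                    while (count < limit) {
                        try {
                            // A null event means nothing arrived within the timeout; the test
                            // above disables automatic checkpoints, so treat it as end-of-input.
                            if (reader.readNextEvent(1000).getEvent() == null) {
                                break;
                            }
                            count++;
                        } catch (TruncatedDataException e) {
                            // Data before the truncation point is gone; subsequent reads resume
                            // from the next available event, so just keep reading.
                        }
                    }
                }
                return count;
            }));
        }
        return futures;
    }
}

Returning one future per reader is what lets the test join them all with Futures.allOf and then sum the per-reader counts, as done on both call sites above.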
Use of io.pravega.test.integration.ReadWriteUtils.readEvents in project pravega by pravega.
From the class EndToEndTruncationTest, method testDeleteStreamWhileReading:
/**
 * This test checks the behavior of a reader (or group of readers) when the Stream it is reading from is deleted.
 * While the client is reading events (from the Segment Store), the test deletes the Stream (Controller and
 * metadata). Once the client reads all the events and reaches the end of a segment, it contacts the Controller to
 * retrieve subsequent segments (if any). However, the Stream-related metadata needed to answer this request has
 * been deleted by then.
 */
// @Ignore //TODO: The controller does not currently handle the stream being deleted properly.
// Once it does so, the client will need to throw an appropriate exception, and this test should reflect it.
@Test(timeout = 20000)
public void testDeleteStreamWhileReading() {
    final String scope = "truncationTests";
    final String streamName = "testDeleteStreamWhileReading";
    final String readerGroup = "RGTestDeleteStreamWhileReading";
    final int totalEvents = 100;
    final int parallelism = 1;
    StreamConfiguration streamConfiguration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(parallelism)).build();
    @Cleanup StreamManager streamManager = StreamManager.create(PRAVEGA.getControllerURI());
    streamManager.createScope(scope);
    streamManager.createStream(scope, streamName, streamConfiguration);
    @Cleanup EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope, ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    // Write totalEvents to the Stream.
    writeEvents(clientFactory, streamName, totalEvents);
    // Instantiate readers to consume from the Stream.
    @Cleanup ReaderGroupManager groupManager = ReaderGroupManager.withScope(scope, PRAVEGA.getControllerURI());
    groupManager.createReaderGroup(readerGroup, ReaderGroupConfig.builder().automaticCheckpointIntervalMillis(500).stream(Stream.of(scope, streamName)).build());
    @Cleanup EventStreamReader<String> reader = clientFactory.createReader(String.valueOf(0), readerGroup, new UTF8StringSerializer(), ReaderConfig.builder().build());
    // Read half of the events, then release the reader so it can be re-created under the same id.
    assertEquals(totalEvents / 2, ReadWriteUtils.readEventsUntil(reader, eventRead -> true, totalEvents / 2, 0));
    reader.close();
    val readerRecreated = clientFactory.createReader(String.valueOf(0), readerGroup, new JavaSerializer<>(), ReaderConfig.builder().build());
    assertTrue(streamManager.sealStream(scope, streamName));
    assertTrue(streamManager.deleteStream(scope, streamName));
    // Creating a new reader against the deleted stream should fail outright.
    assertThrows(InvalidStreamException.class, () -> clientFactory.createReader(String.valueOf(1), readerGroup, new JavaSerializer<>(), ReaderConfig.builder().build()));
    // At the control plane, readers exhaust their retries trying to get successor segments from the
    // deleted stream; at the client surface this currently manifests as a TruncatedDataException.
    assertThrows(TruncatedDataException.class, () -> ReadWriteUtils.readEvents(readerRecreated, totalEvents / 2, 0));
    // Deleting an already-deleted stream should report failure rather than succeed again.
    assertFalse(streamManager.deleteStream(scope, streamName));
}
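readEventsUntil is likewise a test utility whose body is not shown here. Below is a minimal sketch, assuming the signature suggested by the call site above: a reader, a predicate over each EventRead, a target event count, and a final argument taken here to mean how many consecutive timed-out reads to tolerate before giving up. That last interpretation, the class name ReadUntilSketch, and the timeout value are assumptions, not the actual Pravega helper:

import io.pravega.client.stream.EventRead;
import io.pravega.client.stream.EventStreamReader;
import java.util.function.Predicate;

public final class ReadUntilSketch {

    // Hypothetical stand-in for ReadWriteUtils.readEventsUntil: reads until `limit` events
    // matching `predicate` have been seen, or until more than `maxEmptyReads` consecutive
    // reads time out. Returns the number of matching events, which the caller asserts on.
    public static <T> int readEventsUntil(EventStreamReader<T> reader, Predicate<EventRead<T>> predicate,
                                          int limit, int maxEmptyReads) {
        int matches = 0;
        int emptyReads = 0;
        while (matches < limit && emptyReads <= maxEmptyReads) {
            EventRead<T> eventRead = reader.readNextEvent(1000);
            if (eventRead.getEvent() == null) {
                // Checkpoint markers also carry a null event; only count genuine timeouts.
                if (!eventRead.isCheckpoint()) {
                    emptyReads++;
                }
                continue;
            }
            emptyReads = 0;
            if (predicate.test(eventRead)) {
                matches++;
            }
        }
        return matches;
    }
}

With the test's eventRead -> true predicate, this simply counts events, so asserting the return value against totalEvents / 2 checks that exactly half of the written events were consumed.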