Use of io.pravega.client.stream.StreamCut in project pravega by pravega.
The class MetadataScalabilityTest, method truncation.
void truncation(ControllerImpl controller, List<List<Segment>> listOfEpochs) {
    int numSegments = getStreamConfig().getScalingPolicy().getMinNumSegments();
    int scalesToPerform = getScalesToPerform();
    Stream stream = new StreamImpl(SCOPE, getStreamName());
    // try SCALES_TO_PERFORM randomly generated stream cuts and truncate stream at those
    // stream cuts.
    List<AtomicInteger> indexes = new LinkedList<>();
    Random rand = new Random();
    for (int i = 0; i < numSegments; i++) {
        indexes.add(new AtomicInteger(1));
    }
    Futures.loop(() -> indexes.stream().allMatch(x -> x.get() < scalesToPerform - 1), () -> {
        // We randomly generate a stream cut in each iteration of this loop. A valid stream
        // cut in this scenario contains, for each position i in [0, numSegments - 1], a
        // segment from one of the scale epochs of the stream. For each position i, we
        // randomly choose an epoch and pick the segment at position i, advancing the epoch
        // index (in the indexes list) so that subsequent iterations never choose an earlier
        // epoch for the same i.
        //
        // Because the segment at position i always covers the key range [d * i, d * (i + 1)],
        // where d = 1 / (number of segments), the stream cut is guaranteed to cover the
        // entire key space.
        Map<Segment, Long> map = new HashMap<>();
        for (int i = 0; i < numSegments; i++) {
            AtomicInteger index = indexes.get(i);
            index.set(index.get() + rand.nextInt(scalesToPerform - index.get()));
            map.put(listOfEpochs.get(index.get()).get(i), 0L);
        }
        StreamCut cut = new StreamCutImpl(stream, map);
        log.info("truncating stream at {}", map);
        return controller.truncateStream(SCOPE, streamName, cut).thenCompose(truncated -> {
            log.info("stream truncated successfully at {}", cut);
            assertTrue(truncated);
            // We just validate that a non-empty set of successors is returned.
            return controller.getSuccessors(cut).thenAccept(successors -> {
                assertTrue(successors.getSegments().size() > 0);
                log.info("Successors for streamcut {} are {}", cut, successors);
            });
        });
    }, executorService).join();
}
Use of io.pravega.client.stream.StreamCut in project pravega by pravega.
The class OffsetTruncationTest, method offsetTruncationTest.
/**
 * This test verifies that truncation works when specifying an offset that applies to multiple segments. To this
 * end, the test first writes a set of events to a Stream (with multiple segments) and truncates it at the position
 * reached after reading a given number of events (truncatedEvents). The test asserts that readers first get a
 * TruncatedDataException as they attempt to read a truncated segment, and that they then read only the remaining
 * events that have not been truncated.
 */
@Test
public void offsetTruncationTest() {
    final int totalEvents = 200;
    final int truncatedEvents = 50;
    final ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);
    @Cleanup
    ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(clientConfig);
    ControllerImpl controller = new ControllerImpl(ControllerImplConfig.builder().clientConfig(clientConfig).build(),
            connectionFactory.getInternalExecutor());
    @Cleanup
    ClientFactoryImpl clientFactory = new ClientFactoryImpl(SCOPE, controller, connectionFactory);
    log.info("Invoking offsetTruncationTest test with Controller URI: {}", controllerURI);
    @Cleanup
    ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, clientConfig);
    groupManager.createReaderGroup(READER_GROUP, ReaderGroupConfig.builder().stream(Stream.of(SCOPE, STREAM)).build());
    @Cleanup
    ReaderGroup readerGroup = groupManager.getReaderGroup(READER_GROUP);
    // Write events to the Stream.
    writeEvents(clientFactory, STREAM, totalEvents);
    // Instantiate readers to consume from Stream up to truncatedEvents.
    List<CompletableFuture<Integer>> futures = readEventFutures(clientFactory, READER_GROUP, PARALLELISM, truncatedEvents);
    Futures.allOf(futures).join();
    // Ensure that we have read all the events required before initiating the checkpoint.
    assertEquals("Number of events read is not the expected one.", (Integer) truncatedEvents,
            futures.stream().map(f -> Futures.getAndHandleExceptions(f, RuntimeException::new)).reduce(Integer::sum).get());
    // Perform truncation on stream segment.
    Checkpoint cp = readerGroup.initiateCheckpoint("truncationCheckpoint", executor).join();
    StreamCut streamCut = cp.asImpl().getPositions().values().iterator().next();
    StreamCut alternativeStreamCut = readerGroup.generateStreamCuts(executor).join().get(Stream.of(SCOPE, STREAM));
    assertEquals("StreamCuts for reader group differ depending on how they are generated.", streamCut, alternativeStreamCut);
    assertTrue(streamManager.truncateStream(SCOPE, STREAM, streamCut));
    // Just after the truncation, read events from the offset defined in truncate call onwards.
    final String newGroupName = READER_GROUP + "new";
    groupManager.createReaderGroup(newGroupName, ReaderGroupConfig.builder().stream(Stream.of(SCOPE, STREAM)).build());
    futures = readEventFutures(clientFactory, newGroupName, PARALLELISM);
    Futures.allOf(futures).join();
    assertEquals("Expected read events: ", totalEvents - truncatedEvents,
            (int) futures.stream().map(CompletableFuture::join).reduce(Integer::sum).get());
    log.debug("The stream has been successfully truncated at event {}. Offset truncation test passed.", truncatedEvents);
}
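The Javadoc above notes that a reader positioned before the truncation point receives a TruncatedDataException; the test delegates that handling to its readEventFutures helper, which is not shown here. As a hedged sketch of what such handling can look like, assuming an already-created EventStreamReader on the truncated stream:

import io.pravega.client.stream.EventRead;
import io.pravega.client.stream.EventStreamReader;
import io.pravega.client.stream.TruncatedDataException;

// Minimal sketch: count the events that remain readable after a truncation.
static <T> int readRemaining(EventStreamReader<T> reader) {
    int count = 0;
    while (true) {
        EventRead<T> event;
        try {
            event = reader.readNextEvent(2000); // 2s timeout, illustrative
        } catch (TruncatedDataException e) {
            // The reader was positioned inside the truncated range; calling
            // readNextEvent again resumes at the first available (non-truncated) event.
            continue;
        }
        if (event.getEvent() == null && !event.isCheckpoint()) {
            break; // no event arrived within the timeout; assume the tail was reached
        }
        if (event.getEvent() != null) {
            count++;
        }
    }
    return count;
}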
Use of io.pravega.client.stream.StreamCut in project pravega by pravega.
The class ReaderGroupStreamCutUpdateTest, method testStreamcutsUpdateInReaderGroup.
@Test(timeout = 60000)
public void testStreamcutsUpdateInReaderGroup() throws Exception {
    final String scope = "testStreamcutsUpdateInReaderGroup";
    final String stream = "myStream";
    final String readerGroupName = "testStreamcutsUpdateInReaderGroupRG";
    final int checkpointingIntervalMs = 2000;
    final int readerSleepInterval = 250;
    final int numEvents = 100;
    // First, create the stream.
    @Cleanup
    StreamManager streamManager = StreamManager.create(controllerURI);
    Assert.assertTrue(streamManager.createScope(scope));
    StreamConfiguration streamConfiguration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(2)).build();
    streamManager.createStream(scope, stream, streamConfiguration);
    // Write some events in the stream.
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope,
            ClientConfig.builder().controllerURI(controllerURI).build());
    writeEvents(clientFactory, stream, numEvents);
    // Read the events and test that positions are getting updated.
    ReaderGroupConfig readerGroupConfig = ReaderGroupConfig.builder().stream(Stream.of(scope, stream))
            .automaticCheckpointIntervalMillis(checkpointingIntervalMs).build();
    @Cleanup
    ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(scope, controllerURI);
    readerGroupManager.createReaderGroup(readerGroupName, readerGroupConfig);
    ReaderGroup readerGroup = readerGroupManager.getReaderGroup(readerGroupName);
    @Cleanup
    EventStreamReader<Double> reader = clientFactory.createReader("myReader", readerGroupName,
            new JavaSerializer<>(), ReaderConfig.builder().build());
    Map<Stream, StreamCut> currentStreamcuts = readerGroup.getStreamCuts();
    EventRead<Double> eventRead;
    int lastIteration = 0, iteration = 0;
    int assertionFrequency = checkpointingIntervalMs / readerSleepInterval;
    do {
        eventRead = reader.readNextEvent(5000);
        // Check that the streamcuts are being updated periodically via automatic reader group checkpoints.
        if (iteration != lastIteration && iteration % assertionFrequency == 0) {
            log.info("Comparing streamcuts: {} / {} in iteration {}.", currentStreamcuts, readerGroup.getStreamCuts(), iteration);
            Assert.assertNotEquals(currentStreamcuts, readerGroup.getStreamCuts());
            currentStreamcuts = readerGroup.getStreamCuts();
            lastIteration = iteration;
        }
        Thread.sleep(readerSleepInterval);
        if (!eventRead.isCheckpoint()) {
            iteration++;
        }
    } while ((eventRead.isCheckpoint() || eventRead.getEvent() != null) && iteration < numEvents);
}
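The stream cuts that the assertion above watches are useful beyond the test itself: the Map<Stream, StreamCut> returned by ReaderGroup#getStreamCuts can seed a new reader group so that processing resumes from the last checkpointed positions. A minimal sketch, reusing readerGroup, readerGroupManager, scope, and stream from the test above; the group name "resumeRG" is hypothetical:

Map<Stream, StreamCut> savedCuts = readerGroup.getStreamCuts();
ReaderGroupConfig resumeConfig = ReaderGroupConfig.builder()
        .stream(Stream.of(scope, stream))
        .startingStreamCuts(savedCuts)
        .build();
// Readers in this new group start from the saved positions, not from the stream head.
readerGroupManager.createReaderGroup("resumeRG", resumeConfig);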
Use of io.pravega.client.stream.StreamCut in project pravega by pravega.
The class RestoreBackUpDataRecoveryTest, method getStreamCutsFromWaterMarks.
private List<Map<Stream, StreamCut>> getStreamCutsFromWaterMarks(Stream streamObj, LinkedBlockingQueue<Watermark> watermarks)
        throws InterruptedException {
    Watermark watermark0 = watermarks.take();
    Watermark watermark1 = watermarks.take();
    assertTrue(watermark0.getLowerTimeBound() <= watermark0.getUpperTimeBound());
    assertTrue(watermark1.getLowerTimeBound() <= watermark1.getUpperTimeBound());
    assertTrue(watermark0.getLowerTimeBound() < watermark1.getLowerTimeBound());
    Map<Segment, Long> positionMap0 = watermark0.getStreamCut().entrySet().stream().collect(
            Collectors.toMap(x -> new Segment(SCOPE, STREAM1, x.getKey().getSegmentId()), Map.Entry::getValue));
    Map<Segment, Long> positionMap1 = watermark1.getStreamCut().entrySet().stream().collect(
            Collectors.toMap(x -> new Segment(SCOPE, STREAM1, x.getKey().getSegmentId()), Map.Entry::getValue));
    StreamCut streamCutFirst = new StreamCutImpl(streamObj, positionMap0);
    StreamCut streamCutSecond = new StreamCutImpl(streamObj, positionMap1);
    Map<Stream, StreamCut> firstMarkStreamCut = Collections.singletonMap(streamObj, streamCutFirst);
    Map<Stream, StreamCut> secondMarkStreamCut = Collections.singletonMap(streamObj, streamCutSecond);
    return Arrays.asList(firstMarkStreamCut, secondMarkStreamCut);
}
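As a usage note, the two maps returned by this helper are shaped so that they can bound a reader group in event time, mirroring what watermarkTest below does inline. A sketch, assuming an existing readerGroupManager and the streamObj and watermarks from this helper; the group name is hypothetical:

List<Map<Stream, StreamCut>> cuts = getStreamCutsFromWaterMarks(streamObj, watermarks);
ReaderGroupConfig config = ReaderGroupConfig.builder()
        .stream(streamObj)
        .startingStreamCuts(cuts.get(0)) // positions at the first watermark
        .endingStreamCuts(cuts.get(1))   // positions at the second watermark
        .disableAutomaticCheckpoints()
        .build();
readerGroupManager.createReaderGroup("timeBoundedRG", config);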
Use of io.pravega.client.stream.StreamCut in project pravega by pravega.
The class WatermarkingTest, method watermarkTest.
@Test(timeout = 120000)
public void watermarkTest() throws Exception {
    Controller controller = PRAVEGA.getLocalController();
    String scope = "scope";
    String stream = "watermarkTest";
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(5)).build();
    ClientConfig clientConfig = ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build();
    @Cleanup
    StreamManager streamManager = StreamManager.create(clientConfig);
    streamManager.createScope(scope);
    streamManager.createStream(scope, stream, config);
    Stream streamObj = Stream.of(scope, stream);
    // Create 2 writers.
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope, clientConfig);
    JavaSerializer<Long> javaSerializer = new JavaSerializer<>();
    @Cleanup
    EventStreamWriter<Long> writer1 = clientFactory.createEventWriter(stream, javaSerializer, EventWriterConfig.builder().build());
    @Cleanup
    EventStreamWriter<Long> writer2 = clientFactory.createEventWriter(stream, javaSerializer, EventWriterConfig.builder().build());
    AtomicBoolean stopFlag = new AtomicBoolean(false);
    // Write events.
    CompletableFuture<Void> writer1Future = writeEvents(writer1, stopFlag);
    CompletableFuture<Void> writer2Future = writeEvents(writer2, stopFlag);
    // Scale the stream several times so that we get complex positions.
    scale(controller, streamObj, config);
    @Cleanup
    ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(clientConfig);
    @Cleanup
    ClientFactoryImpl syncClientFactory = new ClientFactoryImpl(scope,
            new ControllerImpl(ControllerImplConfig.builder().clientConfig(clientConfig).build(),
                    connectionFactory.getInternalExecutor()),
            connectionFactory);
    String markStream = NameUtils.getMarkStreamForStream(stream);
    @Cleanup
    RevisionedStreamClient<Watermark> watermarkReader = syncClientFactory.createRevisionedStreamClient(markStream,
            new WatermarkSerializer(), SynchronizerConfig.builder().build());
    LinkedBlockingQueue<Watermark> watermarks = new LinkedBlockingQueue<>();
    fetchWatermarks(watermarkReader, watermarks, stopFlag);
    AssertExtensions.assertEventuallyEquals(true, () -> watermarks.size() >= 2, 100000);
    stopFlag.set(true);
    writer1Future.join();
    writer2Future.join();
    // Read events from the stream.
    @Cleanup
    ReaderGroupManager readerGroupManager = new ReaderGroupManagerImpl(scope, controller, syncClientFactory);
    Watermark watermark0 = watermarks.take();
    Watermark watermark1 = watermarks.take();
    assertTrue(watermark0.getLowerTimeBound() <= watermark0.getUpperTimeBound());
    assertTrue(watermark1.getLowerTimeBound() <= watermark1.getUpperTimeBound());
    assertTrue(watermark0.getLowerTimeBound() < watermark1.getLowerTimeBound());
    Map<Segment, Long> positionMap0 = watermark0.getStreamCut().entrySet().stream().collect(
            Collectors.toMap(x -> new Segment(scope, stream, x.getKey().getSegmentId()), Map.Entry::getValue));
    Map<Segment, Long> positionMap1 = watermark1.getStreamCut().entrySet().stream().collect(
            Collectors.toMap(x -> new Segment(scope, stream, x.getKey().getSegmentId()), Map.Entry::getValue));
    StreamCut streamCutFirst = new StreamCutImpl(streamObj, positionMap0);
    StreamCut streamCutSecond = new StreamCutImpl(streamObj, positionMap1);
    Map<Stream, StreamCut> firstMarkStreamCut = Collections.singletonMap(streamObj, streamCutFirst);
    Map<Stream, StreamCut> secondMarkStreamCut = Collections.singletonMap(streamObj, streamCutSecond);
    // Read from the stream cut of the first watermark up to that of the second watermark.
    String readerGroup = "watermarkTest-group";
    readerGroupManager.createReaderGroup(readerGroup, ReaderGroupConfig.builder().stream(streamObj)
            .startingStreamCuts(firstMarkStreamCut).endingStreamCuts(secondMarkStreamCut)
            .disableAutomaticCheckpoints().build());
    @Cleanup
    final EventStreamReader<Long> reader = clientFactory.createReader("myreader", readerGroup, javaSerializer,
            ReaderConfig.builder().build());
    EventRead<Long> event = reader.readNextEvent(10000L);
    TimeWindow currentTimeWindow = reader.getCurrentTimeWindow(streamObj);
    while (event.getEvent() != null && currentTimeWindow.getLowerTimeBound() == null && currentTimeWindow.getUpperTimeBound() == null) {
        event = reader.readNextEvent(10000L);
        currentTimeWindow = reader.getCurrentTimeWindow(streamObj);
    }
    assertNotNull(currentTimeWindow.getUpperTimeBound());
    // Read all events and verify that every event falls within the current time bounds.
    while (event.getEvent() != null) {
        Long time = event.getEvent();
        log.info("timewindow = {} event = {}", currentTimeWindow, time);
        assertTrue(currentTimeWindow.getLowerTimeBound() == null || time >= currentTimeWindow.getLowerTimeBound());
        assertTrue(currentTimeWindow.getUpperTimeBound() == null || time <= currentTimeWindow.getUpperTimeBound());
        TimeWindow nextTimeWindow = reader.getCurrentTimeWindow(streamObj);
        assertTrue(currentTimeWindow.getLowerTimeBound() == null || nextTimeWindow.getLowerTimeBound() >= currentTimeWindow.getLowerTimeBound());
        assertTrue(currentTimeWindow.getUpperTimeBound() == null || nextTimeWindow.getUpperTimeBound() >= currentTimeWindow.getUpperTimeBound());
        currentTimeWindow = nextTimeWindow;
        event = reader.readNextEvent(10000L);
        if (event.isCheckpoint()) {
            event = reader.readNextEvent(10000L);
        }
    }
    assertNotNull(currentTimeWindow.getLowerTimeBound());
}
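The watermarks consumed above only exist because the writers report their local time to Pravega. The writeEvents helper used by this test is not shown in this excerpt; a plausible minimal sketch of such a loop, with illustrative routing keys, is:

import io.pravega.client.stream.EventStreamWriter;

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;

// Sketch: write current timestamps as events and report them via noteTime, which is
// what feeds the watermark workflow read back through the mark stream above.
static CompletableFuture<Void> writeEvents(EventStreamWriter<Long> writer, AtomicBoolean stopFlag) {
    return CompletableFuture.runAsync(() -> {
        long count = 0;
        while (!stopFlag.get()) {
            long timestamp = System.currentTimeMillis();
            writer.writeEvent(String.valueOf(count % 5), timestamp).join(); // routing key is illustrative
            writer.noteTime(timestamp); // reports this writer's time mark to Pravega
            count++;
        }
    });
}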