Use of io.pravega.client.control.impl.ControllerImpl in project pravega by pravega.
The class WatermarkingTest, method watermarkingTests.
@Test
public void watermarkingTests() throws Exception {
    final ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);
    @Cleanup
    ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(clientConfig);
    ControllerImpl controller = new ControllerImpl(ControllerImplConfig.builder().clientConfig(clientConfig).build(),
            connectionFactory.getInternalExecutor());
    // create 2 writers
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE, clientConfig);
    JavaSerializer<Long> javaSerializer = new JavaSerializer<>();
    @Cleanup
    EventStreamWriter<Long> writer1 = clientFactory.createEventWriter(STREAM, javaSerializer, EventWriterConfig.builder().build());
    @Cleanup
    EventStreamWriter<Long> writer2 = clientFactory.createEventWriter(STREAM, javaSerializer, EventWriterConfig.builder().build());
    AtomicBoolean stopFlag = new AtomicBoolean(false);
    // write events
    writeEvents(writer1, stopFlag);
    writeEvents(writer2, stopFlag);
    // scale the stream several times so that we get complex positions
    Stream streamObj = Stream.of(SCOPE, STREAM);
    scale(controller, streamObj);
    @Cleanup
    ClientFactoryImpl syncClientFactory = new ClientFactoryImpl(SCOPE,
            new ControllerImpl(ControllerImplConfig.builder().clientConfig(clientConfig).build(), connectionFactory.getInternalExecutor()),
            connectionFactory);
    String markStream = NameUtils.getMarkStreamForStream(STREAM);
    RevisionedStreamClient<Watermark> watermarkReader = syncClientFactory.createRevisionedStreamClient(markStream,
            new WatermarkSerializer(), SynchronizerConfig.builder().build());
    LinkedBlockingQueue<Watermark> watermarks = new LinkedBlockingQueue<>();
    fetchWatermarks(watermarkReader, watermarks, stopFlag);
    // wait until at least 2 watermarks are emitted
    AssertExtensions.assertEventuallyEquals(true, () -> watermarks.size() >= 2, 100000);
    // scale down to one controller instance
    Futures.getAndHandleExceptions(controllerInstance.scaleService(1), ExecutionException::new);
    // wait until at least 2 more watermarks are emitted
    AssertExtensions.assertEventuallyEquals(true, () -> watermarks.size() >= 4, 100000);
    stopFlag.set(true);
    Watermark watermark0 = watermarks.take();
    Watermark watermark1 = watermarks.take();
    Watermark watermark2 = watermarks.take();
    Watermark watermark3 = watermarks.take();
    // verify that each watermark carries a valid time window
    assertTrue(watermark0.getLowerTimeBound() <= watermark0.getUpperTimeBound());
    assertTrue(watermark1.getLowerTimeBound() <= watermark1.getUpperTimeBound());
    assertTrue(watermark2.getLowerTimeBound() <= watermark2.getUpperTimeBound());
    assertTrue(watermark3.getLowerTimeBound() <= watermark3.getUpperTimeBound());
    // verify that watermarks are increasing in time
    assertTrue(watermark0.getLowerTimeBound() < watermark1.getLowerTimeBound());
    assertTrue(watermark1.getLowerTimeBound() < watermark2.getLowerTimeBound());
    assertTrue(watermark2.getLowerTimeBound() < watermark3.getLowerTimeBound());
    // use watermarks as lower and upper bounds
    Map<Segment, Long> positionMap0 = watermark0.getStreamCut().entrySet().stream()
            .collect(Collectors.toMap(x -> new Segment(SCOPE, STREAM, x.getKey().getSegmentId()), Map.Entry::getValue));
    StreamCut streamCutStart = new StreamCutImpl(streamObj, positionMap0);
    Map<Stream, StreamCut> start = Collections.singletonMap(streamObj, streamCutStart);
    Map<Segment, Long> positionMap2 = watermark2.getStreamCut().entrySet().stream()
            .collect(Collectors.toMap(x -> new Segment(SCOPE, STREAM, x.getKey().getSegmentId()), Map.Entry::getValue));
    StreamCut streamCutEnd = new StreamCutImpl(streamObj, positionMap2);
    Map<Stream, StreamCut> end = Collections.singletonMap(streamObj, streamCutEnd);
    @Cleanup
    ReaderGroupManager readerGroupManager = new ReaderGroupManagerImpl(SCOPE, controller, syncClientFactory);
    String readerGroup = "rg";
    readerGroupManager.createReaderGroup(readerGroup,
            ReaderGroupConfig.builder().stream(streamObj).startingStreamCuts(start).endingStreamCuts(end).build());
    // create a reader on the stream
    @Cleanup
    final EventStreamReader<Long> reader = clientFactory.createReader("myreader", readerGroup, javaSerializer, ReaderConfig.builder().build());
    // read events from the reader and verify that the events read fall within the bounds
    EventRead<Long> event = reader.readNextEvent(10000L);
    AtomicReference<TimeWindow> currentTimeWindow = new AtomicReference<>();
    AssertExtensions.assertEventuallyEquals(true, () -> {
        currentTimeWindow.set(reader.getCurrentTimeWindow(streamObj));
        return currentTimeWindow.get() != null && currentTimeWindow.get().getLowerTimeBound() != null
                && currentTimeWindow.get().getUpperTimeBound() != null;
    }, 100000);
    log.info("current time window = {}", currentTimeWindow.get());
    while (event.getEvent() != null) {
        Long time = event.getEvent();
        log.info("event read = {}", time);
        event.getPosition(); // position is fetched but not otherwise used here
        assertTrue(time >= currentTimeWindow.get().getLowerTimeBound());
        event = reader.readNextEvent(10000L);
        if (event.isCheckpoint()) {
            event = reader.readNextEvent(10000L);
        }
    }
}
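The writeEvents and fetchWatermarks helpers invoked above are not shown in this excerpt. A minimal sketch of what they could look like follows; it assumes a ScheduledExecutorService field named executorService, and the method names, polling intervals, and payloads are illustrative assumptions rather than the exact WatermarkingTest implementation:

private void writeEvents(EventStreamWriter<Long> writer, AtomicBoolean stopFlag) {
    AtomicLong currentTime = new AtomicLong();
    // keep writing the current time as the event payload and report it via noteTime,
    // so the controller can compose watermarks from the writers' time marks
    Futures.loop(() -> !stopFlag.get(), () -> Futures.delayedFuture(() -> {
        currentTime.set(System.currentTimeMillis());
        return writer.writeEvent(currentTime.get())
                     .thenAccept(v -> writer.noteTime(currentTime.get()));
    }, 1000L, executorService), executorService);
}

private void fetchWatermarks(RevisionedStreamClient<Watermark> watermarkReader,
                             LinkedBlockingQueue<Watermark> watermarks, AtomicBoolean stopFlag) {
    AtomicReference<Revision> revision = new AtomicReference<>(watermarkReader.fetchOldestRevision());
    // periodically poll the mark stream and queue every watermark written after the
    // last revision we have seen
    Futures.loop(() -> !stopFlag.get(), () -> Futures.delayedFuture(() -> CompletableFuture.runAsync(() -> {
        Iterator<Map.Entry<Revision, Watermark>> iterator = watermarkReader.readFrom(revision.get());
        while (iterator.hasNext()) {
            Map.Entry<Revision, Watermark> entry = iterator.next();
            revision.set(entry.getKey());
            watermarks.add(entry.getValue());
        }
    }), 5000L, executorService), executorService);
}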
Use of io.pravega.client.control.impl.ControllerImpl in project pravega by pravega.
The class RetentionTest, method retentionTest.
private CompletableFuture<Void> retentionTest(String streamName, boolean sizeBased) throws Exception {
    return CompletableFuture.runAsync(() -> {
        final ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);
        @Cleanup
        ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(clientConfig);
        ControllerImpl controller = new ControllerImpl(ControllerImplConfig.builder().clientConfig(clientConfig).build(),
                connectionFactory.getInternalExecutor());
        @Cleanup
        ClientFactoryImpl clientFactory = new ClientFactoryImpl(SCOPE, controller, connectionFactory);
        log.info("Invoking writer test with Controller URI: {}", controllerURI);
        // create a writer
        @Cleanup
        EventStreamWriter<Serializable> writer = clientFactory.createEventWriter(streamName, new JavaSerializer<>(),
                EventWriterConfig.builder().build());
        // write an event
        String writeEvent = "event";
        writer.writeEvent(writeEvent);
        if (sizeBased) {
            // since truncation always happens at an event boundary, for size-based retention we write
            // two events so that truncation can happen at the first event
            writer.writeEvent(writeEvent);
        }
        writer.flush();
        log.debug("Wrote event: {}", writeEvent);
        // Sleep for 5 minutes. The retention frequency is set to 2 minutes, so within 5 minutes there are
        // at least 2 retention cycles: a stream cut is computed in the first cycle, and truncation happens
        // at the previously computed stream cut in the second cycle.
        // For time-based retention we wrote one event, which gets truncated.
        // For size-based retention we wrote two events, so the stream retains at least 1 byte as prescribed
        // by the policy.
        Exceptions.handleInterrupted(() -> Thread.sleep(5 * 60 * 1000));
        // create a reader
        ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, clientConfig);
        String groupName = READER_GROUP + streamName;
        groupManager.createReaderGroup(groupName,
                ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(Stream.of(SCOPE, streamName)).build());
        EventStreamReader<String> reader = clientFactory.createReader(UUID.randomUUID().toString(), groupName,
                new JavaSerializer<>(), ReaderConfig.builder().build());
        if (sizeBased) {
            // we should read one of the written events back from the stream
            String event = reader.readNextEvent(6000).getEvent();
            assertEquals(writeEvent, event);
        }
        // verify that reader functionality is unaffected post truncation
        String event = "newEvent";
        writer.writeEvent(event);
        log.info("Writing event: {}", event);
        Assert.assertEquals(event, reader.readNextEvent(6000).getEvent());
        log.debug("The stream was truncated. Simple retention test passed.");
    });
}
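This test relies on the stream having been created with a retention policy (and on the controller's retention frequency being 2 minutes, per the comment above). A hedged sketch of what that stream setup could look like; the variable names and exact policy values are assumptions, not the test's actual configuration:

// assumed setup, run before the test: a stream with time- or size-based retention
RetentionPolicy retention = sizeBased
        ? RetentionPolicy.bySizeBytes(1)                 // retain at least 1 byte
        : RetentionPolicy.byTime(Duration.ofSeconds(1)); // retain almost nothing by time
StreamConfiguration config = StreamConfiguration.builder()
        .scalingPolicy(ScalingPolicy.fixed(1))
        .retentionPolicy(retention)
        .build();
streamManager.createStream(SCOPE, streamName, config);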
Use of io.pravega.client.control.impl.ControllerImpl in project pravega by pravega.
The class StreamCutsTest, method setup.
@Before
public void setup() {
    Service conService = Utils.createPravegaControllerService(null);
    List<URI> ctlURIs = conService.getServiceDetails();
    controllerURI = ctlURIs.get(0);
    final ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);
    controller = new ControllerImpl(ControllerImplConfig.builder().clientConfig(clientConfig).maxBackoffMillis(5000).build(), executor);
    streamManager = StreamManager.create(clientConfig);
    assertTrue("Creating scope", streamManager.createScope(SCOPE));
    assertTrue("Creating stream one", streamManager.createStream(SCOPE, STREAM_ONE,
            StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(RG_PARALLELISM_ONE)).build()));
    assertTrue("Creating stream two", streamManager.createStream(SCOPE, STREAM_TWO,
            StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(RG_PARALLELISM_TWO)).build()));
}
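With the controller client created above, a test could, for instance, confirm that each stream starts with its configured parallelism. This is an illustrative check, not part of the original setup; it reuses getCurrentSegments, the same controller call the scale-up test below relies on:

// verify the fixed scaling policies took effect
StreamSegments segmentsOne = controller.getCurrentSegments(SCOPE, STREAM_ONE).join();
assertEquals(RG_PARALLELISM_ONE, segmentsOne.getSegments().size());
StreamSegments segmentsTwo = controller.getCurrentSegments(SCOPE, STREAM_TWO).join();
assertEquals(RG_PARALLELISM_TWO, segmentsTwo.getSegments().size());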
Use of io.pravega.client.control.impl.ControllerImpl in project pravega by pravega.
The class AutoScaleTest, method scaleUpTest.
/**
 * Invokes the simple scale-up test: produces traffic from multiple writers in parallel.
 * The test periodically checks whether a scale event has occurred by talking to the
 * controller via the controller client.
 */
private CompletableFuture<Void> scaleUpTest() {
    ClientFactoryImpl clientFactory = getClientFactory();
    ControllerImpl controller = getController();
    final AtomicBoolean exit = new AtomicBoolean(false);
    createWriters(clientFactory, 6, SCOPE, SCALE_UP_STREAM_NAME);
    // overall, wait up to about 260 seconds (~4.3 minutes) for the test to complete or for
    // scale-up, whichever happens first
    return Retry.withExpBackoff(10, 10, 30, Duration.ofSeconds(10).toMillis())
            .retryingOn(ScaleOperationNotDoneException.class)
            .throwingOn(RuntimeException.class)
            .runAsync(() -> controller.getCurrentSegments(SCOPE, SCALE_UP_STREAM_NAME).thenAccept(x -> {
                log.debug("size == {}", x.getSegments().size());
                if (x.getSegments().size() == 1) {
                    throw new ScaleOperationNotDoneException();
                } else {
                    log.info("scale up done successfully");
                    exit.set(true);
                }
            }), scaleExecutorService);
}
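The createWriters helper is not shown in this excerpt. A minimal sketch of what it could look like; the stop-flag field, executor field, and payload are assumptions, and the actual AutoScaleTest helper may differ:

private void createWriters(ClientFactoryImpl clientFactory, int writerCount, String scope, String streamName) {
    for (int i = 0; i < writerCount; i++) {
        EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName,
                new JavaSerializer<>(), EventWriterConfig.builder().build());
        // each writer loops in the background, producing enough traffic to push the
        // stream past its event-rate scaling threshold
        CompletableFuture.runAsync(() -> {
            while (!stopWriting.get()) { // assumed shared AtomicBoolean field
                writer.writeEvent(String.valueOf(System.nanoTime()), "0");
            }
        }, writerExecutorService); // assumed executor field
    }
}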
Use of io.pravega.client.control.impl.ControllerImpl in project pravega by pravega.
The class MetadataScalabilityTest, method truncation.
void truncation(ControllerImpl controller, List<List<Segment>> listOfEpochs) {
    int numSegments = getStreamConfig().getScalingPolicy().getMinNumSegments();
    int scalesToPerform = getScalesToPerform();
    Stream stream = new StreamImpl(SCOPE, getStreamName());
    // try scalesToPerform randomly generated stream cuts and truncate the stream at those cuts
    List<AtomicInteger> indexes = new LinkedList<>();
    Random rand = new Random();
    for (int i = 0; i < numSegments; i++) {
        indexes.add(new AtomicInteger(1));
    }
    Futures.loop(() -> indexes.stream().allMatch(x -> x.get() < scalesToPerform - 1), () -> {
        // We randomly generate a stream cut in each iteration of this loop. A valid stream
        // cut in this scenario contains, for each position i in [0, numSegments - 1], a segment
        // from one of the scale epochs of the stream. For each position i we randomly
        // choose an epoch and pick the segment at position i, advancing the corresponding
        // epoch index (in the indexes list) so that the next iteration chooses a later
        // epoch for the same i.
        //
        // Because the segment at position i always covers the key range [d * i, d * (i + 1)),
        // where d = 1 / numSegments, the stream cut is guaranteed to cover the entire
        // key space.
        Map<Segment, Long> map = new HashMap<>();
        for (int i = 0; i < numSegments; i++) {
            AtomicInteger index = indexes.get(i);
            index.set(index.get() + rand.nextInt(scalesToPerform - index.get()));
            map.put(listOfEpochs.get(index.get()).get(i), 0L);
        }
        StreamCut cut = new StreamCutImpl(stream, map);
        log.info("truncating stream at {}", map);
        return controller.truncateStream(SCOPE, streamName, cut).thenCompose(truncated -> {
            log.info("stream truncated successfully at {}", cut);
            assertTrue(truncated);
            // validate only that a non-empty set of successors is returned
            return controller.getSuccessors(cut).thenAccept(successors -> {
                assertTrue(successors.getSegments().size() > 0);
                log.info("Successors for stream cut {} are {}", cut, successors);
            });
        });
    }, executorService).join();
}
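The coverage argument in the comment above can be checked mechanically: with d = 1.0 / numSegments, the ranges [d * i, d * (i + 1)) for i = 0..numSegments - 1 tile [0, 1) regardless of which epoch each position picks its segment from. A small self-contained sketch of that check; the helper name and epsilon are assumptions:

static void checkFullKeySpaceCoverage(int numSegments) {
    double d = 1.0 / numSegments;
    double covered = 0.0;
    for (int i = 0; i < numSegments; i++) {
        double low = d * i;        // lower bound of the segment at position i, in any epoch
        double high = d * (i + 1); // upper bound; independent of the epoch chosen
        if (Math.abs(low - covered) > 1e-9) {
            throw new IllegalStateException("gap in key space before position " + i);
        }
        covered = high;            // each range starts exactly where the previous one ended
    }
    if (Math.abs(covered - 1.0) > 1e-9) {
        throw new IllegalStateException("key space not fully covered");
    }
}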