Example usage of io.pravega.client.control.impl.ControllerImpl in the pravega project.
From class MultiControllerTest, method createScopeWithSimpleRetry:
/**
 * Creates the given scope on the controller, retrying with exponential backoff.
 * Retries are needed because the mesos DNS name for the controller can take a
 * while to resolve after deployment.
 *
 * @param scopeName     name of the scope to create.
 * @param controllerURI URI of the controller service.
 * @return the result of {@code createScope} (true when the scope was created).
 * @throws ExecutionException   if scope creation fails after all retries.
 * @throws InterruptedException if the wait for the result is interrupted.
 */
private boolean createScopeWithSimpleRetry(String scopeName, URI controllerURI) throws ExecutionException, InterruptedException {
    final ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);
    // Need to retry since there is a delay for the mesos DNS name to resolve correctly.
    @Cleanup final ControllerImpl controllerClient = new ControllerImpl(
            ControllerImplConfig.builder().clientConfig(clientConfig).build(), executorService);
    final CompletableFuture<Boolean> createScopeFuture = Retry
            .withExpBackoff(500, 2, 10, 5000)
            .retryingOn(Exception.class)
            .throwingOn(IllegalArgumentException.class)
            .runAsync(() -> controllerClient.createScope(scopeName), executorService);
    return createScopeFuture.get();
}
Example usage of io.pravega.client.control.impl.ControllerImpl in the pravega project.
From class MetadataScalabilityTest, method scale:
/**
 * Manually scales the stream {@code getScalesToPerform()} times, recording the
 * segment set of each epoch along the way.
 *
 * @param controller controller client used to query segments and issue scale requests.
 * @return one list of segments per epoch, each sorted by segment number modulo the
 *         stream's minimum segment count.
 */
List<List<Segment>> scale(ControllerImpl controller) {
    int numSegments = getStreamConfig().getScalingPolicy().getMinNumSegments();
    int scalesToPerform = getScalesToPerform();
    // manually scale the stream SCALES_TO_PERFORM times
    Stream stream = new StreamImpl(SCOPE, getStreamName());
    AtomicInteger counter = new AtomicInteger(0);
    List<List<Segment>> listOfEpochs = new LinkedList<>();
    // Loop until the requested number of scales has completed; each iteration fetches
    // the current segment set, records it, and issues one scale request.
    // NOTE(review): this queries getCurrentSegments(SCOPE, streamName) while the Stream
    // above is built from getStreamName() — presumably the field and the getter refer to
    // the same stream; confirm they agree.
    CompletableFuture<Void> scaleFuture = Futures.loop(() -> counter.incrementAndGet() <= scalesToPerform, () -> controller.getCurrentSegments(SCOPE, streamName).thenCompose(segments -> {
        // Sort by segment number modulo numSegments so the per-epoch ordering is deterministic.
        ArrayList<Segment> sorted = Lists.newArrayList(segments.getSegments().stream().sorted(Comparator.comparingInt(x -> NameUtils.getSegmentNumber(x.getSegmentId()) % numSegments)).collect(Collectors.toList()));
        listOfEpochs.add(sorted);
        // note: with SCALES_TO_PERFORM < numSegments, we can use the segment number as the index
        // into the range map
        Pair<List<Long>, Map<Double, Double>> scaleInput = getScaleInput(sorted);
        List<Long> segmentsToSeal = scaleInput.getKey();
        Map<Double, Double> newRanges = scaleInput.getValue();
        return controller.scaleStream(stream, segmentsToSeal, newRanges, executorService).getFuture().thenAccept(scaleStatus -> {
            log.info("scale stream for epoch {} completed with status {}", counter.get(), scaleStatus);
            // With assertions enabled, an unsuccessful scale fails the run here.
            assert scaleStatus;
        });
    }), executorService);
    // Block until every scale iteration has completed (or propagate its failure).
    scaleFuture.join();
    return listOfEpochs;
}
Example usage of io.pravega.client.control.impl.ControllerImpl in the pravega project.
From class PravegaTest, method simpleTest:
/**
 * Invoke the simpleTest, ensure we are able to produce events.
 * The test fails in case of exceptions while writing to the stream.
 */
@Test
public void simpleTest() {
    Service conService = Utils.createPravegaControllerService(null);
    List<URI> ctlURIs = conService.getServiceDetails();
    URI controllerUri = ctlURIs.get(0);
    log.info("Invoking create stream with Controller URI: {}", controllerUri);
    @Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(Utils.buildClientConfig(controllerUri));
    @Cleanup ControllerImpl controller = new ControllerImpl(ControllerImplConfig.builder().clientConfig(Utils.buildClientConfig(controllerUri)).build(), connectionFactory.getInternalExecutor());
    // Scope and stream must exist before any writer or reader can be created.
    assertTrue(controller.createScope(STREAM_SCOPE).join());
    assertTrue(controller.createStream(STREAM_SCOPE, STREAM_NAME, config).join());
    @Cleanup EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(STREAM_SCOPE, Utils.buildClientConfig(controllerUri));
    log.info("Invoking Writer test with Controller URI: {}", controllerUri);
    @Cleanup EventStreamWriter<Serializable> writer = clientFactory.createEventWriter(STREAM_NAME, new JavaSerializer<>(), EventWriterConfig.builder().build());
    for (int i = 0; i < NUM_EVENTS; i++) {
        String event = "Publish " + i + "\n";
        log.debug("Producing event: {} ", event);
        // any exceptions while writing the event will fail the test.
        writer.writeEvent("", event);
        writer.flush();
    }
    log.info("Invoking Reader test.");
    // FIX: the ReaderGroupManager was previously never closed, leaking its underlying
    // client connection; @Cleanup closes it when the method exits, consistent with
    // every other resource in this test.
    @Cleanup ReaderGroupManager groupManager = ReaderGroupManager.withScope(STREAM_SCOPE, Utils.buildClientConfig(controllerUri));
    groupManager.createReaderGroup(READER_GROUP, ReaderGroupConfig.builder().stream(Stream.of(STREAM_SCOPE, STREAM_NAME)).build());
    @Cleanup EventStreamReader<String> reader = clientFactory.createReader(UUID.randomUUID().toString(), READER_GROUP, new JavaSerializer<>(), ReaderConfig.builder().build());
    int readCount = 0;
    EventRead<String> event = null;
    do {
        event = reader.readNextEvent(10_000);
        log.debug("Read event: {}.", event.getEvent());
        if (event.getEvent() != null) {
            readCount++;
        }
        // try reading until all the written events are read, else the test will timeout.
    } while ((event.getEvent() != null || event.isCheckpoint()) && readCount < NUM_EVENTS);
    assertEquals("Read count should be equal to write count", NUM_EVENTS, readCount);
}
Example usage of io.pravega.client.control.impl.ControllerImpl in the pravega project.
From class AutoScaleTest, method scaleUpTxnTest:
/**
 * Invoke the scale up Test with transactional writes. Produce traffic from multiple writers in parallel. Each
 * writer writes using transactions. The test will periodically check if a scale event has occurred by talking to
 * controller via controller client.
 *
 * @return a future that completes once the stream has scaled beyond a single segment,
 *         or completes exceptionally if scale-up does not happen within the retry budget.
 */
private CompletableFuture<Void> scaleUpTxnTest() {
    ControllerImpl controller = getController();
    // Signals the background transactional writer to stop once scale-up is observed.
    final AtomicBoolean exit = new AtomicBoolean(false);
    ClientFactoryImpl clientFactory = getClientFactory();
    startWritingIntoTxn(clientFactory.createTransactionalEventWriter("writer", SCALE_UP_TXN_STREAM_NAME, new JavaSerializer<>(), EventWriterConfig.builder().build()), exit);
    // overall wait for test to complete in 260 seconds (4.2 minutes) or scale up, whichever happens first.
    // Poll the controller with exponential backoff: ScaleOperationNotDoneException triggers a
    // retry, any other RuntimeException aborts the test.
    return Retry.withExpBackoff(10, 10, 30, Duration.ofSeconds(10).toMillis()).retryingOn(ScaleOperationNotDoneException.class).throwingOn(RuntimeException.class).runAsync(() -> controller.getCurrentSegments(SCOPE, SCALE_UP_TXN_STREAM_NAME).thenAccept(x -> {
        if (x.getSegments().size() == 1) {
            // Still a single segment: scale-up has not happened yet, so retry.
            throw new ScaleOperationNotDoneException();
        } else {
            log.info("txn test scale up done successfully");
            exit.set(true);
        }
    }), scaleExecutorService);
}
Example usage of io.pravega.client.control.impl.ControllerImpl in the pravega project.
From class BookieFailoverTest, method setup:
/**
 * Prepares the failover-test environment: verifies that Zookeeper, Bookkeeper,
 * the controller, and the segment store services are running, then wires up the
 * executor services, controller client, stream manager, scope/stream, client
 * factory, and reader-group manager used by the test body.
 */
@Before
public void setup() {
    // Get zk details to verify if controller, SSS are running
    Service zkService = Utils.createZookeeperService();
    List<URI> zkUris = zkService.getServiceDetails();
    log.debug("Zookeeper service details: {}", zkUris);
    // get the zk ip details and pass it to host, controller
    URI zkUri = zkUris.get(0);
    // Verify bookie is running
    bookkeeperService = Utils.createBookkeeperService(zkUri);
    List<URI> bkUris = bookkeeperService.getServiceDetails();
    log.debug("Bookkeeper service details: {}", bkUris);
    // Verify controller is running.
    controllerInstance = Utils.createPravegaControllerService(zkUri);
    assertTrue(controllerInstance.isRunning());
    List<URI> conURIs = controllerInstance.getServiceDetails();
    log.info("Pravega Controller service instance details: {}", conURIs);
    // Fetch all the RPC endpoints and construct the client URIs.
    final List<String> uris = conURIs.stream().filter(ISGRPC).map(URI::getAuthority).collect(Collectors.toList());
    // Scheme depends on whether TLS/auth is enabled for the deployment.
    controllerURIDirect = URI.create((Utils.TLS_AND_AUTH_ENABLED ? TLS : TCP) + String.join(",", uris));
    log.info("Controller Service direct URI: {}", controllerURIDirect);
    // Verify segment store is running.
    segmentStoreInstance = Utils.createPravegaSegmentStoreService(zkUri, controllerURIDirect);
    assertTrue(segmentStoreInstance.isRunning());
    log.info("Pravega Segmentstore service instance details: {}", segmentStoreInstance.getServiceDetails());
    // One thread per reader and writer, plus one spare for coordination work.
    executorService = ExecutorServiceHelpers.newScheduledThreadPool(NUM_READERS + NUM_WRITERS + 1, "BookieFailoverTest-main");
    controllerExecutorService = ExecutorServiceHelpers.newScheduledThreadPool(2, "BookieFailoverTest-controller");
    // get Controller Uri
    controller = new ControllerImpl(ControllerImplConfig.builder().clientConfig(Utils.buildClientConfig(controllerURIDirect)).maxBackoffMillis(5000).build(), controllerExecutorService);
    testState = new TestState(false);
    // read and write count variables
    testState.writersListComplete.add(0, testState.writersComplete);
    streamManager = new StreamManagerImpl(Utils.buildClientConfig(controllerURIDirect));
    createScopeAndStream(SCOPE, STREAM, config, streamManager);
    log.info("Scope passed to client factory {}", SCOPE);
    clientFactory = new ClientFactoryImpl(SCOPE, controller, new SocketConnectionFactoryImpl(Utils.buildClientConfig(controllerURIDirect)));
    readerGroupManager = ReaderGroupManager.withScope(SCOPE, Utils.buildClientConfig(controllerURIDirect));
}
End of aggregated ControllerImpl usage examples.