use of io.pravega.client.control.impl.Controller in project pravega by pravega.
the class ScaleTest method main.
public static void main(String[] args) throws Exception {
    try {
        @Cleanup("shutdownNow")
        val executor = ExecutorServiceHelpers.newScheduledThreadPool(1, "test");
        @Cleanup
        TestingServer zkTestServer = new TestingServerStarter().start();
        ServiceBuilder serviceBuilder = ServiceBuilder.newInMemoryBuilder(ServiceBuilderConfig.getDefaultConfig());
        serviceBuilder.initialize();
        StreamSegmentStore store = serviceBuilder.createStreamSegmentService();
        TableStore tableStore = serviceBuilder.createTableStoreService();
        int port = Config.SERVICE_PORT;
        @Cleanup
        PravegaConnectionListener server = new PravegaConnectionListener(false, port, store, tableStore, serviceBuilder.getLowPriorityExecutor());
        server.startListening();
        // Create a controller object for testing against a separate controller process.
        @Cleanup
        ControllerWrapper controllerWrapper = new ControllerWrapper(zkTestServer.getConnectString(), port);
        Controller controller = controllerWrapper.getController();
        final String scope = "scope";
        controllerWrapper.getControllerService().createScope(scope, 0L).get();
        final String streamName = "stream1";
        final StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
        Stream stream = new StreamImpl(scope, streamName);
        log.info("Creating stream {}/{}", scope, streamName);
        if (!controller.createStream(scope, streamName, config).get()) {
            log.error("Stream already existed, exiting");
            return;
        }
        // Test 1: scale stream, splitting one segment into two.
        log.info("Scaling stream {}/{}, splitting one segment into two", scope, streamName);
        // The new key ranges [0.0, 0.5) and [0.5, 1.0) replace the single segment's full range.
        Map<Double, Double> map = new HashMap<>();
        map.put(0.0, 0.5);
        map.put(0.5, 1.0);
        if (!controller.scaleStream(stream, Collections.singletonList(0L), map, executor).getFuture().get()) {
            log.error("Scale stream: splitting segment into two failed, exiting");
            return;
        }
        // Test 2: scale stream, merging two segments into one.
        log.info("Scaling stream {}/{}, merging two segments into one", scope, streamName);
        CompletableFuture<Boolean> scaleResponseFuture = controller.scaleStream(stream, Arrays.asList(1L, 2L), Collections.singletonMap(0.0, 1.0), executor).getFuture();
        if (!scaleResponseFuture.get()) {
            log.error("Scale stream: merging two segments into one failed, exiting");
            return;
        }
        // Test 3: create a transaction and attempt a scale operation; it should fail the precondition check.
        CompletableFuture<TxnSegments> txnFuture = controller.createTransaction(stream, 5000);
        TxnSegments transaction = txnFuture.get();
        if (transaction == null) {
            log.error("Create transaction failed, exiting");
            return;
        }
        log.info("Scaling stream {}/{}, splitting one segment into two, while transaction is ongoing", scope, streamName);
        scaleResponseFuture = controller.scaleStream(stream, Collections.singletonList(3L), map, executor).getFuture();
        CompletableFuture<Boolean> future = scaleResponseFuture.whenComplete((r, e) -> {
            if (e != null) {
                log.error("Failed: scale with ongoing transaction.", e);
            } else if (getAndHandleExceptions(controller.checkTransactionStatus(stream, transaction.getTxnId()), RuntimeException::new) != Transaction.Status.OPEN) {
                log.info("Success: scale with ongoing transaction.");
            } else {
                log.error("Failed: scale with ongoing transaction.");
            }
        });
        CompletableFuture<Void> statusFuture = controller.abortTransaction(stream, transaction.getTxnId());
        statusFuture.get();
        future.get();
        log.info("All scaling tests PASSED");
        ExecutorServiceHelpers.shutdown(executor);
        System.exit(0);
    } catch (Throwable t) {
        log.error("Test failed", t);
        System.exit(-1);
    }
}
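Stripped of the test harness, the core scale call reduces to the following minimal sketch. It assumes an already-connected Controller, a Stream handle, and a ScheduledExecutorService exactly as set up above; no calls beyond those shown in this section are used.

    // Minimal sketch: split a stream's single segment (id 0L) into two halves
    // of the key space, blocking until the controller reports completion.
    Map<Double, Double> newRanges = new HashMap<>();
    newRanges.put(0.0, 0.5);
    newRanges.put(0.5, 1.0);
    boolean split = controller.scaleStream(stream, Collections.singletonList(0L), newRanges, executor)
                              .getFuture()
                              .get();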
use of io.pravega.client.control.impl.Controller in project pravega by pravega.
the class MetadataScalabilityTest method truncation.
void truncation(ControllerImpl controller, List<List<Segment>> listOfEpochs) {
    int numSegments = getStreamConfig().getScalingPolicy().getMinNumSegments();
    int scalesToPerform = getScalesToPerform();
    Stream stream = new StreamImpl(SCOPE, getStreamName());
    // Try SCALES_TO_PERFORM randomly generated stream cuts and truncate the stream
    // at those stream cuts.
    List<AtomicInteger> indexes = new LinkedList<>();
    Random rand = new Random();
    for (int i = 0; i < numSegments; i++) {
        indexes.add(new AtomicInteger(1));
    }
    Futures.loop(() -> indexes.stream().allMatch(x -> x.get() < scalesToPerform - 1), () -> {
        // We randomly generate a stream cut in each iteration of this loop. A valid stream
        // cut in this scenario contains, for each position i in [0, numSegments - 1], a
        // segment from one of the scale epochs of the stream. For each position i, we
        // randomly choose an epoch and pick the segment at position i. The epoch index
        // (in the indexes list) is incremented accordingly so that the next iteration
        // chooses a later epoch for the same i.
        //
        // Because the segment at position i always contains the range [d * i, d * (i + 1)],
        // where d = 1 / numSegments, the stream cut is guaranteed to cover the entire
        // key space.
        Map<Segment, Long> map = new HashMap<>();
        for (int i = 0; i < numSegments; i++) {
            AtomicInteger index = indexes.get(i);
            index.set(index.get() + rand.nextInt(scalesToPerform - index.get()));
            map.put(listOfEpochs.get(index.get()).get(i), 0L);
        }
        StreamCut cut = new StreamCutImpl(stream, map);
        log.info("Truncating stream at {}", map);
        return controller.truncateStream(SCOPE, getStreamName(), cut).thenCompose(truncated -> {
            log.info("Stream truncated successfully at {}", cut);
            assertTrue(truncated);
            // We just validate that a non-empty value is returned.
            return controller.getSuccessors(cut).thenAccept(successors -> {
                assertTrue(successors.getSegments().size() > 0);
                log.info("Successors for stream cut {} are {}", cut, successors);
            });
        });
    }, executorService).join();
}
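Outside the randomized loop, a single truncation boils down to one stream-cut map and one call. A minimal sketch, assuming a stream whose epoch-0 segment has id 0L; the offset 0L cuts at that segment's start, and the (scope, stream, id) Segment constructor is the standard client one:

    // Minimal sketch: truncate at the very start of epoch-0 segment 0.
    Stream stream = new StreamImpl(SCOPE, getStreamName());
    Segment first = new Segment(SCOPE, getStreamName(), 0L);
    StreamCut cut = new StreamCutImpl(stream, Collections.singletonMap(first, 0L));
    boolean truncated = controller.truncateStream(SCOPE, getStreamName(), cut).join();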
use of io.pravega.client.control.impl.Controller in project pravega by pravega.
the class AutoScaleTest method scaleUpTest.
/**
 * Invokes the simple scale-up test: produces traffic from multiple writers in parallel,
 * then periodically checks whether a scale event has occurred by querying the controller
 * via the controller client.
 */
private CompletableFuture<Void> scaleUpTest() {
    ClientFactoryImpl clientFactory = getClientFactory();
    ControllerImpl controller = getController();
    final AtomicBoolean exit = new AtomicBoolean(false);
    createWriters(clientFactory, 6, SCOPE, SCALE_UP_STREAM_NAME);
    // Overall, wait up to about 260 seconds (roughly 4.3 minutes) for the test to
    // complete, or until scale-up is observed, whichever happens first.
    return Retry.withExpBackoff(10, 10, 30, Duration.ofSeconds(10).toMillis())
                .retryingOn(ScaleOperationNotDoneException.class)
                .throwingOn(RuntimeException.class)
                .runAsync(() -> controller.getCurrentSegments(SCOPE, SCALE_UP_STREAM_NAME).thenAccept(x -> {
                    log.debug("Current segment count: {}", x.getSegments().size());
                    if (x.getSegments().size() == 1) {
                        // Still a single segment: auto-scale has not happened yet, so retry.
                        throw new ScaleOperationNotDoneException();
                    } else {
                        log.info("Scale up done successfully");
                        exit.set(true);
                    }
                }), scaleExecutorService);
}
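The ceiling mentioned in the comment follows from the backoff parameters, assuming the usual reading of Retry.withExpBackoff(initialMillis, multiplier, attempts, maxDelay): delays between attempts grow as 10 ms, 100 ms, 1,000 ms, and are then capped at 10,000 ms. A quick check of that arithmetic:

    // 30 attempts means 29 waits: 10 + 100 + 1000 + 26 * 10000 = 261110 ms,
    // i.e. just over 260 seconds.
    long totalMs = 0;
    long delayMs = 10;                            // initial delay
    for (int gap = 0; gap < 29; gap++) {
        totalMs += delayMs;
        delayMs = Math.min(delayMs * 10, 10_000); // exponential growth, capped at 10 s
    }
    System.out.println(totalMs + " ms");          // prints "261110 ms"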
use of io.pravega.client.control.impl.Controller in project pravega by pravega.
the class AutoScaleTest method setup.
/**
 * Invokes the createStream method and ensures we are able to create the streams.
 *
 * @throws InterruptedException if interrupted
 * @throws ExecutionException if stream creation fails
 */
@Before
public void setup() throws InterruptedException, ExecutionException {
    // Create a scope.
    Controller controller = getController();
    executorService = ExecutorServiceHelpers.newScheduledThreadPool(5, "AutoScaleTest-main");
    Boolean createScopeStatus = controller.createScope(SCOPE).get();
    log.debug("Create scope status {}", createScopeStatus);
    // Create the streams.
    Boolean createStreamStatus = controller.createStream(SCOPE, SCALE_UP_STREAM_NAME, CONFIG_UP).get();
    log.debug("Create stream status for scale-up stream {}", createStreamStatus);
    createStreamStatus = controller.createStream(SCOPE, SCALE_DOWN_STREAM_NAME, CONFIG_DOWN).get();
    log.debug("Create stream status for scale-down stream {}", createStreamStatus);
    log.debug("Scale-down stream starting segments: {}", controller.getCurrentSegments(SCOPE, SCALE_DOWN_STREAM_NAME).get().getSegments().size());
    // Pre-split the scale-down stream into two segments so the test can observe a scale-down.
    Map<Double, Double> keyRanges = new HashMap<>();
    keyRanges.put(0.0, 0.5);
    keyRanges.put(0.5, 1.0);
    Boolean status = controller.scaleStream(new StreamImpl(SCOPE, SCALE_DOWN_STREAM_NAME), Collections.singletonList(0L), keyRanges, executorService)
                               .getFuture().get();
    assertTrue(status);
    createStreamStatus = controller.createStream(SCOPE, SCALE_UP_TXN_STREAM_NAME, CONFIG_TXN).get();
    log.debug("Create stream status for txn stream {}", createStreamStatus);
}
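The CONFIG_UP, CONFIG_DOWN, and CONFIG_TXN constants are defined elsewhere in the test class; for an auto-scale test they would be built around a throughput-based policy rather than ScalingPolicy.fixed. A hedged sketch of what such a configuration could look like, where the target rate and scale factor are illustrative assumptions, not the test's actual values:

    // Illustrative values only: split a segment that sustains more than
    // ~10 events/sec into 2, and never shrink below 1 segment.
    StreamConfiguration configUp = StreamConfiguration.builder()
            .scalingPolicy(ScalingPolicy.byEventRate(10, 2, 1))
            .build();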
use of io.pravega.client.control.impl.Controller in project pravega by pravega.
the class ReadWithAutoScaleTest method setup.
/**
 * Invokes the createStream method and ensures we are able to create the scope and stream.
 *
 * @throws InterruptedException if interrupted
 * @throws ExecutionException if stream creation fails
 */
@Before
public void setup() throws InterruptedException, ExecutionException {
    Controller controller = getController();
    executorService = ExecutorServiceHelpers.newScheduledThreadPool(4, "ReadWithAutoScaleTest-main");
    // Create a scope.
    Boolean createScopeStatus = controller.createScope(SCOPE).get();
    log.debug("Create scope status {}", createScopeStatus);
    // Create a stream.
    Boolean createStreamStatus = controller.createStream(SCOPE, STREAM_NAME, CONFIG).get();
    log.debug("Create stream status {}", createStreamStatus);
}
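Both setup methods log the boolean results instead of asserting on them. That works because createScope and createStream complete with false, rather than failing, when the scope or stream already exists, so idempotent setup can simply branch on the result. A minimal sketch of that pattern, reusing the names above:

    if (!controller.createScope(SCOPE).get()) {
        // false means the scope already existed; for test setup this is usually fine.
        log.debug("Scope {} already exists, reusing it", SCOPE);
    }
    if (!controller.createStream(SCOPE, STREAM_NAME, CONFIG).get()) {
        log.debug("Stream {}/{} already exists, reusing it", SCOPE, STREAM_NAME);
    }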