Use of io.pravega.client.control.impl.Controller in project pravega by pravega.
From the class ControllerFailoverTest, method failoverTest.
@Test
public void failoverTest() throws InterruptedException, ExecutionException {
String scope = "testFailoverScope" + RandomStringUtils.randomAlphabetic(5);
String stream = "testFailoverStream" + RandomStringUtils.randomAlphabetic(5);
int initialSegments = 1;
List<Long> segmentsToSeal = Collections.singletonList(0L);
Map<Double, Double> newRangesToCreate = new HashMap<>();
newRangesToCreate.put(0.0, 1.0);
ClientConfig clientConfig = Utils.buildClientConfig(controllerURIDirect);
// Connect with first controller instance.
final Controller controller1 = new ControllerImpl(ControllerImplConfig.builder().clientConfig(clientConfig).build(), executorService);
// Create scope and stream.
controller1.createScope(scope).join();
log.info("Scope {} created successfully", scope);
createStream(controller1, scope, stream, ScalingPolicy.fixed(initialSegments));
log.info("Stream {}/{} created successfully", scope, stream);
StreamImpl stream1 = new StreamImpl(scope, stream);
// Initiate the scale operation. It will block until any ongoing transaction completes.
controller1.startScale(stream1, segmentsToSeal, newRangesToCreate).join();
// Now stop the controller instance executing the scale operation.
Futures.getAndHandleExceptions(controllerService.scaleService(0), ExecutionException::new);
log.info("Successfully stopped one instance of controller service");
// Restart the controller service.
Futures.getAndHandleExceptions(controllerService.scaleService(1), ExecutionException::new);
log.info("Successfully started one instance of controller service");
List<URI> controllerUris = controllerService.getServiceDetails();
// Fetch all the RPC endpoints and construct the client URIs.
final List<String> uris = controllerUris.stream().filter(ISGRPC).map(URI::getAuthority).collect(Collectors.toList());
controllerURIDirect = URI.create((Utils.TLS_AND_AUTH_ENABLED ? TLS : TCP) + String.join(",", uris));
log.info("Controller Service direct URI: {}", controllerURIDirect);
ClientConfig clientConf = Utils.buildClientConfig(controllerURIDirect);
// Connect to another controller instance.
@Cleanup final Controller controller2 = new ControllerImpl(ControllerImplConfig.builder().clientConfig(clientConf).build(), executorService);
// Note: if the scale does not complete within the desired time, the test will time out.
boolean scaleStatus = controller2.checkScaleStatus(stream1, 0).join();
while (!scaleStatus) {
Thread.sleep(30000);
scaleStatus = controller2.checkScaleStatus(stream1, 0).join();
}
segmentsToSeal = Collections.singletonList(NameUtils.computeSegmentId(1, 1));
newRangesToCreate = new HashMap<>();
newRangesToCreate.put(0.0, 0.5);
newRangesToCreate.put(0.5, 1.0);
controller2.scaleStream(stream1, segmentsToSeal, newRangesToCreate, executorService).getFuture().join();
log.info("Checking whether scale operation succeeded by fetching current segments");
StreamSegments streamSegments = controller2.getCurrentSegments(scope, stream).join();
log.info("Current segment count= {}", streamSegments.getSegments().size());
Assert.assertEquals(2, streamSegments.getSegments().size());
}
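The polling loop above blocks a thread between status checks. The same wait can be expressed without blocking by reusing the Futures.loop/delayedFuture pattern that waitTillCommitted uses further down this page. A minimal sketch, assuming the same controller2, stream1, and the test's ScheduledExecutorService are in scope:

AtomicBoolean scaleDone = new AtomicBoolean(false);
// Re-check the scale status every 30 seconds without holding a thread while waiting.
Futures.loop(() -> !scaleDone.get(),
        () -> Futures.delayedFuture(() -> controller2.checkScaleStatus(stream1, 0).thenAccept(scaleDone::set),
                30000, executorService),
        executorService).join();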
Use of io.pravega.client.control.impl.Controller in project pravega by pravega.
From the class ReadWithAutoScaleTest, method scaleTestsWithReader.
@Test
public void scaleTestsWithReader() {
URI controllerUri = getControllerURI();
Controller controller = getController();
testState = new TestState(true);
final AtomicBoolean stopWriteFlag = new AtomicBoolean(false);
final AtomicBoolean stopReadFlag = new AtomicBoolean(false);
@Cleanup EventStreamClientFactory clientFactory = getClientFactory();
// 1. Start writing events to the Stream.
List<CompletableFuture<Void>> writers = new ArrayList<>();
writers.add(startWritingIntoTxn(clientFactory.createTransactionalEventWriter("initWriter", STREAM_NAME, new JavaSerializer<>(), EventWriterConfig.builder().transactionTimeoutTime(25000).build()), stopWriteFlag));
// 2. Start a reader group with 2 readers (The stream is configured with 2 segments.)
// 2.1 Create a reader group.
log.info("Creating Reader group : {}", READER_GROUP_NAME);
@Cleanup ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(SCOPE, Utils.buildClientConfig(controllerUri));
readerGroupManager.createReaderGroup(READER_GROUP_NAME, ReaderGroupConfig.builder().stream(Stream.of(SCOPE, STREAM_NAME)).build());
// 2.2 Create readers.
CompletableFuture<Void> reader1 = startReading(clientFactory.createReader("reader1", READER_GROUP_NAME, new JavaSerializer<>(), ReaderConfig.builder().build()), stopReadFlag);
CompletableFuture<Void> reader2 = startReading(clientFactory.createReader("reader2", READER_GROUP_NAME, new JavaSerializer<>(), ReaderConfig.builder().build()), stopReadFlag);
// 3. Now increase the number of transactional writers to trigger a scale operation.
log.info("Increasing the number of writers to 6");
for (int i = 0; i < 5; i++) {
writers.add(startWritingIntoTxn(clientFactory.createTransactionalEventWriter("writer-" + i, STREAM_NAME, new JavaSerializer<>(), EventWriterConfig.builder().transactionTimeoutTime(25000).build()), stopWriteFlag));
}
// 4. Wait until the scale operation is triggered (else time out), then
// validate the data read by the readers, ensuring all events are read and there are no duplicates.
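// Backoff parameters below: initial delay 10 ms, multiplier 10, at most 40 attempts, delay capped at 10 s.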
CompletableFuture<Void> testResult = Retry.withExpBackoff(10, 10, 40, ofSeconds(10).toMillis()).retryingOn(ScaleOperationNotDoneException.class).throwingOn(RuntimeException.class).runAsync(() -> controller.getCurrentSegments(SCOPE, STREAM_NAME).thenAccept(x -> {
int currentNumOfSegments = x.getSegments().size();
if (currentNumOfSegments == 2) {
log.info("The current number of segments is equal to 2, ScaleOperation did not happen");
// Scaling operation did not happen, retry operation.
throw new ScaleOperationNotDoneException();
} else if (currentNumOfSegments > 2) {
// scale operation successful.
log.info("Current Number of segments is {}", currentNumOfSegments);
stopWriteFlag.set(true);
} else {
Assert.fail("Current number of Segments reduced to less than 2. Failure of test");
}
}), scaleExecutorService).thenCompose(v -> Futures.allOf(writers)).thenRun(this::waitForTxnsToComplete).thenCompose(v -> {
stopReadFlag.set(true);
log.info("All writers have stopped. Setting Stop_Read_Flag. Event Written Count:{}, Event Read " + "Count: {}", testState.writtenEvents, testState.readEvents);
return CompletableFuture.allOf(reader1, reader2);
}).thenRun(this::validateResults);
Futures.getAndHandleExceptions(testResult.whenComplete((r, e) -> {
recordResult(testResult, "ScaleUpWithTxnWithReaderGroup");
}), RuntimeException::new);
readerGroupManager.deleteReaderGroup(READER_GROUP_NAME);
}
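startWritingIntoTxn is a test helper whose body is not shown in this excerpt. A minimal sketch of what such a helper could look like against the public transactional-writer API (the payloads and error handling are illustrative; the real helper also updates the testState counters):

private CompletableFuture<Void> startWritingIntoTxn(TransactionalEventStreamWriter<Long> writer, AtomicBoolean stopFlag) {
    return CompletableFuture.runAsync(() -> {
        while (!stopFlag.get()) {
            try {
                // One event per transaction; the commit makes it visible to readers atomically.
                Transaction<Long> txn = writer.beginTxn();
                txn.writeEvent(System.nanoTime());
                txn.commit();
            } catch (TxnFailedException e) {
                log.warn("Transaction failed; starting a new one", e);
            }
        }
        writer.close();
    });
}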
Use of io.pravega.client.control.impl.Controller in project pravega by pravega.
From the class LargeEventWriter, method writeLargeEvent.
/**
* Write the provided list of events (atomically) to the provided segment.
*
* @param segment The segment to write to
* @param events The events to append
* @param tokenProvider A token provider
* @param config Used for retry configuration parameters
* @throws NoSuchSegmentException If the provided segment does not exist.
* @throws SegmentSealedException If the segment is sealed.
* @throws AuthenticationException If the token can't be used for this segment.
* @throws UnsupportedOperationException If the server does not support large events.
*/
public void writeLargeEvent(Segment segment, List<ByteBuffer> events, DelegationTokenProvider tokenProvider, EventWriterConfig config) throws NoSuchSegmentException, AuthenticationException, SegmentSealedException {
List<ByteBuf> payloads = createBufs(events);
int attempts = 1 + Math.max(0, config.getRetryAttempts());
Retry.withExpBackoff(config.getInitialBackoffMillis(), config.getBackoffMultiple(), attempts, config.getMaxBackoffMillis()).retryWhen(t -> {
Throwable ex = Exceptions.unwrap(t);
if (ex instanceof ConnectionFailedException) {
log.info("Connection failure while sending large event: {}. Retrying", ex.getMessage());
return true;
} else if (ex instanceof TokenExpiredException) {
tokenProvider.signalTokenExpired();
log.info("Authentication token expired while writing large event to segment {}. Retrying", segment);
return true;
} else {
return false;
}
}).run(() -> {
@Cleanup RawClient client = new RawClient(controller, connectionPool, segment);
write(segment, payloads, client, tokenProvider);
return null;
});
}
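createBufs is likewise not shown in this excerpt; it converts the caller's ByteBuffers into Netty ByteBufs for transmission. A minimal sketch, assuming a plain zero-copy wrap (the actual implementation may additionally frame or split the payloads):

private static List<ByteBuf> createBufs(List<ByteBuffer> events) {
    // Wrap each buffer without copying; an illustrative stand-in for the real helper.
    return events.stream().map(Unpooled::wrappedBuffer).collect(Collectors.toList());
}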
Use of io.pravega.client.control.impl.Controller in project pravega by pravega.
From the class EndToEndTransactionOrderTest, method waitTillCommitted.
private CompletableFuture<Void> waitTillCommitted(Controller controller, Stream s, UUID key, ConcurrentSkipListSet<UUID> uncommitted) {
AtomicBoolean committed = new AtomicBoolean(false);
AtomicInteger counter = new AtomicInteger(0);
// Check up to 5 times, with a 5-second gap, until the transaction is committed; if it still is not committed after that, declare it uncommitted.
return Futures.loop(() -> !committed.get() && counter.getAndIncrement() < 5, () -> Futures.delayedFuture(() -> controller.checkTransactionStatus(s, key).thenAccept(status -> {
committed.set(status.equals(Transaction.Status.COMMITTED));
}), 5000, executor), executor).thenAccept(v -> {
if (!committed.get()) {
uncommitted.add(key);
}
});
}
Use of io.pravega.client.control.impl.Controller in project pravega by pravega.
From the class EndToEndTransactionOrderTest, method testOrder.
@Ignore
@Test(timeout = 100000)
public void testOrder() throws Exception {
final AtomicBoolean done = new AtomicBoolean(false);
CompletableFuture<Void> writer1 = startWriter("1", clientFactory, done);
CompletableFuture<Void> writer2 = startWriter("2", clientFactory, done);
CompletableFuture<Void> writer3 = startWriter("3", clientFactory, done);
CompletableFuture<Void> writer4 = startWriter("4", clientFactory, done);
// perform multiple scale stream operations so that rolling transactions may happen
Stream s = new StreamImpl("test", "test");
Map<Double, Double> map = new HashMap<>();
map.put(0.0, 1.0);
@Cleanup("shutdownNow") ScheduledExecutorService executor = ExecutorServiceHelpers.newScheduledThreadPool(1, "order");
controller.scaleStream(s, Collections.singletonList(0L), map, executor).getFuture().get();
controller.scaleStream(s, Collections.singletonList(NameUtils.computeSegmentId(1, 1)), map, executor).getFuture().get();
controller.scaleStream(s, Collections.singletonList(NameUtils.computeSegmentId(2, 2)), map, executor).getFuture().get();
// stop writers
done.set(true);
CompletableFuture.allOf(writer1, writer2, writer3, writer4).join();
// wait for all transactions to commit
Futures.allOf(eventToTxnMap.entrySet().stream().map(x -> waitTillCommitted(controller, s, x.getValue(), uncommitted)).collect(Collectors.toList())).join();
assertTrue(uncommitted.isEmpty());
// read all events using a single reader and verify the order
List<Triple<Integer, UUID, String>> eventOrder = new LinkedList<>();
// read events until every transaction's event has been seen
while (!eventToTxnMap.isEmpty()) {
EventRead<Integer> integerEventRead = reader.readNextEvent(SECONDS.toMillis(60));
if (integerEventRead.getEvent() != null) {
int event1 = integerEventRead.getEvent();
UUID txnId = eventToTxnMap.remove(event1);
String writerId = txnToWriter.get(txnId);
UUID first = writersList.get(writerId).remove(0);
eventOrder.add(new ImmutableTriple<>(event1, txnId, writerId));
assertEquals(first, txnId);
}
}
}
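Each scaleStream call above seals the one segment created by the previous epoch, which is why the sealed segment ids are computed from successive epochs. NameUtils.computeSegmentId(segmentNumber, epoch) packs both values into a single long; the sketch below shows the packing as commonly implemented (epoch in the high 32 bits — stated here as an assumption, the authoritative version lives in io.pravega.shared.NameUtils):

// Illustrative packing: epoch in the high 32 bits, segment number in the low 32 bits.
static long computeSegmentId(int segmentNumber, int epoch) {
    return ((long) epoch << 32) + segmentNumber;
}
// Example: computeSegmentId(1, 1) == 4294967297L, i.e. segment number 1 created in epoch 1.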