Use of io.pravega.client.stream.impl.ClientFactoryImpl in project pravega by pravega: class AutoScaleTest, method scaleUpTxnTest.
/**
* Invoke the scale up Test with transactional writes. Produce traffic from multiple writers in parallel. Each
* writer writes using transactions. The test will periodically check if a scale event has occurred by talking to
* controller via controller client.
*
* @throws InterruptedException if interrupted
* @throws URISyntaxException if the URI is invalid
*/
private CompletableFuture<Void> scaleUpTxnTest() {
    ControllerImpl controller = getController();
    final AtomicBoolean exit = new AtomicBoolean(false);
    ClientFactoryImpl clientFactory = getClientFactory();
    startWritingIntoTxn(clientFactory.createTransactionalEventWriter("writer", SCALE_UP_TXN_STREAM_NAME,
            new JavaSerializer<>(), EventWriterConfig.builder().build()), exit);
    // Overall, wait roughly 260 seconds (about 4.3 minutes) for the test to complete,
    // or until scale up occurs, whichever happens first.
    return Retry.withExpBackoff(10, 10, 30, Duration.ofSeconds(10).toMillis())
            .retryingOn(ScaleOperationNotDoneException.class)
            .throwingOn(RuntimeException.class)
            .runAsync(() -> controller.getCurrentSegments(SCOPE, SCALE_UP_TXN_STREAM_NAME).thenAccept(x -> {
                if (x.getSegments().size() == 1) {
                    throw new ScaleOperationNotDoneException();
                } else {
                    log.info("txn test scale up done successfully");
                    exit.set(true);
                }
            }), scaleExecutorService);
}
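The startWritingIntoTxn helper invoked above is not shown on this page. Its signature (a TransactionalEventStreamWriter plus the exit flag) is visible from the call site; the loop body, payloads, and error handling in the following sketch are assumptions, not the project's actual implementation. It illustrates the general shape of a transactional write loop against Pravega's Transaction API:

private void startWritingIntoTxn(TransactionalEventStreamWriter<String> writer, AtomicBoolean exit) {
    // Hypothetical sketch: write batches of events in transactions until the test signals exit.
    CompletableFuture.runAsync(() -> {
        while (!exit.get()) {
            try {
                Transaction<String> txn = writer.beginTxn();
                for (int i = 0; i < 100; i++) {
                    txn.writeEvent("routingKey", "event-" + i);
                }
                txn.commit();
            } catch (TxnFailedException e) {
                // An aborted or timed-out transaction is simply retried on the next iteration.
                log.warn("Transaction failed, retrying", e);
            }
        }
        writer.close();
    });
}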
Use of io.pravega.client.stream.impl.ClientFactoryImpl in project pravega by pravega: class BookieFailoverTest, method setup.
@Before
public void setup() {
    // Get ZK details to verify that the controller and segment store are running.
    Service zkService = Utils.createZookeeperService();
    List<URI> zkUris = zkService.getServiceDetails();
    log.debug("Zookeeper service details: {}", zkUris);
    // Get the ZK URI and pass it to the host and controller services.
    URI zkUri = zkUris.get(0);
    // Verify that the bookie is running.
    bookkeeperService = Utils.createBookkeeperService(zkUri);
    List<URI> bkUris = bookkeeperService.getServiceDetails();
    log.debug("Bookkeeper service details: {}", bkUris);
    // Verify that the controller is running.
    controllerInstance = Utils.createPravegaControllerService(zkUri);
    assertTrue(controllerInstance.isRunning());
    List<URI> conURIs = controllerInstance.getServiceDetails();
    log.info("Pravega Controller service instance details: {}", conURIs);
    // Fetch all the RPC endpoints and construct the client URIs.
    final List<String> uris = conURIs.stream().filter(ISGRPC).map(URI::getAuthority).collect(Collectors.toList());
    controllerURIDirect = URI.create((Utils.TLS_AND_AUTH_ENABLED ? TLS : TCP) + String.join(",", uris));
    log.info("Controller Service direct URI: {}", controllerURIDirect);
    // Verify that the segment store is running.
    segmentStoreInstance = Utils.createPravegaSegmentStoreService(zkUri, controllerURIDirect);
    assertTrue(segmentStoreInstance.isRunning());
    log.info("Pravega Segmentstore service instance details: {}", segmentStoreInstance.getServiceDetails());
    executorService = ExecutorServiceHelpers.newScheduledThreadPool(NUM_READERS + NUM_WRITERS + 1, "BookieFailoverTest-main");
    controllerExecutorService = ExecutorServiceHelpers.newScheduledThreadPool(2, "BookieFailoverTest-controller");
    // Create the controller client.
    controller = new ControllerImpl(ControllerImplConfig.builder().clientConfig(Utils.buildClientConfig(controllerURIDirect)).maxBackoffMillis(5000).build(), controllerExecutorService);
    // TestState tracks the read and write event counts.
    testState = new TestState(false);
    testState.writersListComplete.add(0, testState.writersComplete);
    streamManager = new StreamManagerImpl(Utils.buildClientConfig(controllerURIDirect));
    createScopeAndStream(SCOPE, STREAM, config, streamManager);
    log.info("Scope passed to client factory {}", SCOPE);
    clientFactory = new ClientFactoryImpl(SCOPE, controller, new SocketConnectionFactoryImpl(Utils.buildClientConfig(controllerURIDirect)));
    readerGroupManager = ReaderGroupManager.withScope(SCOPE, Utils.buildClientConfig(controllerURIDirect));
}
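The matching @After teardown is not reproduced on this page. A minimal sketch of cleanup for the client-side resources opened above might look like the following; the method name and ordering are assumptions, and the real test likely also stops the provisioned services:

@After
public void tearDown() {
    // Close client-side resources opened in setup().
    streamManager.close();
    clientFactory.close();
    readerGroupManager.close();
    // ExecutorServiceHelpers.shutdown stops the pools and awaits termination.
    ExecutorServiceHelpers.shutdown(executorService, controllerExecutorService);
}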
Use of io.pravega.client.stream.impl.ClientFactoryImpl in project pravega by pravega: class BatchClientSimpleTest, method batchClientSimpleTest.
/**
* This test verifies the basic functionality of {@link BatchClientFactory}, including stream metadata checks, segment
* counts, parallel segment reads and reads with offsets using stream cuts.
*/
@Test
@SuppressWarnings("deprecation")
public void batchClientSimpleTest() {
    final int totalEvents = RG_PARALLELISM * 100;
    final int offsetEvents = RG_PARALLELISM * 20;
    final int batchIterations = 4;
    final Stream stream = Stream.of(SCOPE, STREAM);
    final ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);
    @Cleanup
    ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(clientConfig);
    ControllerImpl controller = new ControllerImpl(ControllerImplConfig.builder().clientConfig(clientConfig).build(), connectionFactory.getInternalExecutor());
    @Cleanup
    ClientFactoryImpl clientFactory = new ClientFactoryImpl(SCOPE, controller, connectionFactory);
    @Cleanup
    BatchClientFactory batchClient = BatchClientFactory.withScope(SCOPE, clientConfig);
    log.info("Invoking batchClientSimpleTest test with Controller URI: {}", controllerURI);
    @Cleanup
    ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, clientConfig);
    groupManager.createReaderGroup(READER_GROUP, ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(SCOPE + "/" + STREAM).build());
    ReaderGroup readerGroup = groupManager.getReaderGroup(READER_GROUP);
    log.info("Writing events to stream");
    // Write events to the Stream.
    writeEvents(clientFactory, STREAM, totalEvents);
    // Instantiate readers to consume from the Stream up to offsetEvents.
    List<CompletableFuture<Integer>> futures = readEventFutures(clientFactory, READER_GROUP, RG_PARALLELISM, offsetEvents);
    Futures.allOf(futures).join();
    // Create a stream cut at the specified offset position.
    Checkpoint cp = readerGroup.initiateCheckpoint("batchClientCheckpoint", executor).join();
    StreamCut streamCut = cp.asImpl().getPositions().values().iterator().next();
    // Assert that the stream metadata reported is correct.
    log.debug("Validating stream metadata fields.");
    StreamInfo streamInfo = streamManager.getStreamInfo(SCOPE, stream.getStreamName());
    assertEquals("Expected Stream name: ", STREAM, streamInfo.getStreamName());
    assertEquals("Expected Scope name: ", SCOPE, streamInfo.getScope());
    // Test that we can read events from parallel segments from an offset onwards.
    log.debug("Reading events from stream cut onwards in parallel.");
    List<SegmentRange> ranges = Lists.newArrayList(batchClient.getSegments(stream, streamCut, StreamCut.UNBOUNDED).getIterator());
    assertEquals("Expected events read: ", totalEvents - offsetEvents, readFromRanges(ranges, batchClient));
    // Emulate the behavior of a Hadoop client: i) get the tail of the Stream, ii) read from the current point up to the tail, iii) repeat.
    log.debug("Reading in batch iterations.");
    StreamCut currentTailStreamCut = streamManager.getStreamInfo(SCOPE, stream.getStreamName()).getTailStreamCut();
    int readEvents = 0;
    for (int i = 0; i < batchIterations; i++) {
        writeEvents(clientFactory, STREAM, totalEvents);
        // Read all the existing events in parallel segments from the previous tail to the current one.
        ranges = Lists.newArrayList(batchClient.getSegments(stream, currentTailStreamCut, StreamCut.UNBOUNDED).getIterator());
        assertEquals("Expected number of segments: ", RG_PARALLELISM, ranges.size());
        readEvents += readFromRanges(ranges, batchClient);
        log.debug("Events read in parallel so far: {}.", readEvents);
        currentTailStreamCut = streamManager.getStreamInfo(SCOPE, stream.getStreamName()).getTailStreamCut();
    }
    assertEquals("Expected events read: ", totalEvents * batchIterations, readEvents);
    // Truncate the stream at the stream cut created earlier.
    log.debug("Truncating stream at event {}.", offsetEvents);
    assertTrue(controller.truncateStream(SCOPE, STREAM, streamCut).join());
    // Test the batch client when we choose to start reading a Stream from a truncation point.
    StreamCut initialPosition = streamManager.getStreamInfo(SCOPE, stream.getStreamName()).getHeadStreamCut();
    List<SegmentRange> newRanges = Lists.newArrayList(batchClient.getSegments(stream, initialPosition, StreamCut.UNBOUNDED).getIterator());
    assertEquals("Expected events read: ", (totalEvents - offsetEvents) + totalEvents * batchIterations, readFromRanges(newRanges, batchClient));
    log.debug("Events correctly read from Stream: simple batch client test passed.");
}
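The readFromRanges(ranges, batchClient) helper used throughout this test returns the number of events contained in the given segment ranges; its signature is visible from the call sites, but its internals are not shown on this page. A simplified sequential sketch using BatchClientFactory.readSegment follows; the actual helper in the test reads ranges in parallel, and the String serializer is an assumption:

private int readFromRanges(List<SegmentRange> ranges, BatchClientFactory batchClient) {
    int count = 0;
    for (SegmentRange range : ranges) {
        // readSegment returns a SegmentIterator over the events in the given range.
        try (SegmentIterator<String> iterator = batchClient.readSegment(range, new JavaSerializer<>())) {
            while (iterator.hasNext()) {
                iterator.next();
                count++;
            }
        }
    }
    return count;
}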
Use of io.pravega.client.stream.impl.ClientFactoryImpl in project pravega by pravega: class MultiReaderWriterWithFailOverTest, method setup.
@Before
public void setup() {
    // Get ZK details to verify that the controller and segment store are running.
    Service zkService = Utils.createZookeeperService();
    List<URI> zkUris = zkService.getServiceDetails();
    log.debug("Zookeeper service details: {}", zkUris);
    // Get the ZK URI and pass it to the host and controller services.
    URI zkUri = zkUris.get(0);
    // Verify that the controller is running.
    controllerInstance = Utils.createPravegaControllerService(zkUri);
    assertTrue(controllerInstance.isRunning());
    List<URI> conURIs = controllerInstance.getServiceDetails();
    log.info("Pravega Controller service instance details: {}", conURIs);
    // Fetch all the RPC endpoints and construct the client URIs.
    final List<String> uris = conURIs.stream().filter(ISGRPC).map(URI::getAuthority).collect(Collectors.toList());
    controllerURIDirect = URI.create("tcp://" + String.join(",", uris));
    log.info("Controller Service direct URI: {}", controllerURIDirect);
    // Verify that the segment store is running.
    segmentStoreInstance = Utils.createPravegaSegmentStoreService(zkUri, controllerURIDirect);
    assertTrue(segmentStoreInstance.isRunning());
    log.info("Pravega Segmentstore service instance details: {}", segmentStoreInstance.getServiceDetails());
    executorService = ExecutorServiceHelpers.newScheduledThreadPool(NUM_READERS + NUM_WRITERS + 1, "MultiReaderWriterWithFailOverTest-main");
    controllerExecutorService = ExecutorServiceHelpers.newScheduledThreadPool(2, "MultiReaderWriterWithFailoverTest-controller");
    final ClientConfig clientConfig = Utils.buildClientConfig(controllerURIDirect);
    // Create the controller client.
    controller = new ControllerImpl(ControllerImplConfig.builder().clientConfig(clientConfig).maxBackoffMillis(5000).build(), controllerExecutorService);
    // TestState tracks the read and write event counts.
    testState = new TestState(false);
    streamManager = new StreamManagerImpl(clientConfig);
    createScopeAndStream(scope, STREAM_NAME, config, streamManager);
    log.info("Scope passed to client factory {}", scope);
    clientFactory = new ClientFactoryImpl(scope, controller, new SocketConnectionFactoryImpl(clientConfig));
    readerGroupManager = ReaderGroupManager.withScope(scope, clientConfig);
}
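The createScopeAndStream helper shared by these setup methods is not shown on this page. With the StreamManager API it reduces to roughly the following sketch; the log messages and exact signature are assumptions:

private void createScopeAndStream(String scope, String streamName, StreamConfiguration config, StreamManager streamManager) {
    // Both calls are idempotent: they return false if the scope or stream already exists.
    Boolean createScopeStatus = streamManager.createScope(scope);
    log.info("Create scope status {}", createScopeStatus);
    Boolean createStreamStatus = streamManager.createStream(scope, streamName, config);
    log.info("Create stream status {}", createStreamStatus);
}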
Use of io.pravega.client.stream.impl.ClientFactoryImpl in project pravega by pravega: class ReadTxnWriteAutoScaleWithFailoverTest, method setup.
@Before
public void setup() {
    // Get ZK details to verify that the controller and segment store are running.
    Service zkService = Utils.createZookeeperService();
    List<URI> zkUris = zkService.getServiceDetails();
    log.debug("Zookeeper service details: {}", zkUris);
    // Get the ZK URI and pass it to the host and controller services.
    URI zkUri = zkUris.get(0);
    // Verify that the controller is running.
    controllerInstance = Utils.createPravegaControllerService(zkUri);
    assertTrue(controllerInstance.isRunning());
    List<URI> conURIs = controllerInstance.getServiceDetails();
    log.info("Pravega Controller service instance details: {}", conURIs);
    // Fetch all the RPC endpoints and construct the client URIs.
    final List<String> uris = conURIs.stream().filter(ISGRPC).map(URI::getAuthority).collect(Collectors.toList());
    controllerURIDirect = URI.create((Utils.TLS_AND_AUTH_ENABLED ? TLS : TCP) + String.join(",", uris));
    log.info("Controller Service direct URI: {}", controllerURIDirect);
    // Verify that the segment store is running.
    segmentStoreInstance = Utils.createPravegaSegmentStoreService(zkUri, controllerURIDirect);
    assertTrue(segmentStoreInstance.isRunning());
    log.info("Pravega Segmentstore service instance details: {}", segmentStoreInstance.getServiceDetails());
    // Pool size: number of readers + number of writers + 1 thread to run the checkScale operation.
    executorService = ExecutorServiceHelpers.newScheduledThreadPool(NUM_READERS + TOTAL_NUM_WRITERS + 1, "ReadTxnWriteAutoScaleWithFailoverTest-main");
    controllerExecutorService = ExecutorServiceHelpers.newScheduledThreadPool(2, "ReadTxnWriteAutoScaleWithFailoverTest-controller");
    final ClientConfig clientConfig = Utils.buildClientConfig(controllerURIDirect);
    // Create the controller client.
    controller = new ControllerImpl(ControllerImplConfig.builder().clientConfig(clientConfig).maxBackoffMillis(5000).build(), controllerExecutorService);
    testState = new TestState(true);
    streamManager = new StreamManagerImpl(clientConfig);
    createScopeAndStream(scope, stream, config, streamManager);
    log.info("Scope passed to client factory {}", scope);
    clientFactory = new ClientFactoryImpl(scope, controller, new SocketConnectionFactoryImpl(clientConfig));
    readerGroupManager = ReaderGroupManager.withScope(scope, clientConfig);
}
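Since this test exercises transactional writes with auto-scaling, the StreamConfiguration config passed to createScopeAndStream must include a scaling policy; the actual values used by the test are not shown on this page. A plausible event-rate-based policy built with the public API, with assumed thresholds:

// Hypothetical values: scale up when a segment sustains more than 1 event/second,
// with a scale factor of 2, starting from a minimum of 1 segment.
StreamConfiguration config = StreamConfiguration.builder()
        .scalingPolicy(ScalingPolicy.byEventRate(1, 2, 1))
        .build();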