Use of io.pravega.client.connection.impl.ConnectionFactory in project pravega by pravega.
The class ByteClientTest, method createClientFactory.
ByteStreamClientFactory createClientFactory(String scope) {
    ClientConfig config = ClientConfig.builder().build();
    ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(config);
    ControllerImpl controller = new ControllerImpl(ControllerImplConfig.builder().clientConfig(Utils.buildClientConfig(controllerURI)).build(), connectionFactory.getInternalExecutor());
    ConnectionPool pool = new ConnectionPoolImpl(config, connectionFactory);
    val inputStreamFactory = new SegmentInputStreamFactoryImpl(controller, pool);
    val outputStreamFactory = new SegmentOutputStreamFactoryImpl(controller, pool);
    val metaStreamFactory = new SegmentMetadataClientFactoryImpl(controller, pool);
    return new ByteStreamClientImpl(scope, controller, pool, inputStreamFactory, outputStreamFactory, metaStreamFactory);
}
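A minimal usage sketch for a factory created this way (not part of the original test): the scope and stream names are illustrative, and the stream is assumed to already exist. createByteStreamWriter and createByteStreamReader are the factory's standard entry points.

import io.pravega.client.ByteStreamClientFactory;
import io.pravega.client.byteStream.ByteStreamReader;
import io.pravega.client.byteStream.ByteStreamWriter;
import java.io.IOException;
import lombok.Cleanup;

// Hedged sketch: round-trip a few bytes through a factory built by createClientFactory.
void roundTrip() throws IOException {
    @Cleanup ByteStreamClientFactory factory = createClientFactory("test-scope"); // illustrative scope
    @Cleanup ByteStreamWriter writer = factory.createByteStreamWriter("test-stream"); // stream assumed to exist
    writer.write(new byte[] { 1, 2, 3 });
    writer.flush(); // blocks until the bytes are durably written
    @Cleanup ByteStreamReader reader = factory.createByteStreamReader("test-stream");
    byte[] buffer = new byte[3];
    int bytesRead = reader.read(buffer); // ByteStreamReader is a plain InputStream
}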
Use of io.pravega.client.connection.impl.ConnectionFactory in project pravega by pravega.
The class BatchClientSimpleTest, method batchClientSimpleTest.
/**
* This test verifies the basic functionality of {@link BatchClientFactory}, including stream metadata checks, segment
* counts, parallel segment reads and reads with offsets using stream cuts.
*/
@Test
@SuppressWarnings("deprecation")
public void batchClientSimpleTest() {
    final int totalEvents = RG_PARALLELISM * 100;
    final int offsetEvents = RG_PARALLELISM * 20;
    final int batchIterations = 4;
    final Stream stream = Stream.of(SCOPE, STREAM);
    final ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);
    @Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(clientConfig);
    ControllerImpl controller = new ControllerImpl(ControllerImplConfig.builder().clientConfig(clientConfig).build(), connectionFactory.getInternalExecutor());
    @Cleanup ClientFactoryImpl clientFactory = new ClientFactoryImpl(SCOPE, controller, connectionFactory);
    @Cleanup BatchClientFactory batchClient = BatchClientFactory.withScope(SCOPE, clientConfig);
    log.info("Invoking batchClientSimpleTest test with Controller URI: {}", controllerURI);
    @Cleanup ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, clientConfig);
    groupManager.createReaderGroup(READER_GROUP, ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(SCOPE + "/" + STREAM).build());
    ReaderGroup readerGroup = groupManager.getReaderGroup(READER_GROUP);
    log.info("Writing events to stream");
    // Write events to the Stream.
    writeEvents(clientFactory, STREAM, totalEvents);
    // Instantiate readers to consume from the Stream up to offsetEvents.
    List<CompletableFuture<Integer>> futures = readEventFutures(clientFactory, READER_GROUP, RG_PARALLELISM, offsetEvents);
    Futures.allOf(futures).join();
    // Create a stream cut at the specified offset position.
    Checkpoint cp = readerGroup.initiateCheckpoint("batchClientCheckpoint", executor).join();
    StreamCut streamCut = cp.asImpl().getPositions().values().iterator().next();
    // Instantiate the batch client and assert it provides correct stream info.
    log.debug("Creating batch client.");
    StreamInfo streamInfo = streamManager.getStreamInfo(SCOPE, stream.getStreamName());
    log.debug("Validating stream metadata fields.");
    assertEquals("Expected Stream name: ", STREAM, streamInfo.getStreamName());
    assertEquals("Expected Scope name: ", SCOPE, streamInfo.getScope());
    // Test that we can read events from parallel segments from an offset onwards.
    log.debug("Reading events from stream cut onwards in parallel.");
    List<SegmentRange> ranges = Lists.newArrayList(batchClient.getSegments(stream, streamCut, StreamCut.UNBOUNDED).getIterator());
    assertEquals("Expected events read: ", totalEvents - offsetEvents, readFromRanges(ranges, batchClient));
    // Emulate the behavior of a Hadoop client: i) get the tail of the Stream, ii) read from the current point up to the tail, iii) repeat.
    log.debug("Reading in batch iterations.");
    StreamCut currentTailStreamCut = streamManager.getStreamInfo(SCOPE, stream.getStreamName()).getTailStreamCut();
    int readEvents = 0;
    for (int i = 0; i < batchIterations; i++) {
        writeEvents(clientFactory, STREAM, totalEvents);
        // Read all the existing events in parallel segments from the previous tail to the current one.
        ranges = Lists.newArrayList(batchClient.getSegments(stream, currentTailStreamCut, StreamCut.UNBOUNDED).getIterator());
        assertEquals("Expected number of segments: ", RG_PARALLELISM, ranges.size());
        readEvents += readFromRanges(ranges, batchClient);
        log.debug("Events read in parallel so far: {}.", readEvents);
        currentTailStreamCut = streamManager.getStreamInfo(SCOPE, stream.getStreamName()).getTailStreamCut();
    }
    assertEquals("Expected events read: ", totalEvents * batchIterations, readEvents);
    // Truncate the stream first.
    log.debug("Truncating stream at event {}.", offsetEvents);
    assertTrue(controller.truncateStream(SCOPE, STREAM, streamCut).join());
    // Test the batch client when we choose to start reading a Stream from a truncation point.
    StreamCut initialPosition = streamManager.getStreamInfo(SCOPE, stream.getStreamName()).getHeadStreamCut();
    List<SegmentRange> newRanges = Lists.newArrayList(batchClient.getSegments(stream, initialPosition, StreamCut.UNBOUNDED).getIterator());
    assertEquals("Expected events read: ", (totalEvents - offsetEvents) + totalEvents * batchIterations, readFromRanges(newRanges, batchClient));
    log.debug("Events correctly read from Stream: simple batch client test passed.");
}
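The helper readFromRanges(ranges, batchClient) is not shown in this excerpt. Below is a plausible sequential reconstruction, assuming the events were written with a JavaSerializer; the actual helper in the test may read the ranges in parallel.

import io.pravega.client.BatchClientFactory;
import io.pravega.client.batch.SegmentIterator;
import io.pravega.client.batch.SegmentRange;
import io.pravega.client.stream.impl.JavaSerializer;
import java.util.List;
import lombok.Cleanup;

// Hypothetical reconstruction of the helper used above; the real test may differ.
private int readFromRanges(List<SegmentRange> ranges, BatchClientFactory batchClient) {
    int count = 0;
    for (SegmentRange range : ranges) {
        // readSegment returns an iterator bounded by the range's start and end offsets.
        @Cleanup SegmentIterator<String> iterator = batchClient.readSegment(range, new JavaSerializer<String>());
        while (iterator.hasNext()) {
            iterator.next();
            count++;
        }
    }
    return count;
}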
Use of io.pravega.client.connection.impl.ConnectionFactory in project pravega by pravega.
The class LargeEventTest, method largeEventSimpleTest.
/**
* Invoke the largeEventSimpleTest and ensure we are able to produce events.
* The test fails in case of exceptions while writing to the stream.
*/
@Test
public void largeEventSimpleTest() {
    Service conService = Utils.createPravegaControllerService(null);
    List<URI> ctlURIs = conService.getServiceDetails();
    URI controllerUri = ctlURIs.get(0);
    log.info("Invoking create stream with Controller URI: {}", controllerUri);
    @Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(Utils.buildClientConfig(controllerUri));
    @Cleanup ControllerImpl controller = new ControllerImpl(ControllerImplConfig.builder().clientConfig(Utils.buildClientConfig(controllerUri)).build(), connectionFactory.getInternalExecutor());
    assertTrue(controller.createScope(STREAM_SCOPE).join());
    assertTrue(controller.createStream(STREAM_SCOPE, STREAM_NAME, config).join());
    @Cleanup EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(STREAM_SCOPE, Utils.buildClientConfig(controllerUri));
    log.info("Invoking Writer test with Controller URI: {}", controllerUri);
    @Cleanup EventStreamWriter<ByteBuffer> writer = clientFactory.createEventWriter(STREAM_NAME, new ByteBufferSerializer(), EventWriterConfig.builder().build());
    byte[] payload = new byte[Serializer.MAX_EVENT_SIZE];
    for (int i = 0; i < NUM_EVENTS; i++) {
        log.debug("Producing event: {} ", i);
        // Any exception while writing an event will fail the test.
        writer.writeEvent("", ByteBuffer.wrap(payload));
        writer.flush();
    }
    log.info("Invoking Reader test.");
    ReaderGroupManager groupManager = ReaderGroupManager.withScope(STREAM_SCOPE, Utils.buildClientConfig(controllerUri));
    groupManager.createReaderGroup(READER_GROUP, ReaderGroupConfig.builder().stream(Stream.of(STREAM_SCOPE, STREAM_NAME)).build());
    @Cleanup EventStreamReader<ByteBuffer> reader = clientFactory.createReader(UUID.randomUUID().toString(), READER_GROUP, new ByteBufferSerializer(), ReaderConfig.builder().build());
    int readCount = 0;
    EventRead<ByteBuffer> event = null;
    do {
        event = reader.readNextEvent(10_000);
        log.debug("Read event: {}.", event.getEvent());
        if (event.getEvent() != null) {
            readCount++;
        }
        // Keep reading until all the written events are read; otherwise the test will time out.
    } while ((event.getEvent() != null || event.isCheckpoint()) && readCount < NUM_EVENTS);
    assertEquals("Read count should be equal to write count", NUM_EVENTS, readCount);
}
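The snippet references a config field defined elsewhere in the test class. A plausible definition, assuming a fixed scaling policy; the segment count here is illustrative only.

import io.pravega.client.stream.ScalingPolicy;
import io.pravega.client.stream.StreamConfiguration;

// Assumed field definition for the 'config' referenced above; values are illustrative.
private static final StreamConfiguration config = StreamConfiguration.builder()
        .scalingPolicy(ScalingPolicy.fixed(1))
        .build();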
Use of io.pravega.client.connection.impl.ConnectionFactory in project pravega by pravega.
The class ByteStreamTest, method createClientFactory.
ByteStreamClientFactory createClientFactory(String scope) {
    ClientConfig config = ClientConfig.builder().build();
    ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(config);
    ConnectionPool pool = new ConnectionPoolImpl(config, connectionFactory);
    val inputStreamFactory = new SegmentInputStreamFactoryImpl(PRAVEGA.getLocalController(), pool);
    val outputStreamFactory = new SegmentOutputStreamFactoryImpl(PRAVEGA.getLocalController(), pool);
    val metaStreamFactory = new SegmentMetadataClientFactoryImpl(PRAVEGA.getLocalController(), pool);
    return new ByteStreamClientImpl(scope, PRAVEGA.getLocalController(), pool, inputStreamFactory, outputStreamFactory, metaStreamFactory);
}
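Unlike the ByteClientTest variant above, this factory is wired to PRAVEGA.getLocalController(), an in-process controller supplied by the test's cluster fixture. A short sketch of the offset-addressed reads the byte client supports; the scope and stream names are illustrative and the stream is assumed to exist.

import io.pravega.client.ByteStreamClientFactory;
import io.pravega.client.byteStream.ByteStreamReader;
import java.io.IOException;
import lombok.Cleanup;

// Hedged sketch: byte streams are readable at arbitrary offsets.
void readFromOffset() throws IOException {
    @Cleanup ByteStreamClientFactory factory = createClientFactory("scope"); // illustrative scope
    @Cleanup ByteStreamReader reader = factory.createByteStreamReader("stream"); // stream assumed to exist
    reader.seekToOffset(2); // skip the first two bytes of the stream
    int next = reader.read(); // the byte at offset 2, blocking until one is written
}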
Use of io.pravega.client.connection.impl.ConnectionFactory in project pravega by pravega.
The class EndToEndTransactionTest, method main.
@Test
public static void main(String[] args) throws Exception {
    @Cleanup TestingServer zkTestServer = new TestingServerStarter().start();
    ServiceBuilder serviceBuilder = ServiceBuilder.newInMemoryBuilder(ServiceBuilderConfig.getDefaultConfig());
    serviceBuilder.initialize();
    StreamSegmentStore store = serviceBuilder.createStreamSegmentService();
    int port = Config.SERVICE_PORT;
    @Cleanup PravegaConnectionListener server = new PravegaConnectionListener(false, port, store, serviceBuilder.createTableStoreService(), serviceBuilder.getLowPriorityExecutor(), Config.TLS_PROTOCOL_VERSION.toArray(new String[Config.TLS_PROTOCOL_VERSION.size()]));
    server.startListening();
    Thread.sleep(1000);
    @Cleanup ControllerWrapper controllerWrapper = new ControllerWrapper(zkTestServer.getConnectString(), port);
    Controller controller = controllerWrapper.getController();
    controllerWrapper.awaitRunning();
    final String testScope = "testScope";
    final String testStream = "testStream";
    if (!controller.createScope(testScope).get()) {
        log.error("FAILURE: Error creating test scope");
        return;
    }
    ScalingPolicy policy = ScalingPolicy.fixed(5);
    StreamConfiguration streamConfig = StreamConfiguration.builder().scalingPolicy(policy).build();
    if (!controller.createStream(testScope, testStream, streamConfig).get()) {
        log.error("FAILURE: Error creating test stream");
        return;
    }
    final long txnTimeout = 4000;
    ClientConfig config = ClientConfig.builder().build();
    @Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(config);
    @Cleanup MockClientFactory clientFactory = new MockClientFactory(testScope, controller, new ConnectionPoolImpl(config, connectionFactory));
    @Cleanup TransactionalEventStreamWriter<String> producer = clientFactory.createTransactionalEventWriter("writer", testStream, new UTF8StringSerializer(), EventWriterConfig.builder().transactionTimeoutTime(txnTimeout).build());
    // region Successful commit tests
    Transaction<String> transaction = producer.beginTxn();
    for (int i = 0; i < 1; i++) {
        String event = "\n Transactional Publish \n";
        log.info("Producing event: " + event);
        transaction.writeEvent("", event);
        transaction.flush();
        Thread.sleep(500);
    }
    CompletableFuture<Object> commit = CompletableFuture.supplyAsync(() -> {
        try {
            transaction.commit();
        } catch (Exception e) {
            log.warn("Error committing transaction", e);
        }
        return null;
    });
    commit.join();
    Transaction.Status txnStatus = transaction.checkStatus();
    assertTrue(txnStatus == Transaction.Status.COMMITTING || txnStatus == Transaction.Status.COMMITTED);
    log.info("SUCCESS: successful in committing transaction. Transaction status=" + txnStatus);
    Thread.sleep(2000);
    txnStatus = transaction.checkStatus();
    assertTrue(txnStatus == Transaction.Status.COMMITTED);
    log.info("SUCCESS: successfully committed transaction. Transaction status=" + txnStatus);
    // endregion
    // region Successful abort tests
    Transaction<String> transaction2 = producer.beginTxn();
    for (int i = 0; i < 1; i++) {
        String event = "\n Transactional Publish \n";
        log.info("Producing event: " + event);
        transaction2.writeEvent("", event);
        transaction2.flush();
        Thread.sleep(500);
    }
    CompletableFuture<Object> drop = CompletableFuture.supplyAsync(() -> {
        try {
            transaction2.abort();
        } catch (Exception e) {
            log.warn("Error aborting transaction", e);
        }
        return null;
    });
    drop.join();
    Transaction.Status txn2Status = transaction2.checkStatus();
    assertTrue(txn2Status == Transaction.Status.ABORTING || txn2Status == Transaction.Status.ABORTED);
    log.info("SUCCESS: successful in dropping transaction. Transaction status=" + txn2Status);
    Thread.sleep(2000);
    txn2Status = transaction2.checkStatus();
    assertTrue(txn2Status == Transaction.Status.ABORTED);
    log.info("SUCCESS: successfully aborted transaction. Transaction status=" + txn2Status);
    // endregion
    // region Successful timeout tests
    Transaction<String> tx1 = producer.beginTxn();
    Thread.sleep((long) (1.3 * txnTimeout));
    Transaction.Status txStatus = tx1.checkStatus();
    Assert.assertTrue(Transaction.Status.ABORTING == txStatus || Transaction.Status.ABORTED == txStatus);
    log.info("SUCCESS: successfully aborted transaction after timeout. Transaction status=" + txStatus);
    // endregion
    // region Ping failure due to controller going into disconnection state
    // Fill in these tests once we have controller.stop() implemented.
    System.exit(0);
}
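One capability the test leaves unexercised: a transaction can be re-acquired from its id, which matters for writers that restart mid-transaction. A minimal sketch against the same TransactionalEventStreamWriter API; the method name and event text are illustrative.

import io.pravega.client.stream.Transaction;
import io.pravega.client.stream.TransactionalEventStreamWriter;
import io.pravega.client.stream.TxnFailedException;
import java.util.UUID;

// Hedged sketch: resume and commit a transaction by id after a writer restart.
void resumeTxn(TransactionalEventStreamWriter<String> producer, UUID txnId) throws TxnFailedException {
    Transaction<String> resumed = producer.getTxn(txnId);
    if (resumed.checkStatus() == Transaction.Status.OPEN) {
        resumed.writeEvent("", "resumed event");
        resumed.commit();
    }
}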