Use of io.pravega.client.connection.impl.ConnectionPool in project pravega by pravega.
From the class ControllerEventProcessorsTest, method testIsReady:
@Test(timeout = 30000L)
public void testIsReady() throws Exception {
    LocalController controller = mock(LocalController.class);
    StreamMetadataStore streamStore = mock(StreamMetadataStore.class);
    BucketStore bucketStore = mock(BucketStore.class);
    ConnectionPool connectionPool = mock(ConnectionPool.class);
    StreamMetadataTasks streamMetadataTasks = mock(StreamMetadataTasks.class);
    StreamTransactionMetadataTasks streamTransactionMetadataTasks = mock(StreamTransactionMetadataTasks.class);
    KVTableMetadataStore kvtStore = mock(KVTableMetadataStore.class);
    TableMetadataTasks kvtTasks = mock(TableMetadataTasks.class);
    ControllerEventProcessorConfig config = ControllerEventProcessorConfigImpl.withDefault();
    EventProcessorSystem system = mock(EventProcessorSystem.class);
    CuratorZookeeperClient curatorZKClientMock = mock(CuratorZookeeperClient.class);
    CuratorFramework client = mock(CuratorFramework.class);
    Listenable listen = mock(Listenable.class);
    doNothing().when(listen).addListener(any(ConnectionStateListener.class));
    doReturn(listen).when(client).getConnectionStateListenable();
    doReturn(curatorZKClientMock).when(client).getZookeeperClient();
    doReturn(true).when(curatorZKClientMock).isConnected();
    ZKCheckpointStore checkpointStore = (ZKCheckpointStore) CheckpointStoreFactory.createZKStore(client);
    doAnswer(x -> null).when(streamMetadataTasks).initializeStreamWriters(any(), any());
    doAnswer(x -> null).when(streamTransactionMetadataTasks).initializeStreamWriters(any(EventStreamClientFactory.class), any(ControllerEventProcessorConfig.class));
    CompletableFuture<Boolean> createScopeResponseFuture = new CompletableFuture<>();
    CompletableFuture<Void> createScopeSignalFuture = new CompletableFuture<>();
    doAnswer(x -> {
        createScopeSignalFuture.complete(null);
        return createScopeResponseFuture;
    }).when(controller).createScope(anyString());
    LinkedBlockingQueue<CompletableFuture<Boolean>> createStreamResponses = new LinkedBlockingQueue<>();
    LinkedBlockingQueue<CompletableFuture<Void>> createStreamSignals = new LinkedBlockingQueue<>();
    List<CompletableFuture<Boolean>> createStreamResponsesList = new LinkedList<>();
    List<CompletableFuture<Void>> createStreamSignalsList = new LinkedList<>();
    for (int i = 0; i < 4; i++) {
        CompletableFuture<Boolean> responseFuture = new CompletableFuture<>();
        CompletableFuture<Void> signalFuture = new CompletableFuture<>();
        createStreamResponsesList.add(responseFuture);
        createStreamResponses.add(responseFuture);
        createStreamSignalsList.add(signalFuture);
        createStreamSignals.add(signalFuture);
    }
    // return a future from latches queue
    doAnswer(x -> {
        createStreamSignals.take().complete(null);
        return createStreamResponses.take();
    }).when(controller).createInternalStream(anyString(), anyString(), any());
    @Cleanup
    ControllerEventProcessors processors = spy(new ControllerEventProcessors("host1", config, controller, checkpointStore, streamStore, bucketStore, connectionPool, streamMetadataTasks, streamTransactionMetadataTasks, kvtStore, kvtTasks, system, executorService()));
    // Check isReady() method before invoking bootstrap
    Assert.assertFalse(processors.getBootstrapCompleted().get());
    Assert.assertTrue(processors.isMetadataServiceConnected());
    Assert.assertFalse(processors.isRunning());
    Assert.assertFalse(processors.isReady());
    // Call bootstrap on ControllerEventProcessors
    processors.bootstrap(streamTransactionMetadataTasks, streamMetadataTasks, kvtTasks);
    // Wait on create scope being called.
    createScopeSignalFuture.join();
    createScopeResponseFuture.complete(true);
    createStreamSignalsList.get(0).join();
    createStreamSignalsList.get(1).join();
    createStreamSignalsList.get(2).join();
    createStreamSignalsList.get(3).join();
    createStreamResponsesList.get(0).complete(true);
    createStreamResponsesList.get(1).complete(true);
    createStreamResponsesList.get(2).complete(true);
    createStreamResponsesList.get(3).complete(true);
    AssertExtensions.assertEventuallyEquals(true, () -> processors.getBootstrapCompleted().get(), 10000);
    Assert.assertTrue(processors.isMetadataServiceConnected());
    Assert.assertFalse(processors.isRunning());
    Assert.assertFalse(processors.isReady());
    EventProcessorGroup mockEventProcessorGroup = mock(EventProcessorGroup.class);
    doNothing().when(mockEventProcessorGroup).awaitRunning();
    doReturn(mockEventProcessorGroup).when(system).createEventProcessorGroup(any(EventProcessorConfig.class), any(CheckpointStore.class), any(ScheduledExecutorService.class));
    processors.startAsync();
    processors.awaitRunning();
    Assert.assertTrue(processors.isMetadataServiceConnected());
    Assert.assertTrue(processors.isBootstrapCompleted());
    Assert.assertTrue(processors.isRunning());
    Assert.assertTrue(processors.isReady());
}
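In this test the ConnectionPool is only a Mockito mock that is threaded through the ControllerEventProcessors constructor; the pool itself is never exercised. In non-test code the same dependency is typically satisfied with a ConnectionPoolImpl built from a ClientConfig and a SocketConnectionFactoryImpl, which is the pattern the ReadWriteTest, MetricsTest, and WriteBatchTest examples below use. A minimal sketch, assuming only those constructors as they appear later on this page:

    // Minimal sketch: a real ConnectionPool in place of the mock above.
    // Uses the same ConnectionPoolImpl and SocketConnectionFactoryImpl constructors shown in the later examples on this page.
    ClientConfig clientConfig = ClientConfig.builder().build();
    @Cleanup
    ConnectionPool connectionPool = new ConnectionPoolImpl(clientConfig, new SocketConnectionFactoryImpl(clientConfig));
    // connectionPool would then be passed to ControllerEventProcessors exactly where the mock is passed above.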
Use of io.pravega.client.connection.impl.ConnectionPool in project pravega by pravega.
From the class ControllerEventProcessorsTest, method testHandleOrphaned:
@Test(timeout = 10000)
public void testHandleOrphaned() throws CheckpointStoreException {
    LocalController localController = mock(LocalController.class);
    CheckpointStore checkpointStore = mock(CheckpointStore.class);
    StreamMetadataStore streamStore = mock(StreamMetadataStore.class);
    BucketStore bucketStore = mock(BucketStore.class);
    ConnectionPool connectionPool = mock(ConnectionPool.class);
    StreamMetadataTasks streamMetadataTasks = mock(StreamMetadataTasks.class);
    StreamTransactionMetadataTasks streamTransactionMetadataTasks = mock(StreamTransactionMetadataTasks.class);
    KVTableMetadataStore kvtStore = mock(KVTableMetadataStore.class);
    TableMetadataTasks kvtTasks = mock(TableMetadataTasks.class);
    ControllerEventProcessorConfig config = ControllerEventProcessorConfigImpl.withDefault();
    EventProcessorSystem system = mock(EventProcessorSystem.class);
    EventProcessorGroup<ControllerEvent> processor = getProcessor();
    EventProcessorGroup<ControllerEvent> mockProcessor = spy(processor);
    doThrow(new CheckpointStoreException("host not found")).when(mockProcessor).notifyProcessFailure("host3");
    when(system.createEventProcessorGroup(any(), any(), any())).thenReturn(mockProcessor);
    @Cleanup
    ControllerEventProcessors processors = new ControllerEventProcessors("host1", config, localController, checkpointStore, streamStore, bucketStore, connectionPool, streamMetadataTasks, streamTransactionMetadataTasks, kvtStore, kvtTasks, system, executorService());
    // Check the case where init has not been called, so kvtRequestProcessors are not initialized and remain null.
    assertTrue(Futures.await(processors.sweepFailedProcesses(() -> Sets.newHashSet("host1"))));
    Assert.assertFalse(processors.isReady());
    Assert.assertFalse(processors.isBootstrapCompleted());
    Assert.assertFalse(processors.isMetadataServiceConnected());
    processors.startAsync();
    processors.awaitRunning();
    assertTrue(Futures.await(processors.sweepFailedProcesses(() -> Sets.newHashSet("host1"))));
    assertTrue(Futures.await(processors.handleFailedProcess("host1")));
    AssertExtensions.assertFutureThrows("host not found", processors.handleFailedProcess("host3"), e -> e instanceof CheckpointStoreException);
    processors.shutDown();
}
Use of io.pravega.client.connection.impl.ConnectionPool in project pravega by pravega.
From the class ReadWriteTest, method readWriteTest:
@Test(timeout = 60000)
public void readWriteTest() throws InterruptedException, ExecutionException {
    String scope = "testMultiReaderWriterScope";
    String readerGroupName = "testMultiReaderWriterReaderGroup";
    // 20 readers -> 20 stream segments (to have max read parallelism)
    ScalingPolicy scalingPolicy = ScalingPolicy.fixed(20);
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(scalingPolicy).build();
    eventsReadFromPravega = new ConcurrentLinkedQueue<>();
    // data used by each of the writers.
    eventData = new AtomicLong();
    // used by readers to maintain a count of events.
    eventReadCount = new AtomicLong();
    stopReadFlag = new AtomicBoolean(false);
    ClientConfig clientConfig = ClientConfig.builder().build();
    try (ConnectionPool cp = new ConnectionPoolImpl(clientConfig, new SocketConnectionFactoryImpl(clientConfig));
         StreamManager streamManager = new StreamManagerImpl(controller, cp)) {
        // create a scope
        Boolean createScopeStatus = streamManager.createScope(scope);
        log.info("Create scope status {}", createScopeStatus);
        // create a stream
        Boolean createStreamStatus = streamManager.createStream(scope, STREAM_NAME, config);
        log.info("Create stream status {}", createStreamStatus);
    }
    try (ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
         ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
         ReaderGroupManager readerGroupManager = new ReaderGroupManagerImpl(scope, controller, clientFactory)) {
        // start writing events to the stream
        log.info("Creating {} writers", NUM_WRITERS);
        List<CompletableFuture<Void>> writerList = new ArrayList<>();
        for (int i = 0; i < NUM_WRITERS; i++) {
            log.info("Starting writer{}", i);
            writerList.add(startNewWriter(eventData, clientFactory));
        }
        // create a reader group
        log.info("Creating Reader group : {}", readerGroupName);
        readerGroupManager.createReaderGroup(readerGroupName, ReaderGroupConfig.builder().stream(Stream.of(scope, STREAM_NAME)).build());
        log.info("Reader group name {} ", readerGroupManager.getReaderGroup(readerGroupName).getGroupName());
        log.info("Reader group scope {}", readerGroupManager.getReaderGroup(readerGroupName).getScope());
        // create readers
        log.info("Creating {} readers", NUM_READERS);
        List<CompletableFuture<Void>> readerList = new ArrayList<>();
        String readerName = "reader" + RandomFactory.create().nextInt(Integer.MAX_VALUE);
        // start reading events
        for (int i = 0; i < NUM_READERS; i++) {
            log.info("Starting reader{}", i);
            readerList.add(startNewReader(readerName + i, clientFactory, readerGroupName, eventsReadFromPravega, eventData, eventReadCount, stopReadFlag));
        }
        // wait for writers completion
        Futures.allOf(writerList).get();
        ExecutorServiceHelpers.shutdown(writerPool);
        // set stop read flag to true
        stopReadFlag.set(true);
        // wait for readers completion
        Futures.allOf(readerList).get();
        ExecutorServiceHelpers.shutdown(readerPool);
        // delete readergroup
        log.info("Deleting readergroup {}", readerGroupName);
        readerGroupManager.deleteReaderGroup(readerGroupName);
    }
    log.info("All writers have stopped. Setting Stop_Read_Flag. Event Written Count:{}, Event Read " + "Count: {}", eventData.get(), eventsReadFromPravega.size());
    assertEquals(TOTAL_NUM_EVENTS, eventsReadFromPravega.size());
    // check unique events.
    assertEquals(TOTAL_NUM_EVENTS, new TreeSet<>(eventsReadFromPravega).size());
    // seal the stream
    CompletableFuture<Boolean> sealStreamStatus = controller.sealStream(scope, STREAM_NAME);
    log.info("Sealing stream {}", STREAM_NAME);
    assertTrue(sealStreamStatus.get());
    // delete the stream
    CompletableFuture<Boolean> deleteStreamStatus = controller.deleteStream(scope, STREAM_NAME);
    log.info("Deleting stream {}", STREAM_NAME);
    assertTrue(deleteStreamStatus.get());
    // delete the scope
    CompletableFuture<Boolean> deleteScopeStatus = controller.deleteScope(scope);
    log.info("Deleting scope {}", scope);
    assertTrue(deleteScopeStatus.get());
    log.info("Read write test succeeds");
}
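Note that the ConnectionPool here is used only to build the StreamManagerImpl for scope and stream creation, and the first try-with-resources block closes it before any events flow; the ClientFactoryImpl in the second block manages its own connections through the SocketConnectionFactoryImpl it is given. The startNewWriter and startNewReader helpers are defined elsewhere in ReadWriteTest and are not shown on this page. For orientation only, a hypothetical sketch of the writer side, assuming a JavaSerializer<Long>, the writerPool executor referenced above, and an invented NUM_EVENTS_PER_WRITER constant; the real helper may differ:

    // Hypothetical sketch of a writer task such as startNewWriter (the actual helper is not shown here).
    private CompletableFuture<Void> startNewWriter(final AtomicLong data, final ClientFactoryImpl clientFactory) {
        return CompletableFuture.runAsync(() -> {
            // Writes Long values so the readers can later check uniqueness via a TreeSet, as the test does above.
            try (EventStreamWriter<Long> writer = clientFactory.createEventWriter(STREAM_NAME, new JavaSerializer<Long>(), EventWriterConfig.builder().build())) {
                // NUM_EVENTS_PER_WRITER is an assumed constant; each writer appends monotonically increasing values.
                for (int i = 0; i < NUM_EVENTS_PER_WRITER; i++) {
                    writer.writeEvent(data.incrementAndGet());
                }
                writer.flush();
            }
        }, writerPool);
    }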
Use of io.pravega.client.connection.impl.ConnectionPool in project pravega by pravega.
From the class MetricsTest, method metricsTimeBasedCacheEvictionTest:
@Test(timeout = 120000)
public void metricsTimeBasedCacheEvictionTest() throws Exception {
    ClientConfig clientConfig = ClientConfig.builder().build();
    try (ConnectionPool cp = new ConnectionPoolImpl(clientConfig, new SocketConnectionFactoryImpl(clientConfig));
         StreamManager streamManager = new StreamManagerImpl(controller, cp)) {
        boolean createScopeStatus = streamManager.createScope(scope);
        log.info("Create scope status {}", createScopeStatus);
        boolean createStreamStatus = streamManager.createStream(scope, STREAM_NAME, config);
        log.info("Create stream status {}", createStreamStatus);
    }
    try (ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
         ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
         ReaderGroupManager readerGroupManager = new ReaderGroupManagerImpl(scope, controller, clientFactory)) {
        @Cleanup
        EventStreamWriter<String> writer1 = clientFactory.createEventWriter(STREAM_NAME, new UTF8StringSerializer(), EventWriterConfig.builder().build());
        String event = "12345";
        long bytesWritten = TOTAL_NUM_EVENTS * (8 + event.length());
        writeEvents(event, writer1);
        String readerGroupName1 = readerGroupName + "1";
        log.info("Creating Reader group : {}", readerGroupName1);
        readerGroupManager.createReaderGroup(readerGroupName1, ReaderGroupConfig.builder().stream(Stream.of(scope, STREAM_NAME)).automaticCheckpointIntervalMillis(2000).build());
        EventStreamReader<String> reader1 = clientFactory.createReader(readerName, readerGroupName1, new UTF8StringSerializer(), ReaderConfig.builder().build());
        readAllEvents(reader1);
        final String[] streamTags = segmentTags(scope + "/" + STREAM_NAME + "/0.#epoch.0");
        assertEquals(bytesWritten, (long) MetricRegistryUtils.getCounter(SEGMENT_READ_BYTES, streamTags).count());
        // Wait for cache eviction to happen
        Thread.sleep(5000);
        String readerGroupName2 = readerGroupName + "2";
        log.info("Creating Reader group : {}", readerGroupName2);
        readerGroupManager.createReaderGroup(readerGroupName2, ReaderGroupConfig.builder().stream(Stream.of(scope, STREAM_NAME)).automaticCheckpointIntervalMillis(2000).build());
        EventStreamReader<String> reader2 = clientFactory.createReader(readerName, readerGroupName2, new UTF8StringSerializer(), ReaderConfig.builder().build());
        readAllEvents(reader2);
        // Metric is evicted from Cache, after cache eviction duration
        // Count starts from 0, rather than adding up to previously read bytes, because the cache has been evicted.
        assertEquals(bytesWritten, (long) MetricRegistryUtils.getCounter(SEGMENT_READ_BYTES, streamTags).count());
        Map<Double, Double> map = new HashMap<>();
        map.put(0.0, 1.0);
        // Seal segment 0, create segment 1
        CompletableFuture<Boolean> scaleStatus = controller.scaleStream(new StreamImpl(scope, STREAM_NAME), Collections.singletonList(0L), map, executorService()).getFuture();
        Assert.assertTrue(scaleStatus.get());
        @Cleanup
        EventStreamWriter<String> writer2 = clientFactory.createEventWriter(STREAM_NAME, new UTF8StringSerializer(), EventWriterConfig.builder().build());
        writeEvents(event, writer2);
        readAllEvents(reader1);
        final String[] streamTags2nd = segmentTags(scope + "/" + STREAM_NAME + "/1.#epoch.1");
        assertEquals(bytesWritten, (long) MetricRegistryUtils.getCounter(SEGMENT_READ_BYTES, streamTags2nd).count());
        readerGroupManager.deleteReaderGroup(readerGroupName1);
        readerGroupManager.deleteReaderGroup(readerGroupName2);
        CompletableFuture<Boolean> sealStreamStatus = controller.sealStream(scope, STREAM_NAME);
        log.info("Sealing stream {}", STREAM_NAME);
        assertTrue(sealStreamStatus.get());
        CompletableFuture<Boolean> deleteStreamStatus = controller.deleteStream(scope, STREAM_NAME);
        log.info("Deleting stream {}", STREAM_NAME);
        assertTrue(deleteStreamStatus.get());
        CompletableFuture<Boolean> deleteScopeStatus = controller.deleteScope(scope);
        log.info("Deleting scope {}", scope);
        assertTrue(deleteScopeStatus.get());
    }
    log.info("Metrics Time based Cache Eviction test succeeds");
}
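The bytesWritten expectation asserted against SEGMENT_READ_BYTES follows the formula TOTAL_NUM_EVENTS * (8 + event.length()), i.e. the test assumes 8 bytes of per-event framing on the segment in addition to each UTF-8 payload. A worked example of that arithmetic, assuming a value for the TOTAL_NUM_EVENTS constant (which is defined elsewhere in MetricsTest and not shown on this page):

    // Worked example of the bytesWritten computation used above.
    // TOTAL_NUM_EVENTS is not shown on this page; 300 is only an assumed value for illustration.
    String event = "12345";                          // 5-byte payload
    long bytesWritten = 300 * (8 + event.length());  // 300 * (8 + 5) = 3,900 bytes expected in SEGMENT_READ_BYTES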
Use of io.pravega.client.connection.impl.ConnectionPool in project pravega by pravega.
From the class WriteBatchTest, method readWriteTest:
@Test(timeout = 60000)
public void readWriteTest() throws InterruptedException, ExecutionException {
    String scope = "testBatchWrite";
    String readerGroupName = "testBatchWriteRG";
    // 20 readers -> 20 stream segments (to have max read parallelism)
    ScalingPolicy scalingPolicy = ScalingPolicy.fixed(20);
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(scalingPolicy).build();
    ConcurrentLinkedQueue<Long> eventsReadFromPravega = new ConcurrentLinkedQueue<>();
    AtomicLong eventData = new AtomicLong();
    AtomicLong eventReadCount = new AtomicLong();
    AtomicBoolean stopReadFlag = new AtomicBoolean(false);
    ClientConfig clientConfig = ClientConfig.builder().build();
    try (ConnectionPool cp = new ConnectionPoolImpl(clientConfig, new SocketConnectionFactoryImpl(clientConfig));
         StreamManager streamManager = new StreamManagerImpl(controller, cp)) {
        // create a scope
        Boolean createScopeStatus = streamManager.createScope(scope);
        log.info("Create scope status {}", createScopeStatus);
        // create a stream
        Boolean createStreamStatus = streamManager.createStream(scope, STREAM_NAME, config);
        log.info("Create stream status {}", createStreamStatus);
    }
    try (ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
         ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
         ReaderGroupManager readerGroupManager = new ReaderGroupManagerImpl(scope, controller, clientFactory)) {
        // start writing events to the stream
        log.info("Creating {} writers", NUM_WRITERS);
        List<CompletableFuture<Void>> writerList = new ArrayList<>();
        for (int i = 0; i < NUM_WRITERS; i++) {
            log.info("Starting writer{}", i);
            writerList.add(startNewWriter(eventData, clientFactory));
        }
        // create a reader group
        log.info("Creating Reader group : {}", readerGroupName);
        readerGroupManager.createReaderGroup(readerGroupName, ReaderGroupConfig.builder().stream(Stream.of(scope, STREAM_NAME)).build());
        log.info("Reader group name {} ", readerGroupManager.getReaderGroup(readerGroupName).getGroupName());
        log.info("Reader group scope {}", readerGroupManager.getReaderGroup(readerGroupName).getScope());
        // create readers
        log.info("Creating {} readers", NUM_READERS);
        List<CompletableFuture<Void>> readerList = new ArrayList<>();
        String readerName = "reader" + RandomFactory.create().nextInt(Integer.MAX_VALUE);
        // start reading events
        for (int i = 0; i < NUM_READERS; i++) {
            log.info("Starting reader{}", i);
            readerList.add(startNewReader(readerName + i, clientFactory, readerGroupName, eventsReadFromPravega, eventData, eventReadCount, stopReadFlag));
        }
        // wait for writers completion
        Futures.allOf(writerList).get();
        // set stop read flag to true
        stopReadFlag.set(true);
        // wait for readers completion
        Futures.allOf(readerList).get();
        ExecutorServiceHelpers.shutdown(writerPool);
        ExecutorServiceHelpers.shutdown(readerPool);
        // delete readergroup
        log.info("Deleting readergroup {}", readerGroupName);
        readerGroupManager.deleteReaderGroup(readerGroupName);
    }
    log.info("All writers have stopped. Setting Stop_Read_Flag. Event Written Count:{}, Event Read " + "Count: {}", eventData.get(), eventsReadFromPravega.size());
    assertEquals(totalNumberOfEvents.get(), eventsReadFromPravega.size());
    // check unique events.
    assertEquals(totalNumberOfEvents.get(), new TreeSet<>(eventsReadFromPravega).size());
    // seal the stream
    CompletableFuture<Boolean> sealStreamStatus = controller.sealStream(scope, STREAM_NAME);
    log.info("Sealing stream {}", STREAM_NAME);
    assertTrue(sealStreamStatus.get());
    // delete the stream
    CompletableFuture<Boolean> deleteStreamStatus = controller.deleteStream(scope, STREAM_NAME);
    log.info("Deleting stream {}", STREAM_NAME);
    assertTrue(deleteStreamStatus.get());
    // delete the scope
    CompletableFuture<Boolean> deleteScopeStatus = controller.deleteScope(scope);
    log.info("Deleting scope {}", scope);
    assertTrue(deleteScopeStatus.get());
    log.info("Read write test succeeds");
}