Use of io.pravega.common.cluster.Host in project pravega by pravega.
From class HostStoreTest, method validateStore:
private void validateStore(HostControllerStore hostStore) {
    // Validate the store values.
    Assert.assertEquals(containerCount, hostStore.getContainerCount());
    Host hostObj = hostStore.getHostForSegment("dummyScope", "dummyStream",
            (int) Math.floor(containerCount * Math.random()));
    Assert.assertEquals(controllerPort, hostObj.getPort());
    Assert.assertEquals(host, hostObj.getIpAddr());
}
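Host behaves as a simple value object here: the assertions above compare directly against the address and port supplied at construction, with the third constructor argument left null as in all of these tests. A minimal sketch (the address and port literals are made up for illustration):

Host expected = new Host("10.0.0.1", 9090, null);  // hypothetical values
Assert.assertEquals(9090, expected.getPort());
Assert.assertEquals("10.0.0.1", expected.getIpAddr());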
Use of io.pravega.common.cluster.Host in project pravega by pravega.
From class ClusterZKTest, method deregisterNode:
@Test(timeout = TEST_TIMEOUT)
public void deregisterNode() throws Exception {
    LinkedBlockingQueue<String> nodeAddedQueue = new LinkedBlockingQueue<>();
    LinkedBlockingQueue<String> nodeRemovedQueue = new LinkedBlockingQueue<>();
    LinkedBlockingQueue<Exception> exceptionsQueue = new LinkedBlockingQueue<>();
    // Cluster listener used to observe membership events.
    CuratorFramework client2 = CuratorFrameworkFactory.builder()
            .connectString(zkUrl)
            .retryPolicy(new ExponentialBackoffRetry(RETRY_SLEEP_MS, MAX_RETRY))
            .namespace(CLUSTER_NAME_2)
            .build();
    @Cleanup
    Cluster clusterListener = new ClusterZKImpl(client2, ClusterType.HOST);
    clusterListener.addListener((eventType, host) -> {
        switch (eventType) {
            case HOST_ADDED:
                nodeAddedQueue.offer(host.getIpAddr());
                break;
            case HOST_REMOVED:
                nodeRemovedQueue.offer(host.getIpAddr());
                break;
            case ERROR:
                exceptionsQueue.offer(new RuntimeException("Encountered error"));
                break;
            default:
                exceptionsQueue.offer(new RuntimeException("Unhandled case"));
                break;
        }
    });
    CuratorFramework client = CuratorFrameworkFactory.builder()
            .connectString(zkUrl)
            .retryPolicy(new ExponentialBackoffRetry(RETRY_SLEEP_MS, MAX_RETRY))
            .namespace(CLUSTER_NAME_2)
            .build();
    // Create a cluster instance and add a node to it.
    @Cleanup
    Cluster clusterZKInstance1 = new ClusterZKImpl(client, ClusterType.HOST);
    clusterZKInstance1.registerHost(new Host(HOST_1, PORT, null));
    assertEquals(HOST_1, nodeAddedQueue.poll(5, TimeUnit.SECONDS));
    clusterZKInstance1.deregisterHost(new Host(HOST_1, PORT, null));
    assertEquals(HOST_1, nodeRemovedQueue.poll(5, TimeUnit.SECONDS));
    Exception exception = exceptionsQueue.poll();
    if (exception != null) {
        throw exception;
    }
}
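The queue-based handshake is what keeps this test deterministic: the listener thread offers events without blocking, while the test thread polls with a timeout so a missed event fails an assertion instead of hanging the test. A standalone sketch of the pattern using only the JDK (names here are illustrative):

LinkedBlockingQueue<String> events = new LinkedBlockingQueue<>();
events.offer("10.1.1.1");                           // listener side: never blocks
String seen = events.poll(5, TimeUnit.SECONDS);     // test side: bounded wait
assertEquals("10.1.1.1", seen);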
Use of io.pravega.common.cluster.Host in project pravega by pravega.
From class ZKSegmentContainerMonitorTest, method testStartAndStopContainer:
@Test
public void testStartAndStopContainer() throws Exception {
    @Cleanup
    CuratorFramework zkClient = startClient();
    initializeHostContainerMapping(zkClient);
    SegmentContainerRegistry containerRegistry = createMockContainerRegistry();
    @Cleanup
    ZKSegmentContainerMonitor segMonitor = createContainerMonitor(containerRegistry, zkClient);
    segMonitor.initialize(Duration.ofSeconds(1));
    // Simulate a container that starts successfully.
    CompletableFuture<ContainerHandle> startupFuture = new CompletableFuture<>();
    ContainerHandle containerHandle = mock(ContainerHandle.class);
    when(containerHandle.getContainerId()).thenReturn(2);
    when(containerRegistry.startContainer(eq(2), any())).thenReturn(startupFuture);
    // Now modify the ZK entry to assign container 2 to this host.
    HashMap<Host, Set<Integer>> currentData = deserialize(zkClient, PATH);
    currentData.put(PRAVEGA_SERVICE_ENDPOINT, Collections.singleton(2));
    zkClient.setData().forPath(PATH, SerializationUtils.serialize(currentData));
    // The container finishes starting.
    startupFuture.complete(containerHandle);
    verify(containerRegistry, timeout(1000).atLeastOnce()).startContainer(eq(2), any());
    Thread.sleep(2000);
    assertEquals(1, segMonitor.getRegisteredContainers().size());
    assertTrue(segMonitor.getRegisteredContainers().contains(2));
    // Now modify the ZK entry again: remove container 2 and add container 1.
    HashMap<Host, Set<Integer>> newMapping = new HashMap<>();
    newMapping.put(PRAVEGA_SERVICE_ENDPOINT, Collections.singleton(1));
    zkClient.setData().forPath(PATH, SerializationUtils.serialize(newMapping));
    // Verify that stop is called and only the newly added container is in running state.
    when(containerRegistry.stopContainer(any(), any())).thenReturn(CompletableFuture.completedFuture(null));
    verify(containerRegistry, timeout(1000).atLeastOnce()).stopContainer(any(), any());
    // Sleep to ensure the monitor's private data structure is updated.
    // TODO: Remove the dependency on sleep here and elsewhere in this class
    // - https://github.com/pravega/pravega/issues/1079
    Thread.sleep(2000);
    assertEquals(1, segMonitor.getRegisteredContainers().size());
    assertTrue(segMonitor.getRegisteredContainers().contains(1));
}
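As the test shows, the host-to-container assignment travels through ZooKeeper as a Java-serialized HashMap<Host, Set<Integer>>, which is why it can be rewritten with SerializationUtils (Apache Commons Lang) and a plain setData() call. A minimal round-trip sketch, assuming the map and Host are Serializable as the test implies (the host literals are made up):

HashMap<Host, Set<Integer>> mapping = new HashMap<>();
mapping.put(new Host("localhost", 12345, null), Collections.singleton(2));
byte[] bytes = SerializationUtils.serialize(mapping);                            // written to the ZK node
HashMap<Host, Set<Integer>> roundTrip = SerializationUtils.deserialize(bytes);   // read back by the monitor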
Use of io.pravega.common.cluster.Host in project pravega by pravega.
From class ZKSegmentContainerMonitorTest, method testRetryOnStartFailures:
@Test
public void testRetryOnStartFailures() throws Exception {
    @Cleanup
    CuratorFramework zkClient = startClient();
    initializeHostContainerMapping(zkClient);
    SegmentContainerRegistry containerRegistry = createMockContainerRegistry();
    @Cleanup
    ZKSegmentContainerMonitor segMonitor = createContainerMonitor(containerRegistry, zkClient);
    segMonitor.initialize(Duration.ofSeconds(1));
    // Simulate a container that fails to start.
    CompletableFuture<ContainerHandle> failedFuture = Futures.failedFuture(new RuntimeException());
    when(containerRegistry.startContainer(eq(2), any())).thenReturn(failedFuture);
    // Use ZK to send that information to the Container Manager.
    HashMap<Host, Set<Integer>> currentData = deserialize(zkClient, PATH);
    currentData.put(PRAVEGA_SERVICE_ENDPOINT, Collections.singleton(2));
    zkClient.setData().forPath(PATH, SerializationUtils.serialize(currentData));
    // Verify that the start attempt fails and nothing gets registered.
    verify(containerRegistry, timeout(1000).atLeastOnce()).startContainer(eq(2), any());
    assertEquals(0, segMonitor.getRegisteredContainers().size());
    // Now simulate success for the same container.
    ContainerHandle containerHandle = mock(ContainerHandle.class);
    when(containerHandle.getContainerId()).thenReturn(2);
    when(containerRegistry.startContainer(eq(2), any())).thenReturn(CompletableFuture.completedFuture(containerHandle));
    // Verify that the monitor retries and starts the same container again.
    verify(containerRegistry, timeout(1000).atLeastOnce()).startContainer(eq(2), any());
    Thread.sleep(2000);
    assertEquals(1, segMonitor.getRegisteredContainers().size());
}
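Futures.failedFuture is a small Pravega helper; a plain-JDK equivalent, shown only to illustrate what the mock hands back, is a CompletableFuture completed exceptionally:

CompletableFuture<ContainerHandle> failed = new CompletableFuture<>();
failed.completeExceptionally(new RuntimeException("start failed"));  // same shape the mock returns

Note also the Mockito idiom verify(mock, timeout(1000).atLeastOnce()): it re-runs the verification until it passes or the deadline elapses, which lets the test wait on the monitor's background thread without a fixed sleep.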
Use of io.pravega.common.cluster.Host in project pravega by pravega.
From class ClusterZKTest, method registerNode:
@Test(timeout = TEST_TIMEOUT)
public void registerNode() throws Exception {
    LinkedBlockingQueue<String> nodeAddedQueue = new LinkedBlockingQueue<>();
    LinkedBlockingQueue<String> nodeRemovedQueue = new LinkedBlockingQueue<>();
    LinkedBlockingQueue<Exception> exceptionsQueue = new LinkedBlockingQueue<>();
    // Cluster listener for testing purposes.
    CuratorFramework client2 = CuratorFrameworkFactory.builder()
            .connectString(zkUrl)
            .retryPolicy(new ExponentialBackoffRetry(RETRY_SLEEP_MS, MAX_RETRY))
            .namespace(CLUSTER_NAME)
            .build();
    @Cleanup
    Cluster clusterListener = new ClusterZKImpl(client2, ClusterType.HOST);
    clusterListener.addListener((eventType, host) -> {
        switch (eventType) {
            case HOST_ADDED:
                nodeAddedQueue.offer(host.getIpAddr());
                break;
            case HOST_REMOVED:
                nodeRemovedQueue.offer(host.getIpAddr());
                break;
            case ERROR:
                exceptionsQueue.offer(new RuntimeException("Encountered error"));
                break;
            default:
                exceptionsQueue.offer(new RuntimeException("Unhandled case"));
                break;
        }
    });
    CuratorFramework client = CuratorFrameworkFactory.builder()
            .connectString(zkUrl)
            .retryPolicy(new ExponentialBackoffRetry(RETRY_SLEEP_MS, MAX_RETRY))
            .namespace(CLUSTER_NAME)
            .build();
    // Create a cluster instance and add a node to it.
    @Cleanup
    Cluster clusterZKInstance1 = new ClusterZKImpl(client, ClusterType.HOST);
    clusterZKInstance1.registerHost(new Host(HOST_1, PORT, null));
    assertEquals(HOST_1, nodeAddedQueue.poll(5, TimeUnit.SECONDS));
    // Create a separate Cluster instance and add a node to the same cluster.
    @Cleanup
    Cluster clusterZKInstance2 = new ClusterZKImpl(client, ClusterType.HOST);
    clusterZKInstance2.registerHost(new Host(HOST_2, PORT, null));
    assertEquals(HOST_2, nodeAddedQueue.poll(5, TimeUnit.SECONDS));
    assertEquals(2, clusterListener.getClusterMembers().size());
    Exception exception = exceptionsQueue.poll();
    if (exception != null) {
        throw exception;
    }
}
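Pulling the pattern out of these tests: the listener callback receives an event type and the affected Host, which is enough to maintain a local membership view. A hedged sketch of such a listener (the members set and its updates are illustrative, not part of the Pravega API):

Set<String> members = ConcurrentHashMap.newKeySet();  // illustrative local view
clusterListener.addListener((eventType, host) -> {
    switch (eventType) {
        case HOST_ADDED:
            members.add(host.getIpAddr());
            break;
        case HOST_REMOVED:
            members.remove(host.getIpAddr());
            break;
        default:
            break;  // ERROR and other events ignored in this sketch
    }
});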