Use of com.hortonworks.streamline.streams.cluster.catalog.Cluster in project streamline by hortonworks.
From the class StormServiceRegistrarTest, method testRegister:
@Test
public void testRegister() throws Exception {
    Cluster cluster = getTestCluster(1L);
    StormServiceRegistrar registrar = initializeServiceRegistrar();
    // non-String values are explicitly cast to Object
    Config config = new Config();
    config.put(StormServiceRegistrar.PARAM_NIMBUS_SEEDS, "storm-1,storm-2");
    config.put(StormServiceRegistrar.PARAM_NIMBUS_THRIFT_PORT, (Object) 6627);
    config.put(StormServiceRegistrar.PARAM_UI_HOST, "storm-1");
    config.put(StormServiceRegistrar.PARAM_UI_PORT, (Object) 8080);
    config.put(StormServiceRegistrar.PARAM_NIMBUS_THRIFT_MAX_BUFFER_SIZE, (Object) 102476800);
    config.put(StormServiceRegistrar.PARAM_THRIFT_TRANSPORT, "org.apache.storm.security.auth.SimpleTransportPlugin");
    config.put(StormServiceRegistrar.PARAM_PRINCIPAL_TO_LOCAL, "org.apache.storm.security.auth.DefaultPrincipalToLocal");
    config.put(StormServiceRegistrar.PARAM_NIMBUS_PRINCIPAL_NAME, "nimbus/_HOST@EXAMPLE.COM");
    registrar.register(cluster, config, Collections.emptyList());

    // the Storm service and its NIMBUS component should now be registered for the cluster
    Service stormService = environmentService.getServiceByName(cluster.getId(), Constants.Storm.SERVICE_NAME);
    assertNotNull(stormService);
    Component nimbus = environmentService.getComponentByName(stormService.getId(), ComponentPropertyPattern.NIMBUS.name());
    assertNotNull(nimbus);
    Collection<ComponentProcess> nimbusProcesses = environmentService.listComponentProcesses(nimbus.getId());
    List<String> hosts = nimbusProcesses.stream().map(ComponentProcess::getHost).collect(Collectors.toList());
    assertEquals(Sets.newHashSet("storm-1", "storm-2"), new HashSet<>(hosts));
    List<Integer> ports = nimbusProcesses.stream().map(ComponentProcess::getPort).collect(Collectors.toList());
    assertEquals(Sets.newHashSet(6627, 6627), new HashSet<>(ports));

    // the STORM_UI_SERVER component should point to the configured UI host and port
    Component ui = environmentService.getComponentByName(stormService.getId(), ComponentPropertyPattern.STORM_UI_SERVER.name());
    assertNotNull(ui);
    Collection<ComponentProcess> uiProcesses = environmentService.listComponentProcesses(ui.getId());
    assertEquals(Sets.newHashSet("storm-1"), uiProcesses.stream().map(ComponentProcess::getHost).collect(Collectors.toSet()));
    assertEquals(Sets.newHashSet(8080), uiProcesses.stream().map(ComponentProcess::getPort).collect(Collectors.toSet()));

    // the storm.yaml configuration should carry the thrift and principal-to-local settings
    ServiceConfiguration stormYamlConf = environmentService.getServiceConfigurationByName(stormService.getId(), CONFIGURATION_NAME_STORM_YAML);
    assertNotNull(stormYamlConf);
    Map<String, String> stormYamlConfMap = stormYamlConf.getConfigurationMap();
    assertEquals(config.getAny(StormServiceRegistrar.PARAM_NIMBUS_THRIFT_MAX_BUFFER_SIZE), Integer.valueOf(stormYamlConfMap.get(StormServiceRegistrar.PARAM_NIMBUS_THRIFT_MAX_BUFFER_SIZE)));
    assertEquals(config.get(StormServiceRegistrar.PARAM_THRIFT_TRANSPORT), stormYamlConfMap.get(StormServiceRegistrar.PARAM_THRIFT_TRANSPORT));
    assertEquals(config.get(StormServiceRegistrar.PARAM_PRINCIPAL_TO_LOCAL), stormYamlConfMap.get(StormServiceRegistrar.PARAM_PRINCIPAL_TO_LOCAL));

    // the storm-env configuration should carry the nimbus principal name
    ServiceConfiguration stormEnvConf = environmentService.getServiceConfigurationByName(stormService.getId(), CONFIGURATION_NAME_STORM_ENV);
    assertNotNull(stormEnvConf);
    Map<String, String> stormEnvConfMap = stormEnvConf.getConfigurationMap();
    assertEquals(config.get(StormServiceRegistrar.PARAM_NIMBUS_PRINCIPAL_NAME), stormEnvConfMap.get(StormServiceRegistrar.PARAM_NIMBUS_PRINCIPAL_NAME));
}
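The test leans on two helpers that are not shown here, getTestCluster and initializeServiceRegistrar (they come from the test's base class in the streamline project). The sketch below shows one plausible minimal shape for them, assuming an in-memory EnvironmentService and a registrar that only needs that service injected; the method names addCluster and init are assumptions about the project's API, not a verified quote of it.

// Hypothetical helper sketch, for illustration only; the real test inherits
// equivalents from its base class in the streamline project.
private Cluster getTestCluster(long clusterId) {
    Cluster cluster = new Cluster();
    cluster.setId(clusterId);
    cluster.setName("cluster-" + clusterId);
    // assumed: persist the cluster so lookups by cluster.getId() resolve later
    return environmentService.addCluster(cluster);
}

private StormServiceRegistrar initializeServiceRegistrar() {
    StormServiceRegistrar registrar = new StormServiceRegistrar();
    // assumed: the registrar is wired to the same EnvironmentService the assertions query
    registrar.init(environmentService);
    return registrar;
}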
Use of com.hortonworks.streamline.streams.cluster.catalog.Cluster in project streamline by hortonworks.
From the class StormServiceRegistrarTest, method testRegister_component_storm_ui_server_notPresent:
@Test
public void testRegister_component_storm_ui_server_notPresent() throws Exception {
    Cluster cluster = getTestCluster(1L);
    StormServiceRegistrar registrar = initializeServiceRegistrar();
    try {
        Config config = new Config();
        config.put(StormServiceRegistrar.PARAM_NIMBUS_SEEDS, "storm-1,storm-2");
        config.put(StormServiceRegistrar.PARAM_NIMBUS_THRIFT_PORT, (Object) 6627);
        // no ui params
        registrar.register(cluster, config, Collections.emptyList());
        fail("Should throw IllegalArgumentException");
    } catch (IllegalArgumentException e) {
        // OK
        Service stormService = environmentService.getServiceByName(cluster.getId(), Constants.Storm.SERVICE_NAME);
        assertNull(stormService);
    }
}
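The negative test expects registration to fail fast when the UI parameters are missing, and to leave nothing behind: after the IllegalArgumentException, no Storm service exists for the cluster. The snippet below is a generic sketch of that kind of required-parameter check, written against a plain Map so it is self-contained; it is illustrative only and not the actual validation code inside StormServiceRegistrar.

import java.util.Map;

// Illustrative required-parameter check; the real registrar's validation may differ.
static void validateRequiredParams(Map<String, Object> config, String... requiredKeys) {
    for (String key : requiredKeys) {
        if (config.get(key) == null) {
            // failing before any catalog write keeps the environment untouched,
            // which is what the assertNull(stormService) above relies on
            throw new IllegalArgumentException("Required parameter '" + key + "' is missing");
        }
    }
}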
Use of com.hortonworks.streamline.streams.cluster.catalog.Cluster in project streamline by hortonworks.
From the class ZookeeperServiceRegistrarTest, method testRegister:
@Test
public void testRegister() throws Exception {
    Cluster cluster = getTestCluster(1L);
    ZookeeperServiceRegistrar registrar = initializeServiceRegistrar();
    Config config = new Config();
    config.put(ZookeeperServiceRegistrar.PARAM_ZOOKEEPER_SERVER_HOSTNAMES, Lists.newArrayList("zookeeper-1", "zookeeper-2"));
    config.put(ZookeeperServiceRegistrar.PARAM_ZOOKEEPER_PORT, (Object) 2181);
    registrar.register(cluster, config, Collections.emptyList());
    Service zkService = environmentService.getServiceByName(cluster.getId(), Constants.Zookeeper.SERVICE_NAME);
    assertNotNull(zkService);
    Component zkServer = environmentService.getComponentByName(zkService.getId(), ComponentPropertyPattern.ZOOKEEPER_SERVER.name());
    assertNotNull(zkServer);
    Collection<ComponentProcess> zkServerProcesses = environmentService.listComponentProcesses(zkServer.getId());
    assertEquals(Sets.newHashSet("zookeeper-1", "zookeeper-2"), zkServerProcesses.stream().map(ComponentProcess::getHost).collect(Collectors.toSet()));
    assertEquals(Sets.newHashSet(2181, 2181), zkServerProcesses.stream().map(ComponentProcess::getPort).collect(Collectors.toSet()));
    ServiceConfiguration zooConf = environmentService.getServiceConfigurationByName(zkService.getId(), CONFIGURATION_NAME_ZOO_CFG);
    assertNotNull(zooConf);
}
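The assertions rely on a simple fan-out: every configured ZooKeeper hostname is paired with the same client port, so the set of ports collapses to a single value. The sketch below reproduces that expected shape in plain Java; it illustrates the data the test asserts on, not the registrar's actual implementation.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Sketch: build one (host, port) pair per ZooKeeper server hostname.
List<String> hostnames = Arrays.asList("zookeeper-1", "zookeeper-2");
int clientPort = 2181;
List<ComponentProcess> processes = new ArrayList<>();
for (String host : hostnames) {
    ComponentProcess process = new ComponentProcess();
    process.setHost(host);
    process.setPort(clientPort);
    processes.add(process);
}
// every process shares port 2181, so the asserted set of ports is just {2181}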
Use of com.hortonworks.streamline.streams.cluster.catalog.Cluster in project streamline by hortonworks.
From the class EnvironmentServiceTest, method invalidateContainersWhenImportingClusterServices:
@Test
public void invalidateContainersWhenImportingClusterServices() throws Exception {
    Deencapsulation.setField(environmentService, "clusterImporter", clusterImporter);
    long clusterId = 1L;
    Cluster testCluster = new Cluster();
    testCluster.setId(clusterId);
    ArrayList<Long> namespaceIds = Lists.newArrayList(1L, 2L, 3L);
    List<NamespaceServiceClusterMap> mappings = new ArrayList<>();
    mappings.add(new NamespaceServiceClusterMap(1L, "STORM", clusterId));
    mappings.add(new NamespaceServiceClusterMap(2L, "KAFKA", clusterId));
    mappings.add(new NamespaceServiceClusterMap(3L, "HADOOP", clusterId));
    MockedNamespaceAwareContainer container1 = new MockedNamespaceAwareContainer();
    MockedNamespaceAwareContainer container2 = new MockedNamespaceAwareContainer();
    environmentService.addNamespaceAwareContainer(container1);
    environmentService.addNamespaceAwareContainer(container2);
    new Expectations() {
        {
            clusterImporter.importCluster(discoverer, testCluster);
            result = testCluster;
            dao.find(NAMESPACE_SERVICE_CLUSTER_MAP, Collections.singletonList(new QueryParam("clusterId", String.valueOf(clusterId))));
            result = mappings;
        }
    };
    // we're just checking whether it calls invalidation to associated containers properly
    environmentService.importClusterServices(discoverer, testCluster);
    assertEquals(3, container1.getInvalidatedNamespaceIds().size());
    assertTrue(container1.getInvalidatedNamespaceIds().containsAll(namespaceIds));
    assertEquals(3, container2.getInvalidatedNamespaceIds().size());
    assertTrue(container2.getInvalidatedNamespaceIds().containsAll(namespaceIds));
}
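MockedNamespaceAwareContainer is a test double that only records which namespace IDs it was asked to invalidate, which is what the final assertions check. A minimal sketch of such a double is shown below; the real class implements the project's namespace-aware container contract, and the invalidateCache method name used here is an assumption about that callback, not the project's verified API.

import java.util.HashSet;
import java.util.Set;

// Hypothetical test double; the callback name below is assumed, and the real
// class would also implement the project's namespace-aware container interface.
public class MockedNamespaceAwareContainer {
    private final Set<Long> invalidatedNamespaceIds = new HashSet<>();

    // assumed to be the invalidation callback EnvironmentService invokes per namespace
    public void invalidateCache(Long namespaceId) {
        invalidatedNamespaceIds.add(namespaceId);
    }

    public Set<Long> getInvalidatedNamespaceIds() {
        return invalidatedNamespaceIds;
    }
}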
Use of com.hortonworks.streamline.streams.cluster.catalog.Cluster in project streamline by hortonworks.
From the class AutoCredsServiceConfigurationReader, method read:
@Override
public Map<String, String> read(Long clusterId, String serviceName) {
    Cluster cluster = environmentService.getCluster(clusterId);
    if (cluster == null) {
        throw new IllegalArgumentException("Cluster with id " + clusterId + " doesn't exist.");
    }
    // only expose configurations for services whose cluster is mapped to this namespace
    Collection<NamespaceServiceClusterMap> mappings = environmentService.listServiceClusterMapping(namespaceId, serviceName);
    boolean associated = mappings.stream().anyMatch(map -> map.getClusterId().equals(clusterId));
    if (!associated) {
        return Collections.emptyMap();
    }
    Long serviceId = environmentService.getServiceIdByName(clusterId, serviceName);
    if (serviceId == null) {
        throw new IllegalStateException("Cluster " + clusterId + " is associated to the service " + serviceName + " for namespace " + namespaceId + ", but actual service doesn't exist.");
    }
    Collection<ServiceConfiguration> serviceConfigurations = environmentService.listServiceConfigurations(serviceId);
    Map<String, String> flattenConfig = new HashMap<>();
    String[] confNames = AutoCredsServiceConfigurations.valueOf(serviceName).getConfNames();
    // no need to optimize the lookup below; both collections are small
    Arrays.stream(confNames).forEachOrdered(confName -> {
        Optional<ServiceConfiguration> serviceConfigurationOptional = serviceConfigurations.stream().filter(sc -> sc.getName().equals(confName)).findFirst();
        if (serviceConfigurationOptional.isPresent()) {
            ServiceConfiguration sc = serviceConfigurationOptional.get();
            try {
                // merge this configuration file's entries into the flattened map
                Map<String, String> configurationMap = sc.getConfigurationMap();
                flattenConfig.putAll(configurationMap);
            } catch (IOException e) {
                throw new RuntimeException("Can't read configuration from service configuration - ID: " + sc.getId());
            }
        }
    });
    return flattenConfig;
}
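A hedged usage sketch: assuming the reader is constructed with an EnvironmentService and a namespace ID (as the fields used in read() suggest), a caller gets back one flattened map that merges the configuration files listed by AutoCredsServiceConfigurations for that service, in order. The cluster ID, service name, and property key below are example values, not taken from the project.

// Illustrative call; constructor arguments, clusterId 1L, and "HIVE" are assumed example values.
AutoCredsServiceConfigurationReader reader = new AutoCredsServiceConfigurationReader(environmentService, namespaceId);
Map<String, String> hiveConf = reader.read(1L, "HIVE");
// because of forEachOrdered + putAll, keys from later configuration files overwrite earlier ones
String metastoreUri = hiveConf.get("hive.metastore.uris");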