Use of io.fabric8.kubernetes.api.Controller in project camel by apache.
The class KubernetesReplicationControllersProducer, method doGetReplicationController.
protected void doGetReplicationController(Exchange exchange, String operation) throws Exception {
    ReplicationController rc = null;
    String rcName = exchange.getIn().getHeader(KubernetesConstants.KUBERNETES_REPLICATION_CONTROLLER_NAME, String.class);
    String namespaceName = exchange.getIn().getHeader(KubernetesConstants.KUBERNETES_NAMESPACE_NAME, String.class);
    if (ObjectHelper.isEmpty(rcName)) {
        LOG.error("Getting a specific replication controller requires specifying a replication controller name");
        throw new IllegalArgumentException("Getting a specific replication controller requires specifying a replication controller name");
    }
    if (ObjectHelper.isEmpty(namespaceName)) {
        LOG.error("Getting a specific replication controller requires specifying a namespace name");
        throw new IllegalArgumentException("Getting a specific replication controller requires specifying a namespace name");
    }
    // Look up the named replication controller in the given namespace via the Fabric8 client
    rc = getEndpoint().getKubernetesClient().replicationControllers().inNamespace(namespaceName).withName(rcName).get();
    MessageHelper.copyHeaders(exchange.getIn(), exchange.getOut(), true);
    exchange.getOut().setBody(rc);
}
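For context, a minimal sketch of a route that could drive this operation. The endpoint URI, master URL and operation name are assumptions following camel-kubernetes conventions of that era, not taken from the excerpt above; only the two header constants come from the code itself.
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.component.kubernetes.KubernetesConstants;

public class GetReplicationControllerRoute extends RouteBuilder {
    @Override
    public void configure() {
        from("direct:getRc")
            // headers validated by doGetReplicationController above
            .setHeader(KubernetesConstants.KUBERNETES_REPLICATION_CONTROLLER_NAME, constant("my-rc"))
            .setHeader(KubernetesConstants.KUBERNETES_NAMESPACE_NAME, constant("default"))
            // hypothetical endpoint URI and operation name
            .to("kubernetes://https://kubernetes.default:443?category=replicationControllers&operation=getReplicationController");
    }
}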
Use of io.fabric8.kubernetes.api.Controller in project camel by apache.
The class KubernetesReplicationControllersProducer, method doCreateReplicationController.
protected void doCreateReplicationController(Exchange exchange, String operation) throws Exception {
    ReplicationController rc = null;
    String rcName = exchange.getIn().getHeader(KubernetesConstants.KUBERNETES_REPLICATION_CONTROLLER_NAME, String.class);
    String namespaceName = exchange.getIn().getHeader(KubernetesConstants.KUBERNETES_NAMESPACE_NAME, String.class);
    ReplicationControllerSpec rcSpec = exchange.getIn().getHeader(KubernetesConstants.KUBERNETES_REPLICATION_CONTROLLER_SPEC, ReplicationControllerSpec.class);
    if (ObjectHelper.isEmpty(rcName)) {
        LOG.error("Creating a specific replication controller requires specifying a replication controller name");
        throw new IllegalArgumentException("Creating a specific replication controller requires specifying a replication controller name");
    }
    if (ObjectHelper.isEmpty(namespaceName)) {
        LOG.error("Creating a specific replication controller requires specifying a namespace name");
        throw new IllegalArgumentException("Creating a specific replication controller requires specifying a namespace name");
    }
    if (ObjectHelper.isEmpty(rcSpec)) {
        LOG.error("Creating a specific replication controller requires specifying a replication controller spec bean");
        throw new IllegalArgumentException("Creating a specific replication controller requires specifying a replication controller spec bean");
    }
    Map<String, String> labels = exchange.getIn().getHeader(KubernetesConstants.KUBERNETES_REPLICATION_CONTROLLERS_LABELS, Map.class);
    // Assemble the controller from the supplied name, labels and spec, then create it in the namespace
    ReplicationController rcCreating = new ReplicationControllerBuilder().withNewMetadata().withName(rcName).withLabels(labels).endMetadata().withSpec(rcSpec).build();
    rc = getEndpoint().getKubernetesClient().replicationControllers().inNamespace(namespaceName).create(rcCreating);
    MessageHelper.copyHeaders(exchange.getIn(), exchange.getOut(), true);
    exchange.getOut().setBody(rc);
}
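A hedged sketch of a caller that builds the spec header this method expects. The Fabric8 fluent builder chain follows the kubernetes-model conventions, and the ProducerTemplate, route name and image are hypothetical; only the header constants come from the excerpt.
import java.util.HashMap;
import java.util.Map;

import org.apache.camel.ProducerTemplate;
import org.apache.camel.component.kubernetes.KubernetesConstants;

import io.fabric8.kubernetes.api.model.ReplicationControllerSpec;
import io.fabric8.kubernetes.api.model.ReplicationControllerSpecBuilder;

public class CreateReplicationControllerClient {
    // "direct:createRc" is a hypothetical route feeding the kubernetes endpoint
    public void create(ProducerTemplate template) {
        // Spec built with the Fabric8 fluent builder (method names per kubernetes-model conventions)
        ReplicationControllerSpec spec = new ReplicationControllerSpecBuilder()
            .withReplicas(3)
            .addToSelector("app", "my-app")
            .withNewTemplate()
                .withNewMetadata().addToLabels("app", "my-app").endMetadata()
                .withNewSpec().addNewContainer().withName("main").withImage("nginx:1.9.1").endContainer().endSpec()
            .endTemplate()
            .build();
        Map<String, Object> headers = new HashMap<>();
        headers.put(KubernetesConstants.KUBERNETES_REPLICATION_CONTROLLER_NAME, "my-rc");
        headers.put(KubernetesConstants.KUBERNETES_NAMESPACE_NAME, "default");
        headers.put(KubernetesConstants.KUBERNETES_REPLICATION_CONTROLLER_SPEC, spec);
        template.sendBodyAndHeaders("direct:createRc", null, headers);
    }
}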
Use of io.fabric8.kubernetes.api.Controller in project fabric8 by jboss-fuse.
The class CachingGitDataStoreTest, method testDataStore.
@Test
public void testDataStore() throws Exception {
String defaultVersion = dataStore.getDefaultVersion();
assertEquals("defaultVersion", "1.0", defaultVersion);
// now let's import some data - using the old non-git file layout...
String importPath = basedir + "/../fabric8-karaf/src/main/resources/distro/fabric";
if (useOldImportFormat) {
assertFolderExists(importPath);
try {
dataStore.importFromFileSystem(importPath);
} catch (Exception e) {
// Ignore exception about missing url handlers. Not needed for this test anyway.
}
assertHasVersion(defaultVersion);
} else {
String prefix = importPath + "/fabric";
String profileImport = prefix + "/configs/versions/1.0/profiles";
assertFolderExists(profileImport);
// dataStore.importFromFileSystem(new File(profileImport), "fabric",
// "1.0", true);
assertHasVersion(defaultVersion);
}
remote.checkout().setName("1.0").call();
String importedProfile = "example-dozer";
String profile = importedProfile;
assertProfileExists(defaultVersion, profile);
// assertFolderExists("Should have imported an mq/ReadMe.md file!",
// getLocalGitFile("fabric/profiles/mq/ReadMe.md"));
String version = "1.1";
assertCreateVersion("1.0", version);
assertProfileConfiguration(version, importedProfile, Constants.AGENT_PID, "attribute.parents", "feature-camel");
assertProfileTextFileConfigurationContains(version, "example-camel-mq", "camel.xml", "http://camel.apache.org/schema/blueprint");
/*
* List<String> fileNames = dataStore.getConfigurationFileNames(version,
* "example-camel-mq"); assertNotNull("Should have some file names",
* fileNames); assertTrue("Should have some file names",
* fileNames.size() > 0); assertTrue("Should contain 'came",
* fileNames.size() > 0);
* assertCollectionContains("configurationFileNames", fileNames,
* "camel.xml");
*/
// let's test the profile attributes
Map<String, String> profileAttributes = dataStore.getProfileAttributes(version, importedProfile);
String parent = profileAttributes.get("parents");
assertEquals(importedProfile + ".profileAttributes[parent]", "feature-camel", parent);
System.out.println("Profile attributes: " + profileAttributes);
String profileAttributeKey = "myKey";
String expectedProfileAttributeValue = "myValue";
dataStore.setProfileAttribute(version, importedProfile, profileAttributeKey, expectedProfileAttributeValue);
profileAttributes = dataStore.getProfileAttributes(version, importedProfile);
System.out.println("Profile attributes: " + profileAttributes);
assertMapContains("Profile attribute[" + profileAttributeKey + "]", profileAttributes, profileAttributeKey, expectedProfileAttributeValue);
String hawtioRepoKey = "repository.hawtio";
Map<String, String> hawtioAttributes = dataStore.getConfiguration(version, "hawtio", Constants.AGENT_PID);
String currentHawtRepo = hawtioAttributes.get(hawtioRepoKey);
System.out.println("Current repository.hawtio: " + currentHawtRepo);
// now let's write via the hawtio API
FabricGitFacade hawtio = new FabricGitFacade();
hawtio.bindGitDataStoreForTesting(dataStore);
hawtio.activateForTesting();
String hawtioPropertyFile = "/fabric/profiles/" + dataStore.convertProfileIdToDirectory("hawtio") + "/" + Constants.AGENT_PID + ".properties";
hawtio.write(version, hawtioPropertyFile, "My commit message", "me", "me@apache.org", "# new file\n" + hawtioRepoKey + " = " + "mvn\\:io.hawt/hawtio-karaf/myNewVersion/xml/features" + "\n");
hawtioAttributes = dataStore.getConfiguration(version, "hawtio", Constants.AGENT_PID);
String actual = hawtioAttributes.get(hawtioRepoKey);
assertEquals("should have found the updated hawtio repo key", "mvn:io.hawt/hawtio-karaf/myNewVersion/xml/features", actual);
// let's check that the file configuration lookup recurses into folders
// Map<String, byte[]> tomcatFileConfigurations = dataStore.getFileConfigurations("1.0", "controller-tomcat");
// assertHasFileConfiguration(tomcatFileConfigurations, "tomcat/conf/server.xml.mvel");
/*
Collection<String> schemas = dataStore.listFiles("1.0", Arrays.asList("example-dozer"), "schemas");
assertNotNull(schemas);
assertContainerEquals("schemas for example-dozer", Arrays.asList("invoice.xsd"), new ArrayList<String>(schemas));
*/
// check we don't accidentally create a profile
String profileNotCreated = "shouldNotBeCreated";
// assertEquals("Should not create profile: " + profileNotCreated, null, dataStore.getProfile(version, profileNotCreated, false));
assertProfileNotExists(defaultVersion, profileNotCreated);
// assertFolderNotExists(getLocalGitFile("fabric/profiles/" +
// dataStore.convertProfileIdToDirectory(profileNotCreated)));
// now let's create some profiles in this new version
String newProfile = "myNewProfile";
dataStore.createProfile(version, newProfile);
assertProfileExists(version, newProfile);
// lazily create a profile
String anotherNewProfile = "anotherNewProfile";
dataStore.getProfile(version, anotherNewProfile, true);
assertProfileExists(version, anotherNewProfile);
version = "1.2";
assertCreateVersion("1.1", version);
// check this version has the profile too
assertProfileExists(version, newProfile);
assertProfileExists(version, profile);
// now let's delete a profile
dataStore.deleteProfile(version, newProfile);
assertProfileNotExists(version, newProfile);
// let's check the remote repo
remote.checkout().setName("1.1").call();
assertProfileExists("1.1", profile);
assertProfileExists("1.1", newProfile);
// assertFolderExists(getRemoteGitFile("fabric/profiles/" +
// dataStore.convertProfileIdToDirectory(profile)));
// assertFolderExists(getRemoteGitFile("fabric/profiles/" +
// dataStore.convertProfileIdToDirectory(newProfile)));
remote.checkout().setName("1.2").call();
assertProfileExists("1.2", profile);
assertProfileNotExists("1.2", newProfile);
// assertFolderExists(getRemoteGitFile("fabric/profiles/" +
// dataStore.convertProfileIdToDirectory(profile)));
// assertFolderNotExists(getRemoteGitFile("fabric/profiles/" +
// dataStore.convertProfileIdToDirectory(newProfile)));
remote.checkout().setName("1.0").call();
// assertFolderExists(getRemoteGitFile("fabric/profiles/" +
// dataStore.convertProfileIdToDirectory(profile)));
// assertFolderNotExists(getRemoteGitFile("fabric/profiles/" +
// dataStore.convertProfileIdToDirectory(newProfile)));
// delete version 1.2
assertHasVersion("1.1");
assertHasVersion("1.2");
dataStore.removeVersion("1.2");
assertHasVersion("1.1");
assertHasNotVersion("1.2");
Collection<String> remoteBranches = RepositoryUtils.getBranches(remote.getRepository());
System.out.println("Remote branches after delete: " + remoteBranches);
}
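Aside: the test drives the remote repository directly through JGit (remote.checkout().setName("1.0").call() and the RepositoryUtils.getBranches helper). A minimal self-contained sketch of the same plain-JGit calls, with a hypothetical repository path in place of the test fixture:
import java.io.File;
import java.util.List;

import org.eclipse.jgit.api.Git;
import org.eclipse.jgit.api.ListBranchCommand;
import org.eclipse.jgit.lib.Ref;

public class GitBranchSketch {
    public static void main(String[] args) throws Exception {
        // hypothetical path; the test obtains its Git handle from the test fixture
        try (Git git = Git.open(new File("/tmp/fabric-remote-repo"))) {
            git.checkout().setName("1.0").call(); // same pattern as remote.checkout() above
            List<Ref> branches = git.branchList().setListMode(ListBranchCommand.ListMode.ALL).call();
            for (Ref ref : branches) {
                System.out.println(ref.getName()); // e.g. refs/heads/1.0, refs/heads/1.1
            }
        }
    }
}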
Use of io.fabric8.kubernetes.api.Controller in project fabric8 by jboss-fuse.
The class AutoScaleSingleMessageBrokerTest, method createProvisionedFabric.
@Test
public void createProvisionedFabric() throws Exception {
System.out.println("The fabric has now been created somewhere and we have a controller for it, so lets define our requirements");
FabricRequirements requirements = new FabricRequirements();
requirements.profile("mq-default").minimumInstances(1);
FabricAssertions.assertRequirementsSatisfied(fabricController, requirements);
// now let's ensure that the autoscaler can scale back down again, stopping the broker
requirements.profile("mq-default").minimumInstances(0).maximumInstances(0);
FabricAssertions.assertRequirementsSatisfied(fabricController, requirements);
}
Use of io.fabric8.kubernetes.api.Controller in project strimzi by strimzi.
The class KafkaAssemblyOperatorTest, method createCluster.
private void createCluster(TestContext context, ConfigMap clusterCm) {
// create CM, Service, headless service, statefulset and so on
ConfigMapOperator mockCmOps = mock(ConfigMapOperator.class);
ServiceOperator mockServiceOps = mock(ServiceOperator.class);
ZookeeperSetOperator mockZsOps = mock(ZookeeperSetOperator.class);
KafkaSetOperator mockKsOps = mock(KafkaSetOperator.class);
PvcOperator mockPvcOps = mock(PvcOperator.class);
DeploymentOperator mockDepOps = mock(DeploymentOperator.class);
// Create a CM
String clusterCmName = clusterCm.getMetadata().getName();
String clusterCmNamespace = clusterCm.getMetadata().getNamespace();
when(mockCmOps.get(clusterCmNamespace, clusterCmName)).thenReturn(clusterCm);
ArgumentCaptor<Service> serviceCaptor = ArgumentCaptor.forClass(Service.class);
when(mockServiceOps.reconcile(anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created()));
when(mockServiceOps.endpointReadiness(anyString(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
ArgumentCaptor<StatefulSet> ssCaptor = ArgumentCaptor.forClass(StatefulSet.class);
when(mockZsOps.reconcile(anyString(), anyString(), ssCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created()));
when(mockZsOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(null));
when(mockZsOps.rollingUpdate(anyString(), anyString())).thenReturn(Future.succeededFuture());
when(mockZsOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
when(mockKsOps.reconcile(anyString(), anyString(), ssCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created()));
when(mockKsOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(null));
when(mockKsOps.rollingUpdate(anyString(), anyString())).thenReturn(Future.succeededFuture());
when(mockKsOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
ArgumentCaptor<Deployment> depCaptor = ArgumentCaptor.forClass(Deployment.class);
when(mockDepOps.reconcile(anyString(), anyString(), depCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created()));
// when(mockSsOps.readiness(any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
// when(mockPodOps.readiness(any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
// when(mockEndpointOps.readiness(any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
KafkaCluster kafkaCluster = KafkaCluster.fromConfigMap(clusterCm);
ZookeeperCluster zookeeperCluster = ZookeeperCluster.fromConfigMap(clusterCm);
TopicController topicController = TopicController.fromConfigMap(clusterCm);
ArgumentCaptor<ConfigMap> metricsCaptor = ArgumentCaptor.forClass(ConfigMap.class);
ArgumentCaptor<String> metricsNameCaptor = ArgumentCaptor.forClass(String.class);
when(mockCmOps.reconcile(anyString(), metricsNameCaptor.capture(), metricsCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created()));
KafkaAssemblyOperator ops = new KafkaAssemblyOperator(vertx, openShift, ClusterControllerConfig.DEFAULT_OPERATION_TIMEOUT_MS, mockCmOps, mockServiceOps, mockZsOps, mockKsOps, mockPvcOps, mockDepOps);
// Now try to create a KafkaCluster based on this CM
Async async = context.async();
ops.createOrUpdate(new Reconciliation("test-trigger", AssemblyType.KAFKA, clusterCmNamespace, clusterCmName), clusterCm, createResult -> {
if (createResult.failed()) {
createResult.cause().printStackTrace();
}
context.assertTrue(createResult.succeeded());
// No metrics config => no CMs created
Set<String> metricsNames = new HashSet<>();
if (kafkaCluster.isMetricsEnabled()) {
metricsNames.add(KafkaCluster.metricConfigsName(clusterCmName));
}
if (zookeeperCluster.isMetricsEnabled()) {
metricsNames.add(ZookeeperCluster.zookeeperMetricsName(clusterCmName));
}
/*
Map<String, ConfigMap> cmsByName = new HashMap<>();
Iterator<ConfigMap> it2 = metricsCaptor.getAllValues().iterator();
for (Iterator<String> it = metricsNameCaptor.getAllValues().iterator(); it.hasNext(); ) {
cmsByName.put(it.next(), it2.next());
}
context.assertEquals(metricsNames, cmsByName.keySet(),
"Unexpected metrics ConfigMaps");
if (kafkaCluster.isMetricsEnabled()) {
ConfigMap kafkaMetricsCm = cmsByName.get(KafkaCluster.metricConfigsName(clusterCmName));
context.assertEquals(ResourceUtils.labels(Labels.STRIMZI_TYPE_LABEL, "kafka",
Labels.STRIMZI_CLUSTER_LABEL, clusterCmName,
"my-user-label", "cromulent"), kafkaMetricsCm.getMetadata().getLabels());
}
if (zookeeperCluster.isMetricsEnabled()) {
ConfigMap zookeeperMetricsCm = cmsByName.get(ZookeeperCluster.zookeeperMetricsName(clusterCmName));
context.assertEquals(ResourceUtils.labels(Labels.STRIMZI_TYPE_LABEL, "zookeeper",
Labels.STRIMZI_CLUSTER_LABEL, clusterCmName,
"my-user-label", "cromulent"), zookeeperMetricsCm.getMetadata().getLabels());
}*/
// We expect a headless and a headful service for both Kafka and Zookeeper (four in total)
List<Service> capturedServices = serviceCaptor.getAllValues();
context.assertEquals(4, capturedServices.size());
context.assertEquals(set(KafkaCluster.kafkaClusterName(clusterCmName), KafkaCluster.headlessName(clusterCmName), ZookeeperCluster.zookeeperClusterName(clusterCmName), ZookeeperCluster.zookeeperHeadlessName(clusterCmName)), capturedServices.stream().map(svc -> svc.getMetadata().getName()).collect(Collectors.toSet()));
// Assertions on the statefulset
List<StatefulSet> capturedSs = ssCaptor.getAllValues();
// We expect a statefulSet for kafka and zookeeper...
context.assertEquals(set(KafkaCluster.kafkaClusterName(clusterCmName), ZookeeperCluster.zookeeperClusterName(clusterCmName)), capturedSs.stream().map(ss -> ss.getMetadata().getName()).collect(Collectors.toSet()));
// if topic controller configuration was defined in the CM
if (topicController != null) {
List<Deployment> capturedDeps = depCaptor.getAllValues();
context.assertEquals(1, capturedDeps.size());
context.assertEquals(TopicController.topicControllerName(clusterCmName), capturedDeps.get(0).getMetadata().getName());
}
// PvcOperations only used for deletion
verifyNoMoreInteractions(mockPvcOps);
async.complete();
});
}
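The test hinges on Mockito's ArgumentCaptor: every resource handed to a mocked reconcile() is recorded for later assertions. A stripped-down, self-contained illustration of that pattern, with a hypothetical Reconciler interface standing in for ServiceOperator, KafkaSetOperator and friends:
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.List;

import org.mockito.ArgumentCaptor;

interface Reconciler { // hypothetical stand-in for the mocked operators above
    String reconcile(String namespace, String name, String resource);
}

public class CaptorSketch {
    public static void main(String[] args) {
        Reconciler mockOps = mock(Reconciler.class);
        ArgumentCaptor<String> captor = ArgumentCaptor.forClass(String.class);
        // capture the third argument on every invocation, as the test does for Services and StatefulSets
        when(mockOps.reconcile(anyString(), anyString(), captor.capture())).thenReturn("created");

        mockOps.reconcile("ns", "kafka", "kafka-statefulset");
        mockOps.reconcile("ns", "zookeeper", "zookeeper-statefulset");

        List<String> captured = captor.getAllValues(); // recorded in call order
        System.out.println(captured); // [kafka-statefulset, zookeeper-statefulset]
    }
}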