Example usage of io.fabric8.kubernetes.client.KubernetesClient in the strimzi project (strimzi/strimzi),
from class Main, method isOnOpenShift:
/**
 * Determines whether the cluster behind {@code client} is OpenShift rather than
 * plain Kubernetes, by probing the OpenShift-specific {@code /oapi} endpoint on
 * the API server and, on a 200 response, double-checking via {@code isAdaptable}.
 *
 * @param vertx  Vert.x instance used to create the probing HTTP client
 * @param client Kubernetes client whose master URL is probed
 * @return a future completed with {@code true} on OpenShift and {@code false}
 *         otherwise; failed if the probe request itself cannot be performed
 */
static Future<Boolean> isOnOpenShift(Vertx vertx, KubernetesClient client) {
    URL kubernetesApi = client.getMasterUrl();
    Future<Boolean> fut = Future.future();
    HttpClientOptions httpClientOptions = new HttpClientOptions();
    httpClientOptions.setDefaultHost(kubernetesApi.getHost());
    // URL.getPort() is -1 when the URL carries no explicit port; fall back to the protocol default
    if (kubernetesApi.getPort() == -1) {
        httpClientOptions.setDefaultPort(kubernetesApi.getDefaultPort());
    } else {
        httpClientOptions.setDefaultPort(kubernetesApi.getPort());
    }
    if (kubernetesApi.getProtocol().equals("https")) {
        httpClientOptions.setSsl(true);
        // NOTE(review): trust-all accepts the API server's self-signed/cluster cert;
        // acceptable for an in-cluster probe, not for general use
        httpClientOptions.setTrustAll(true);
    }
    HttpClient httpClient = vertx.createHttpClient(httpClientOptions);
    HttpClientRequest request = httpClient.get("/oapi", res -> {
        if (res.statusCode() == HttpResponseStatus.OK.code()) {
            log.debug("{} returned {}. We are on OpenShift.", res.request().absoluteURI(), res.statusCode());
            // We should be on OpenShift based on the /oapi result. We can now safely try isAdaptable() to be 100% sure.
            Boolean isOpenShift = Boolean.TRUE.equals(client.isAdaptable(OpenShiftClient.class));
            fut.complete(isOpenShift);
        } else {
            log.debug("{} returned {}. We are not on OpenShift.", res.request().absoluteURI(), res.statusCode());
            fut.complete(Boolean.FALSE);
        }
    });
    // Bug fix: the previous getNow() variant had no exception path, so a connection
    // failure (e.g. unreachable API server) left the returned future forever
    // incomplete and the caller hanging. Fail the future explicitly instead.
    request.exceptionHandler(fut::fail);
    request.end();
    return fut;
}
Example usage of io.fabric8.kubernetes.client.KubernetesClient in the strimzi project (strimzi/strimzi),
from class Main, method main:
/**
 * Entry point: detects whether we run on OpenShift or plain Kubernetes,
 * then starts the controller; exits with status 1 on either detection
 * failure or controller start-up failure.
 */
public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();
    KubernetesClient client = new DefaultKubernetesClient();
    isOnOpenShift(vertx, client).setHandler(detection -> {
        // Fail fast if we could not even tell which platform we are on.
        if (detection.failed()) {
            log.error("Failed to distinguish between Kubernetes and OpenShift", detection.cause());
            System.exit(1);
            return;
        }
        boolean isOpenShift = detection.result().booleanValue();
        run(vertx, client, isOpenShift, System.getenv()).setHandler(startup -> {
            if (startup.failed()) {
                log.error("Unable to start controller for 1 or more namespace", startup.cause());
                System.exit(1);
            }
        });
    });
}
Example usage of io.fabric8.kubernetes.client.KubernetesClient in the strimzi project (strimzi/strimzi),
from class StatefulSetOperatorTest, method mocker:
/**
 * Wires the mocked client so that {@code mockClient.apps().statefulSets()}
 * resolves to the supplied mixed operation {@code op}.
 */
@Override
protected void mocker(KubernetesClient mockClient, MixedOperation op) {
    AppsAPIGroupDSL appsDsl = mock(AppsAPIGroupDSL.class);
    when(mockClient.apps()).thenReturn(appsDsl);
    when(appsDsl.statefulSets()).thenReturn(op);
}
Example usage of io.fabric8.kubernetes.client.KubernetesClient in the strimzi project (strimzi/strimzi),
from class K8sImplTest, method testList:
/**
 * Verifies that {@code K8sImpl.listMaps} queries the client with the label
 * predicate and namespace and delivers a non-empty ConfigMap list.
 */
@Test
public void testList(TestContext context) {
    Async async = context.async();
    KubernetesClient mockClient = mock(KubernetesClient.class);
    MixedOperation<ConfigMap, ConfigMapList, DoneableConfigMap, Resource<ConfigMap, DoneableConfigMap>> mockConfigMaps = mock(MixedOperation.class);
    // Each filtering call returns the same mock so the fluent chain terminates in list()
    when(mockClient.configMaps()).thenReturn(mockConfigMaps);
    when(mockConfigMaps.withLabels(any())).thenReturn(mockConfigMaps);
    when(mockConfigMaps.inNamespace(any())).thenReturn(mockConfigMaps);
    when(mockConfigMaps.list()).thenReturn(new ConfigMapListBuilder().addNewItem().withKind("ConfigMap").withNewMetadata().withName("unrelated").withLabels(Collections.singletonMap("foo", "bar")).endMetadata().withData(Collections.singletonMap("foo", "bar")).endItem().addNewItem().endItem().build());
    K8sImpl k8s = new K8sImpl(vertx, mockClient, new LabelPredicate("foo", "bar"), "default");
    k8s.listMaps(ar -> {
        // Bug fix: assert success first — otherwise a failed result makes
        // ar.result() null and the NPE below masks the real failure cause.
        context.assertTrue(ar.succeeded());
        List<ConfigMap> list = ar.result();
        context.assertFalse(list.isEmpty());
        async.complete();
    });
}
Example usage of io.fabric8.kubernetes.client.KubernetesClient in the strimzi project (strimzi/strimzi),
from class KafkaClusterTest, method testKafkaScaleUpScaleDown:
/**
 * Scales the Kafka StatefulSet up by one broker, verifies the new broker has
 * joined the cluster (via kafka-broker-api-versions output), then scales back
 * down and verifies the broker has left.
 */
@Test
@KafkaCluster(name = "my-cluster", kafkaNodes = 3)
public void testKafkaScaleUpScaleDown() {
    // kafka cluster already deployed via annotation
    String clusterName = "my-cluster";
    LOGGER.info("Running kafkaScaleUpScaleDown {}", clusterName);
    // kubeClient.waitForStatefulSet(kafkaStatefulSetName(clusterName), 3);
    // Bug fix: the client was never closed, leaking its HTTP connection pool;
    // KubernetesClient is Closeable, so use try-with-resources.
    try (KubernetesClient client = new DefaultKubernetesClient()) {
        final int initialReplicas = client.apps().statefulSets().inNamespace(NAMESPACE).withName(kafkaStatefulSetName(clusterName)).get().getStatus().getReplicas();
        assertEquals(3, initialReplicas);
        // scale up
        final int scaleTo = initialReplicas + 1;
        final int newPodId = initialReplicas;
        final int newBrokerId = newPodId;
        final String newPodName = kafkaPodName(clusterName, newPodId);
        final String firstPodName = kafkaPodName(clusterName, 0);
        LOGGER.info("Scaling Kafka up to {}", scaleTo);
        replaceCm(clusterName, "kafka-nodes", String.valueOf(scaleTo));
        kubeClient.waitForStatefulSet(kafkaStatefulSetName(clusterName), scaleTo);
        // Test that the new broker has joined the kafka cluster by checking it knows about all the other broker's API versions
        // (execute bash because we want the env vars expanded in the pod)
        String versions = getBrokerApiVersions(newPodName);
        for (int brokerId = 0; brokerId < scaleTo; brokerId++) {
            assertTrue(versions, versions.contains("(id: " + brokerId + " rack: "));
        }
        // TODO Check for k8s events, logs for errors
        // scale down
        LOGGER.info("Scaling down");
        // client.apps().statefulSets().inNamespace(NAMESPACE).withName(kafkaStatefulSetName(clusterName)).scale(initialReplicas, true);
        replaceCm(clusterName, "kafka-nodes", String.valueOf(initialReplicas));
        kubeClient.waitForStatefulSet(kafkaStatefulSetName(clusterName), initialReplicas);
        final int finalReplicas = client.apps().statefulSets().inNamespace(NAMESPACE).withName(kafkaStatefulSetName(clusterName)).get().getStatus().getReplicas();
        assertEquals(initialReplicas, finalReplicas);
        versions = getBrokerApiVersions(firstPodName);
        assertTrue("Expect the added broker, " + newBrokerId + ", to no longer be present in output of kafka-broker-api-versions.sh", !versions.contains("(id: " + newBrokerId + " rack: "));
    }
}
Aggregations