Use of io.fabric8.agent.service.State in project strimzi by strimzi.
The class ControllerTest, method configMapRemoved.
// TODO 3-way reconciliation where Kafka and Kube agree
// TODO 3-way reconciliation where all three agree
// TODO 3-way reconciliation with conflict
// TODO reconciliation where only private state exists => delete the private state
// TODO tests for the other reconciliation cases
//  + non-matching predicate
//  + error cases
private void configMapRemoved(TestContext context, Exception deleteTopicException, Exception storeException) {
    Topic kubeTopic = new Topic.Builder(topicName.toString(), 10, (short) 2, map("cleanup.policy", "bar")).build();
    Topic kafkaTopic = kubeTopic;
    Topic privateTopic = kubeTopic;
    mockKafka.setCreateTopicResponse(topicName.toString(), null).createTopic(kafkaTopic, ar -> {
    });
    mockKafka.setTopicMetadataResponse(topicName, Utils.getTopicMetadata(kubeTopic), null);
    mockKafka.setDeleteTopicResponse(topicName, deleteTopicException);
    mockTopicStore.setCreateTopicResponse(topicName, null).create(privateTopic, ar -> {
    });
    mockTopicStore.setDeleteTopicResponse(topicName, storeException);
    ConfigMap cm = TopicSerialization.toConfigMap(kubeTopic, cmPredicate);
    Async async = context.async();
    controller.onConfigMapDeleted(cm, ar -> {
        if (deleteTopicException != null || storeException != null) {
            assertFailed(context, ar);
            if (deleteTopicException != null) {
                // should still exist
                mockKafka.assertExists(context, kafkaTopic.getTopicName());
            } else {
                mockKafka.assertNotExists(context, kafkaTopic.getTopicName());
            }
            mockTopicStore.assertExists(context, kafkaTopic.getTopicName());
        } else {
            assertSucceeded(context, ar);
            mockKafka.assertNotExists(context, kafkaTopic.getTopicName());
            mockTopicStore.assertNotExists(context, kafkaTopic.getTopicName());
        }
        async.complete();
    });
}
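A minimal sketch of how this parameterized helper might be driven by individual test methods, assuming org.junit.Test and io.vertx.ext.unit.TestContext are already imported in the test class; the test names and the simulated exceptions below are illustrative, not taken from the project:

@Test
public void testConfigMapRemoved(TestContext context) {
    // happy path: both the Kafka delete and the store delete succeed
    configMapRemoved(context, null, null);
}

@Test
public void testConfigMapRemovedWithKafkaDeleteFailure(TestContext context) {
    // the Kafka topic deletion fails, so the topic and the private state must survive
    configMapRemoved(context, new RuntimeException("simulated Kafka delete failure"), null);
}

@Test
public void testConfigMapRemovedWithStoreDeleteFailure(TestContext context) {
    // the Kafka delete succeeds but removing the private state fails
    configMapRemoved(context, null, new RuntimeException("simulated topic store failure"));
}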
Use of io.fabric8.agent.service.State in project strimzi by strimzi.
The class KafkaClusterTest, method testZookeeperScaleUpScaleDown.
@Test
@KafkaCluster(name = "my-cluster", kafkaNodes = 1, zkNodes = 1)
public void testZookeeperScaleUpScaleDown() {
    // kafka cluster already deployed via annotation
    String clusterName = "my-cluster";
    LOGGER.info("Running zookeeperScaleUpScaleDown with cluster {}", clusterName);
    // kubeClient.waitForStatefulSet(zookeeperStatefulSetName(clusterName), 1);
    KubernetesClient client = new DefaultKubernetesClient();
    final int initialReplicas = client.apps().statefulSets().inNamespace(NAMESPACE).withName(zookeeperStatefulSetName(clusterName)).get().getStatus().getReplicas();
    assertEquals(1, initialReplicas);
    // scale up
    final int scaleTo = initialReplicas + 2;
    final int[] newPodIds = { initialReplicas, initialReplicas + 1 };
    final String[] newPodName = { zookeeperPodName(clusterName, newPodIds[0]), zookeeperPodName(clusterName, newPodIds[1]) };
    final String firstPodName = zookeeperPodName(clusterName, 0);
    LOGGER.info("Scaling zookeeper up to {}", scaleTo);
    replaceCm(clusterName, "zookeeper-nodes", String.valueOf(scaleTo));
    kubeClient.waitForPod(newPodName[0]);
    kubeClient.waitForPod(newPodName[1]);
    // check that each node is in either the leader or the follower state
    waitForZkMntr(firstPodName, Pattern.compile("zk_server_state\\s+(leader|follower)"));
    waitForZkMntr(newPodName[0], Pattern.compile("zk_server_state\\s+(leader|follower)"));
    waitForZkMntr(newPodName[1], Pattern.compile("zk_server_state\\s+(leader|follower)"));
    // TODO Check for k8s events, logs for errors
    // scale down
    LOGGER.info("Scaling down");
    replaceCm(clusterName, "zookeeper-nodes", String.valueOf(1));
    kubeClient.waitForResourceDeletion("po", zookeeperPodName(clusterName, 1));
    // Wait for the one remaining node to enter standalone mode
    waitForZkMntr(firstPodName, Pattern.compile("zk_server_state\\s+standalone"));
    // TODO Check for k8s events, logs for errors
}
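The waitForZkMntr helper used above is not shown in this excerpt. Below is a self-contained sketch of one way such a check could be implemented; it assumes kubectl on the PATH, ZooKeeper's client port 2181, and nc available inside the pod, none of which is confirmed by the snippet, and it is not the project's actual helper:

import java.nio.charset.StandardCharsets;
import java.util.regex.Pattern;

public class ZkMntrWaitSketch {

    // Run ZooKeeper's "mntr" four-letter command inside the pod and test the output.
    static boolean zkMntrMatches(String podName, Pattern pattern) throws Exception {
        Process p = new ProcessBuilder("kubectl", "exec", podName, "--",
                "bash", "-c", "echo mntr | nc localhost 2181").start();
        String out = new String(p.getInputStream().readAllBytes(), StandardCharsets.UTF_8);
        p.waitFor();
        return pattern.matcher(out).find();
    }

    // Poll until the pattern matches or an illustrative two-minute deadline passes.
    static void waitForZkMntr(String podName, Pattern pattern) throws Exception {
        long deadline = System.currentTimeMillis() + 120_000;
        while (!zkMntrMatches(podName, pattern)) {
            if (System.currentTimeMillis() > deadline) {
                throw new IllegalStateException("Timed out waiting for mntr output on " + podName);
            }
            Thread.sleep(5_000);
        }
    }
}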
Use of io.fabric8.agent.service.State in project syndesis by syndesisio.
The class ActivityTrackingController, method pollPods.
private void pollPods() {
    try {
        // clear the marks
        for (PodLogMonitor handler : podHandlers.values()) {
            handler.markInOpenshift.set(false);
        }
        PodList podList = listPods();
        for (Pod pod : podList.getItems()) {
            // We are only looking for running containers.
            if (!"Running".equals(pod.getStatus().getPhase())) {
                continue;
            }
            String name = pod.getMetadata().getName();
            PodLogMonitor handler = podHandlers.get(name);
            if (handler == null) {
                // create a new handler.
                try {
                    handler = new PodLogMonitor(this, pod);
                    handler.start();
                    LOG.info("Created handler for pod: {}", handler.podName);
                    podHandlers.put(name, handler);
                } catch (IOException e) {
                    LOG.error("Unexpected Error", e);
                }
            } else {
                // mark existing handlers as being used.
                handler.markInOpenshift.set(true);
            }
        }
        // Remove items from the map which are no longer in openshift
        Iterator<Map.Entry<String, PodLogMonitor>> iterator = podHandlers.entrySet().iterator();
        while (iterator.hasNext()) {
            Map.Entry<String, PodLogMonitor> next = iterator.next();
            if (!next.getValue().markInOpenshift.get()) {
                LOG.info("Pod not tracked by openshift anymore: {}", next.getValue().podName);
                next.getValue().keepTrying.set(false);
                iterator.remove();
            }
        }
        @SuppressWarnings("unchecked") // NOPMD
        Map<String, Object> pods = dbGet(HashMap.class, "/activity/pods");
        if (pods != null) {
            pods.keySet().removeAll(podHandlers.keySet());
            for (String o : pods.keySet()) {
                jsonDB.delete("/activity/pods/" + o);
                LOG.info("Pod state removed from db: {}", o);
            }
        }
    } catch (@SuppressWarnings("PMD.AvoidCatchingGenericException") RuntimeException | IOException e) {
        LOG.error("Unexpected Error occurred.", e);
    }
}
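How pollPods() is scheduled is not shown in this excerpt. The sketch below, using a plain ScheduledExecutorService with an illustrative five-second delay (not the controller's actual wiring), shows the usual pattern for running such a poll loop without overlapping executions:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class PollLoopSketch {
    public static void main(String[] args) {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        // Stand-in for controller::pollPods; the real controller's task may differ.
        Runnable poll = () -> System.out.println("polling pods...");
        // scheduleWithFixedDelay waits for the previous run to finish before
        // scheduling the next one, so a slow poll cannot pile up executions.
        scheduler.scheduleWithFixedDelay(poll, 0, 5, TimeUnit.SECONDS);
    }
}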
Use of io.fabric8.agent.service.State in project fabric8 by fabric8io.
The class SessionPodsAreReady, method call.
@Override
public Boolean call() throws Exception {
    boolean result = true;
    List<Pod> pods = notNullList(kubernetesClient.pods().inNamespace(session.getNamespace()).list().getItems());
    if (pods.isEmpty()) {
        result = false;
        session.getLogger().warn("No pods are available yet, waiting...");
    }
    for (Pod pod : pods) {
        if (!KubernetesHelper.isPodReady(pod)) {
            PodStatus podStatus = pod.getStatus();
            int restartCount = 0;
            if (podStatus != null) {
                if ("Succeeded".equals(podStatus.getPhase())) {
                    // skip pods that have finished. see: OSFUSE-317
                    continue;
                }
                List<ContainerStatus> containerStatuses = podStatus.getContainerStatuses();
                for (ContainerStatus containerStatus : containerStatuses) {
                    if (restartCount == 0) {
                        Integer restartCountValue = containerStatus.getRestartCount();
                        if (restartCountValue != null) {
                            restartCount = restartCountValue.intValue();
                        }
                    }
                    ContainerState state = containerStatus.getState();
                    if (state != null) {
                        ContainerStateWaiting waiting = state.getWaiting();
                        String containerName = containerStatus.getName();
                        if (waiting != null) {
                            session.getLogger().warn("Waiting for container:" + containerName + ". Reason:" + waiting.getReason());
                        } else {
                            session.getLogger().warn("Waiting for container:" + containerName + ".");
                        }
                    }
                }
            }
            result = false;
            String name = KubernetesHelper.getName(pod);
            File yamlFile = new File(session.getBaseDir(), "target/test-pod-status/" + name + ".yml");
            yamlFile.getParentFile().mkdirs();
            try {
                KubernetesHelper.saveYaml(pod, yamlFile);
            } catch (IOException e) {
                session.getLogger().warn("Failed to write " + yamlFile + ". " + e);
            }
            if (KubernetesHelper.isPodRunning(pod)) {
                List<Container> containers = pod.getSpec().getContainers();
                for (Container container : containers) {
                    File logFile = LogHelpers.getLogFileName(session.getBaseDir(), name, container, restartCount);
                    String log = kubernetesClient.pods().inNamespace(session.getNamespace()).withName(name).inContainer(container.getName()).getLog();
                    IOHelpers.writeFully(logFile, log);
                }
            }
        }
    }
    return result;
}
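Because call() returns a Boolean rather than blocking, this class is meant to be polled. The generic sketch below shows how such a readiness check can be retried until it succeeds or times out; the timeout, interval, and the way the condition instance is obtained are assumptions for illustration, not the project's wait machinery:

import java.util.concurrent.Callable;

public class WaitUntilTrue {
    static void waitFor(Callable<Boolean> condition, long timeoutMillis, long intervalMillis) throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        // Re-evaluate the condition until it reports true or the deadline passes.
        while (!Boolean.TRUE.equals(condition.call())) {
            if (System.currentTimeMillis() > deadline) {
                throw new IllegalStateException("Condition not met within " + timeoutMillis + " ms");
            }
            Thread.sleep(intervalMillis);
        }
    }
}

A hypothetical call site would look like waitFor(sessionPodsAreReady, 5 * 60_000, 5_000), where sessionPodsAreReady is an already-constructed instance of the class above.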
Use of io.fabric8.agent.service.State in project fabric8 by fabric8io.
The class ScrState, method checkBundle.
@Override
protected Check checkBundle(Bundle bundle) {
    if (bundle.getHeaders().get("Service-Component") == null) {
        return null;
    }
    ScrService svc = tracker.getService();
    if (svc == null) {
        return new Check("scr-state", "No ScrService found");
    }
    Component[] components = svc.getComponents(bundle);
    if (components != null) {
        for (Component component : components) {
            int state = component.getState();
            if (state != Component.STATE_ACTIVE && state != Component.STATE_REGISTERED && state != Component.STATE_FACTORY) {
                return new Check("scr-state", "SCR bundle " + bundle.getBundleId() + " is in state " + getState(state));
            }
        }
    }
    return null;
}
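The getState(int) helper referenced above is not part of this excerpt. A hedged sketch follows, mapping only the SCR component state constants the check itself names and falling back to the raw value for anything else; the project's real helper may cover more states:

import org.apache.felix.scr.Component;

final class ScrStateNamesSketch {
    static String getState(int state) {
        switch (state) {
            case Component.STATE_ACTIVE:
                return "ACTIVE";
            case Component.STATE_REGISTERED:
                return "REGISTERED";
            case Component.STATE_FACTORY:
                return "FACTORY";
            default:
                // Any other state is reported numerically in this sketch.
                return "UNKNOWN(" + state + ")";
        }
    }
}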