Use of io.fabric8.kubernetes.api.model.batch.v1.JobBuilder in project elastest-torm by elastest.
The class K8sService, method deployJob:
public JobResult deployJob(DockerContainer container, String namespace) throws Exception {
    final JobResult result = new JobResult();
    container.getCmd().get().forEach((command) -> {
        logger.debug("deployJob => Commands to execute: {}", command);
    });
    try {
        logger.info("Starting deploy of Job with name {} in namespace {}",
                container.getContainerName().get(), namespace);
        logger.info(String.join(",", container.getCmd().get()));
        Map<String, String> k8sJobLabels = container.getLabels().get();
        String containerNameWithoutUnderscore = container.getContainerName().get()
                .replace("_", "-");
        k8sJobLabels.put(LABEL_JOB_NAME, containerNameWithoutUnderscore);
        k8sJobLabels.put(LABEL_COMPONENT, containerNameWithoutUnderscore);
        k8sJobLabels.put(LABEL_COMPONENT_TYPE, ElastestComponentType.JOB.value);
        final Job job = new JobBuilder(Boolean.FALSE)
                .withApiVersion("batch/v1")
                .withNewMetadata()
                    .withName(containerNameWithoutUnderscore)
                    .withLabels(k8sJobLabels)
                .endMetadata()
                .withNewSpec()
                    .withNewTemplate()
                        .withNewMetadata()
                            .withLabels(k8sJobLabels)
                        .endMetadata()
                        .withNewSpec()
                            .addNewContainer()
                                .withName(containerNameWithoutUnderscore)
                                .withImage(container.getImageId())
                                .withArgs(container.getCmd().get())
                                .withEnv(getEnvVarListFromStringList(container.getEnvs().get()))
                            .endContainer()
                            .withRestartPolicy("Never")
                        .endSpec()
                    .endTemplate()
                .endSpec()
                .build();
        logger.info("Creating Job: {} in namespace {}",
                job.getMetadata().getLabels().get(LABEL_JOB_NAME), namespace);
        client.batch().jobs().inNamespace(namespace).create(job);
        result.setResult(1);
        result.setJobName(job.getMetadata().getName());
        result.setPodName("");
        final CountDownLatch watchLatch = new CountDownLatch(1);
        try (final Watch ignored = client.pods().inNamespace(namespace)
                .withLabel(LABEL_JOB_NAME, job.getMetadata().getName())
                .watch(new Watcher<Pod>() {

                    @Override
                    public void eventReceived(final Action action, Pod pod) {
                        job.getMetadata().getLabels().forEach((label, value) -> {
                            logger.debug("Label: {}={}", label, value);
                        });
                        logger.debug("Job {} receives an event",
                                job.getMetadata().getLabels().get(LABEL_JOB_NAME));
                        logger.debug("Event received: {}", pod.getStatus().getPhase());
                        logger.debug("Action: {}", action.toString());
                        logger.debug("Enum Pending: {}", PodsStatusEnum.PENDING.toString());
                        if (result.getPodName().isEmpty()) {
                            result.setPodName(pod.getMetadata().getName());
                        }
                        if (!(pod.getStatus().getPhase().equals(PodsStatusEnum.PENDING.toString()))
                                && !(pod.getStatus().getPhase().equals(PodsStatusEnum.RUNNING.toString()))) {
                            logger.info("Pod executed with result: {}", pod.getStatus().getPhase());
                            result.setResult(pod.getStatus().getPhase()
                                    .equals(PodsStatusEnum.SUCCEEDED.toString()) ? 0 : 1);
                            logger.info("Job {} is completed!", pod.getMetadata().getName());
                            watchLatch.countDown();
                        }
                    }

                    @Override
                    public void onClose(final KubernetesClientException e) {
                    }
                })) {
            watchLatch.await();
        } catch (final KubernetesClientException e) {
            logger.error("Could not watch pod", e);
        }
    } catch (final KubernetesClientException e) {
        String msg = "Unable to create job";
        logger.error(msg, e);
        throw new Exception(msg, e);
    }
    return result;
}
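The container environment above is produced by getEnvVarListFromStringList, whose body is not included in this excerpt. A minimal sketch of what such a helper might look like, assuming the environment arrives as "KEY=VALUE" strings and using fabric8's EnvVarBuilder (illustrative only, not the actual elastest implementation):

// Illustrative sketch only: the real elastest helper may differ.
private List<EnvVar> getEnvVarListFromStringList(List<String> envs) {
    List<EnvVar> envVars = new ArrayList<>();
    for (String env : envs) {
        // Split only on the first '=' so values that contain '=' stay intact
        String[] nameValue = env.split("=", 2);
        envVars.add(new EnvVarBuilder()
                .withName(nameValue[0])
                .withValue(nameValue.length > 1 ? nameValue[1] : "")
                .build());
    }
    return envVars;
}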
Use of io.fabric8.kubernetes.api.model.batch.v1.JobBuilder in project strimzi by strimzi.
The class BridgeClients, method consumerStrimziBridge:
public Job consumerStrimziBridge() {
    Map<String, String> consumerLabels = new HashMap<>();
    consumerLabels.put("app", this.getConsumerName());
    consumerLabels.put(Constants.KAFKA_CLIENTS_LABEL_KEY, Constants.KAFKA_BRIDGE_CLIENTS_LABEL_VALUE);
    PodSpecBuilder podSpecBuilder = new PodSpecBuilder();
    if (Environment.SYSTEM_TEST_STRIMZI_IMAGE_PULL_SECRET != null
            && !Environment.SYSTEM_TEST_STRIMZI_IMAGE_PULL_SECRET.isEmpty()) {
        List<LocalObjectReference> imagePullSecrets = Collections.singletonList(
                new LocalObjectReference(Environment.SYSTEM_TEST_STRIMZI_IMAGE_PULL_SECRET));
        podSpecBuilder.withImagePullSecrets(imagePullSecrets);
    }
    return new JobBuilder()
            .withNewMetadata()
                .withNamespace(this.getNamespaceName())
                .withLabels(consumerLabels)
                .withName(this.getConsumerName())
            .endMetadata()
            .withNewSpec()
                .withBackoffLimit(0)
                .withNewTemplate()
                    .withNewMetadata()
                        .withLabels(consumerLabels)
                    .endMetadata()
                    .withNewSpecLike(podSpecBuilder.build())
                        .withRestartPolicy("OnFailure")
                        .addNewContainer()
                            .withName(this.getConsumerName())
                            .withImagePullPolicy(Constants.IF_NOT_PRESENT_IMAGE_PULL_POLICY)
                            .withImage(Environment.TEST_HTTP_CONSUMER_IMAGE)
                            .addNewEnv().withName("HOSTNAME").withValue(this.getBootstrapAddress()).endEnv()
                            .addNewEnv().withName("PORT").withValue(Integer.toString(port)).endEnv()
                            .addNewEnv().withName("TOPIC").withValue(this.getTopicName()).endEnv()
                            .addNewEnv().withName("POLL_INTERVAL").withValue(Integer.toString(pollInterval)).endEnv()
                            .addNewEnv().withName("MESSAGE_COUNT").withValue(Integer.toString(this.getMessageCount())).endEnv()
                            .addNewEnv().withName("GROUP_ID").withValue(this.getConsumerGroup()).endEnv()
                        .endContainer()
                    .endSpec()
                .endTemplate()
            .endSpec()
            .build();
}
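In the Strimzi system tests the returned Job is normally handed to the test framework's resource manager. As an illustration only, it could also be submitted directly with a fabric8 client; the snippet below assumes fabric8 6.x and a BridgeClients instance named bridgeClients, both of which are assumptions on my part:

// Illustrative only: submit the consumer Job with a plain fabric8 client.
try (KubernetesClient client = new KubernetesClientBuilder().build()) {
    Job consumerJob = bridgeClients.consumerStrimziBridge();
    client.batch().v1().jobs()
            .inNamespace(consumerJob.getMetadata().getNamespace())
            .resource(consumerJob)
            .create();
}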
Use of io.fabric8.kubernetes.api.model.batch.v1.JobBuilder in project strimzi by strimzi.
The class KafkaClients, method defaultProducerStrimzi:
public JobBuilder defaultProducerStrimzi() {
    if (producerName == null || producerName.isEmpty()) {
        throw new InvalidParameterException("Producer name is not set.");
    }
    Map<String, String> producerLabels = new HashMap<>();
    producerLabels.put("app", producerName);
    producerLabels.put(Constants.KAFKA_CLIENTS_LABEL_KEY, Constants.KAFKA_CLIENTS_LABEL_VALUE);
    PodSpecBuilder podSpecBuilder = new PodSpecBuilder();
    if (Environment.SYSTEM_TEST_STRIMZI_IMAGE_PULL_SECRET != null
            && !Environment.SYSTEM_TEST_STRIMZI_IMAGE_PULL_SECRET.isEmpty()) {
        List<LocalObjectReference> imagePullSecrets = Collections.singletonList(
                new LocalObjectReference(Environment.SYSTEM_TEST_STRIMZI_IMAGE_PULL_SECRET));
        podSpecBuilder.withImagePullSecrets(imagePullSecrets);
    }
    return new JobBuilder()
            .withNewMetadata()
                .withNamespace(this.getNamespaceName())
                .withLabels(producerLabels)
                .withName(producerName)
            .endMetadata()
            .withNewSpec()
                .withBackoffLimit(0)
                .withNewTemplate()
                    .withNewMetadata()
                        .withName(producerName)
                        .withNamespace(this.getNamespaceName())
                        .withLabels(producerLabels)
                    .endMetadata()
                    .withNewSpecLike(podSpecBuilder.build())
                        .withRestartPolicy("Never")
                        .withContainers()
                        .addNewContainer()
                            .withName(producerName)
                            .withImagePullPolicy(Constants.IF_NOT_PRESENT_IMAGE_PULL_POLICY)
                            .withImage(Environment.TEST_PRODUCER_IMAGE)
                            .addNewEnv().withName("BOOTSTRAP_SERVERS").withValue(this.getBootstrapAddress()).endEnv()
                            .addNewEnv().withName("TOPIC").withValue(this.getTopicName()).endEnv()
                            .addNewEnv().withName("DELAY_MS").withValue(String.valueOf(delayMs)).endEnv()
                            .addNewEnv().withName("LOG_LEVEL").withValue("DEBUG").endEnv()
                            .addNewEnv().withName("MESSAGE_COUNT").withValue(String.valueOf(messageCount)).endEnv()
                            .addNewEnv().withName("MESSAGE").withValue(message).endEnv()
                            .addNewEnv().withName("PRODUCER_ACKS").withValue("all").endEnv()
                            .addNewEnv().withName("ADDITIONAL_CONFIG").withValue(this.getAdditionalConfig()).endEnv()
                            .addNewEnv().withName("BLOCKING_PRODUCER").withValue("true").endEnv()
                        .endContainer()
                    .endSpec()
                .endTemplate()
            .endSpec();
}
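Unlike the other examples, this method returns the JobBuilder itself rather than a built Job, so a caller can adjust the defaults before calling build(). A hypothetical customization is sketched below; the EXTRA_OPTION env var and the kafkaClients variable are illustrative and not part of the Strimzi code:

// Illustrative only: extend the default producer Job before building it.
Job producerJob = kafkaClients.defaultProducerStrimzi()
        .editSpec()
            .editTemplate()
                .editSpec()
                    .editFirstContainer()
                        .addNewEnv().withName("EXTRA_OPTION").withValue("some-value").endEnv()
                    .endContainer()
                .endSpec()
            .endTemplate()
        .endSpec()
        .build();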
Use of io.fabric8.kubernetes.api.model.batch.v1.JobBuilder in project strimzi by strimzi.
The class KafkaTracingClients, method kafkaStreamsWithTracing:
public Job kafkaStreamsWithTracing() {
    String kafkaStreamsName = "hello-world-streams";
    Map<String, String> kafkaStreamLabels = new HashMap<>();
    kafkaStreamLabels.put("app", kafkaStreamsName);
    PodSpecBuilder podSpecBuilder = new PodSpecBuilder();
    if (Environment.SYSTEM_TEST_STRIMZI_IMAGE_PULL_SECRET != null
            && !Environment.SYSTEM_TEST_STRIMZI_IMAGE_PULL_SECRET.isEmpty()) {
        List<LocalObjectReference> imagePullSecrets = Collections.singletonList(
                new LocalObjectReference(Environment.SYSTEM_TEST_STRIMZI_IMAGE_PULL_SECRET));
        podSpecBuilder.withImagePullSecrets(imagePullSecrets);
    }
    return new JobBuilder()
            .withNewMetadata()
                .withNamespace(ResourceManager.kubeClient().getNamespace())
                .withLabels(kafkaStreamLabels)
                .withName(kafkaStreamsName)
            .endMetadata()
            .withNewSpec()
                .withBackoffLimit(0)
                .withNewTemplate()
                    .withNewMetadata()
                        .withLabels(kafkaStreamLabels)
                    .endMetadata()
                    .withNewSpecLike(podSpecBuilder.build())
                        .withRestartPolicy("Never")
                        .withContainers()
                        .addNewContainer()
                            .withName(kafkaStreamsName)
                            .withImage(Environment.TEST_STREAMS_IMAGE)
                            .addNewEnv().withName("BOOTSTRAP_SERVERS").withValue(this.getBootstrapAddress()).endEnv()
                            .addNewEnv().withName("APPLICATION_ID").withValue(kafkaStreamsName).endEnv()
                            .addNewEnv().withName("SOURCE_TOPIC").withValue(this.getTopicName()).endEnv()
                            .addNewEnv().withName("TARGET_TOPIC").withValue(streamsTopicTargetName).endEnv()
                            .addNewEnv().withName("LOG_LEVEL").withValue("DEBUG").endEnv()
                            .addNewEnv().withName("JAEGER_SERVICE_NAME").withValue(jaegerServiceStreamsName).endEnv()
                            .addNewEnv().withName("JAEGER_AGENT_HOST").withValue(jaegerServerAgentName).endEnv()
                            .addNewEnv().withName("JAEGER_SAMPLER_TYPE").withValue(JAEGER_SAMPLER_TYPE).endEnv()
                            .addNewEnv().withName("JAEGER_SAMPLER_PARAM").withValue(JAEGER_SAMPLER_PARAM).endEnv()
                        .endContainer()
                    .endSpec()
                .endTemplate()
            .endSpec()
            .build();
}
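Because backoffLimit is 0 and the restart policy is Never, the Job either succeeds or fails on its single attempt. A small helper one might write to block until it reports success is sketched below; it assumes a fabric8 KubernetesClient and is not part of KafkaTracingClients:

// Illustrative only: wait until the Job reports at least one succeeded completion.
static void waitForJobSuccess(KubernetesClient client, Job job) {
    client.batch().v1().jobs()
            .inNamespace(job.getMetadata().getNamespace())
            .withName(job.getMetadata().getName())
            .waitUntilCondition(j -> j != null
                    && j.getStatus() != null
                    && j.getStatus().getSucceeded() != null
                    && j.getStatus().getSucceeded() > 0,
                    5, TimeUnit.MINUTES);
}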
Use of io.fabric8.kubernetes.api.model.batch.v1.JobBuilder in project hugegraph-computer by hugegraph.
The class ComputerJobDeployer, method getJob:
private Job getJob(String crName, ObjectMeta meta, ComputerJobSpec spec,
                   int instances, List<Container> containers) {
    List<Volume> volumes = spec.getVolumes();
    if (volumes == null) {
        volumes = new ArrayList<>();
    } else {
        volumes = Lists.newArrayList(volumes);
    }
    volumes.addAll(this.getConfigMapAndSecretVolumes(spec));
    String configMapName = KubeUtil.configMapName(crName);
    Volume configVolume = this.getComputerConfigVolume(configMapName);
    volumes.add(configVolume);
    // Support PodSpec template
    PodTemplateSpec podTemplateSpec = spec.getPodTemplateSpec();
    if (podTemplateSpec == null) {
        podTemplateSpec = new PodTemplateSpec();
    } else {
        podTemplateSpec = Serialization.clone(podTemplateSpec);
    }
    ObjectMeta metadata = podTemplateSpec.getMetadata();
    if (metadata == null) {
        metadata = new ObjectMeta();
    }
    metadata = new ObjectMetaBuilder(metadata)
            .addToLabels(meta.getLabels())
            .addToAnnotations(meta.getAnnotations())
            .build();
    podTemplateSpec.setMetadata(metadata);
    PodSpec podSpec = podTemplateSpec.getSpec();
    if (podSpec == null) {
        podSpec = new PodSpec();
    }
    podSpec.setVolumes(volumes);
    podSpec.setContainers(containers);
    podSpec.setRestartPolicy(JOB_RESTART_POLICY);
    if (podSpec.getTerminationGracePeriodSeconds() == null) {
        podSpec.setTerminationGracePeriodSeconds(TERMINATION_GRACE_PERIOD);
    }
    if (CollectionUtils.isEmpty(podSpec.getImagePullSecrets())) {
        podSpec.setImagePullSecrets(spec.getPullSecrets());
    }
    if (CollectionUtils.isEmpty(podSpec.getTopologySpreadConstraints())) {
        // Pod topology spread constraints default by node
        LabelSelector labelSelector = new LabelSelector();
        labelSelector.setMatchLabels(meta.getLabels());
        TopologySpreadConstraint spreadConstraint = new TopologySpreadConstraint(
                labelSelector, MAX_SKEW, TOPOLOGY_KEY, SCHEDULE_ANYWAY);
        podSpec.setTopologySpreadConstraints(Lists.newArrayList(spreadConstraint));
    }
    podTemplateSpec.setSpec(podSpec);
    return new JobBuilder()
            .withMetadata(meta)
            .withNewSpec()
                .withParallelism(instances)
                .withCompletions(instances)
                .withBackoffLimit(JOB_BACKOFF_LIMIT)
                .withTemplate(podTemplateSpec)
            .endSpec()
            .build();
}
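The topology spread constraint above relies on the positional all-args constructor, so its readability depends on remembering the argument order. As an alternative sketch only, reusing the same MAX_SKEW, TOPOLOGY_KEY, and SCHEDULE_ANYWAY constants, the generated builder makes the field names explicit:

// Alternative sketch: the same constraint via the generated builder instead of the constructor.
TopologySpreadConstraint spreadConstraint = new TopologySpreadConstraintBuilder()
        .withNewLabelSelector()
            .withMatchLabels(meta.getLabels())
        .endLabelSelector()
        .withMaxSkew(MAX_SKEW)
        .withTopologyKey(TOPOLOGY_KEY)
        .withWhenUnsatisfiable(SCHEDULE_ANYWAY)
        .build();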