Use of io.fabric8.kubernetes.api.model.apps.DeploymentList in the kubernetes-client project by fabric8io.
From the class DeploymentTest, method testListWithLabels:
@Test
void testListWithLabels() {
  // Two expectations on the mock API server:
  //  - selecting by all three labels yields an empty DeploymentList (persistent stub),
  //  - selecting by only the first two labels yields a three-item list (one-shot stub).
  final String path = "/apis/apps/v1/namespaces/test/deployments?labelSelector=";
  server.expect()
      .withPath(path + Utils.toUrlEncoded("key1=value1,key2=value2,key3=value3"))
      .andReturn(200, new DeploymentListBuilder().build())
      .always();
  server.expect()
      .withPath(path + Utils.toUrlEncoded("key1=value1,key2=value2"))
      .andReturn(200, new DeploymentListBuilder().addNewItem().and().addNewItem().and().addNewItem().and().build())
      .once();

  // All three labels -> matches the first expectation: empty list.
  DeploymentList result = client.apps().deployments()
      .withLabel("key1", "value1")
      .withLabel("key2", "value2")
      .withLabel("key3", "value3")
      .list();
  assertNotNull(result);
  assertEquals(0, result.getItems().size());

  // Only two labels -> matches the second expectation: three items.
  result = client.apps().deployments()
      .withLabel("key1", "value1")
      .withLabel("key2", "value2")
      .list();
  assertNotNull(result);
  assertEquals(3, result.getItems().size());
}
Use of io.fabric8.kubernetes.api.model.apps.DeploymentList in the kas-fleetshard project by bf2fc6cc711aee1a0c2a.
From the class StrimziManager, method onStart:
@PostConstruct
protected void onStart() {
  // Select every Deployment (cluster-wide) labelled as part of managed-kafka.
  Map<String, String> selector = Map.of("app.kubernetes.io/part-of", "managed-kafka");
  FilterWatchListDeletable<Deployment, DeploymentList> deployments =
      this.kubernetesClient.apps().deployments().inAnyNamespace().withLabels(selector);

  // Seed the version state from the Deployments already present, then
  // publish a single consolidated status update.
  for (Deployment existing : deployments.list().getItems()) {
    if (isStrimziDeployment(existing)) {
      log.debugf("Adding Deployment %s/%s", existing.getMetadata().getNamespace(), existing.getMetadata().getName());
      updateStrimziVersion(existing);
    }
  }
  updateStatus();

  // Keep the version state in sync with subsequent add/update/delete events.
  this.resourceInformerFactory.create(Deployment.class, deployments, new ResourceEventHandler<Deployment>() {
    @Override
    public void onAdd(Deployment added) {
      if (isStrimziDeployment(added)) {
        log.debugf("Add/update event received for Deployment %s/%s", added.getMetadata().getNamespace(), added.getMetadata().getName());
        updateStrimziVersion(added);
        updateStatus();
      }
    }

    @Override
    public void onUpdate(Deployment previous, Deployment current) {
      // Updates are handled identically to additions.
      onAdd(current);
    }

    @Override
    public void onDelete(Deployment removed, boolean deletedFinalStateUnknown) {
      if (isStrimziDeployment(removed)) {
        log.debugf("Delete event received for Deployment %s/%s", removed.getMetadata().getNamespace(), removed.getMetadata().getName());
        deleteStrimziVersion(removed);
        updateStatus();
      }
    }
  });
}
Use of io.fabric8.kubernetes.api.model.apps.DeploymentList in the kas-fleetshard project by bf2fc6cc711aee1a0c2a.
From the class ManagedKafkaProvisioner, method install:
/**
 * Install this Kafka provisioner. This can be called once per test class or per test method.
 * <p>
 * Recreates the Kafka namespace, installs the Strimzi and fleetshard operators, and
 * creates the ManagedKafkaAgent resource. On clusters with small worker nodes an
 * informer is registered that caps CPU requests of operator Deployments at 1 CPU.
 *
 * @throws Exception if namespace cleanup or operator deployment does not complete in time
 */
public void install() throws Exception {
// delete/create the namespaces to be used
Map<String, String> nsAnnotations = new HashMap<>();
if (PerformanceEnvironment.KAFKA_COLLECT_LOG) {
// annotation triggers pod-log collection for performance runs
nsAnnotations.put(Constants.ORG_BF2_KAFKA_PERFORMANCE_COLLECTPODLOG, "true");
}
// tear down any previous installation before reinstalling
cluster.waitForDeleteNamespace(StrimziOperatorManager.OPERATOR_NS);
FleetShardOperatorManager.deleteFleetShard(cluster.kubeClient()).get(2, TimeUnit.MINUTES);
cluster.createNamespace(Constants.KAFKA_NAMESPACE, nsAnnotations, Map.of());
List<Node> workers = cluster.getWorkerNodes();
// "small" here means any worker with less than 3000 millicores available
boolean smallNodes = workers.stream().anyMatch(n -> TestUtils.getMaxAvailableResources(n).cpuMillis < 3000);
if (smallNodes) {
MixedOperation<Deployment, DeploymentList, RollableScalableResource<Deployment>> deployments = cluster.kubeClient().client().apps().deployments();
// watch all namespaces; the handler itself filters to the two operator namespaces
this.informer = deployments.inAnyNamespace().inform(new ResourceEventHandler<Deployment>() {
@Override
public void onUpdate(Deployment oldObj, Deployment newObj) {
// treat updates the same as additions
onAdd(newObj);
}
@Override
public void onDelete(Deployment obj, boolean deletedFinalStateUnknown) {
// nothing to clean up on delete
}
@Override
public void onAdd(Deployment obj) {
// only patch Deployments in the Strimzi or fleetshard operator namespaces
if (!obj.getMetadata().getNamespace().equals(StrimziOperatorManager.OPERATOR_NS) && !obj.getMetadata().getNamespace().equals(FleetShardOperatorManager.OPERATOR_NS)) {
return;
}
// patch any deployment that requests a lot of cpu, and make sure it's on the perf infra
deployments.inNamespace(obj.getMetadata().getNamespace()).withName(obj.getMetadata().getName()).edit(new TypedVisitor<ResourceRequirementsBuilder>() {
@Override
public void visit(ResourceRequirementsBuilder element) {
// prefer the explicit request; fall back to the limit
Quantity cpu = null;
if (element.getRequests() != null) {
cpu = element.getRequests().get("cpu");
}
if (cpu == null && element.getLimits() != null) {
cpu = element.getLimits().get("cpu");
}
// cap any request above 1 CPU down to exactly 1 CPU
if (cpu != null && Quantity.getAmountInBytes(cpu).compareTo(BigDecimal.valueOf(1)) > 0) {
element.addToRequests("cpu", Quantity.parse("1"));
}
}
});
}
});
}
// installs the Strimzi Operator using the OLM bundle
CompletableFuture<Void> strimziFuture = strimziManager.deployStrimziOperator();
cluster.connectNamespaceToMonitoringStack(StrimziOperatorManager.OPERATOR_NS);
// installs a cluster wide fleetshard operator
// not looking at the returned futures - it's assumed that we'll eventually wait on the managed kafka deployment
CompletableFuture<Void> future = FleetShardOperatorManager.deployFleetShardOperator(cluster.kubeClient());
CompletableFuture.allOf(future, strimziFuture).get(2, TimeUnit.MINUTES);
// agent resource with a placeholder (empty) observability config
var agentResource = this.cluster.kubeClient().client().resource(new ManagedKafkaAgentBuilder().withNewMetadata().withName(ManagedKafkaAgentResourceClient.RESOURCE_NAME).withNamespace(FleetShardOperatorManager.OPERATOR_NS).endMetadata().withSpec(new ManagedKafkaAgentSpecBuilder().withNewObservability().withAccessToken("").withChannel("").withRepository("").withTag("").endObservability().build()).build());
agentResource.createOrReplace();
// FleetShardOperatorManager.deployFleetShardSync(cluster.kubeClient());
cluster.connectNamespaceToMonitoringStack(FleetShardOperatorManager.OPERATOR_NS);
// read the available Strimzi versions back from the agent's reported status
strimziVersions = SyncApiClient.getSortedAvailableStrimziVersions(() -> agentResource.fromServer().get().getStatus()).collect(Collectors.toList());
}
Use of io.fabric8.kubernetes.api.model.apps.DeploymentList in the strimzi project by strimzi.
From the class MockKube, method build:
/**
 * Builds a Mockito-backed {@link KubernetesClient} whose resource operations
 * (ConfigMaps, PVCs, Endpoints, Services, Pods, StatefulSets, Deployments)
 * are served by the in-memory mock operations built by this class.
 *
 * @return the mocked client, ready for use in tests
 */
public KubernetesClient build() {
KubernetesClient mockClient = mock(KubernetesClient.class);
// build one mock MixedOperation per resource type
MixedOperation<ConfigMap, ConfigMapList, DoneableConfigMap, Resource<ConfigMap, DoneableConfigMap>> mockCms = buildConfigMaps();
MixedOperation<PersistentVolumeClaim, PersistentVolumeClaimList, DoneablePersistentVolumeClaim, Resource<PersistentVolumeClaim, DoneablePersistentVolumeClaim>> mockPvcs = buildPvcs();
MixedOperation<Endpoints, EndpointsList, DoneableEndpoints, Resource<Endpoints, DoneableEndpoints>> mockEndpoints = buildEndpoints();
MixedOperation<Service, ServiceList, DoneableService, Resource<Service, DoneableService>> mockSvc = buildServices();
MixedOperation<Pod, PodList, DoneablePod, PodResource<Pod, DoneablePod>> mockPods = buildPods();
// StatefulSets get the pod operations so scaling can create/delete pods
MixedOperation<StatefulSet, StatefulSetList, DoneableStatefulSet, RollableScalableResource<StatefulSet, DoneableStatefulSet>> mockSs = buildStatefulSets(mockPods);
MixedOperation<Deployment, DeploymentList, DoneableDeployment, ScalableResource<Deployment, DoneableDeployment>> mockDep = buildDeployments();
// wire each operation into the client's accessor
when(mockClient.configMaps()).thenReturn(mockCms);
when(mockClient.services()).thenReturn(mockSvc);
AppsAPIGroupDSL api = mock(AppsAPIGroupDSL.class);
when(api.statefulSets()).thenReturn(mockSs);
when(mockClient.apps()).thenReturn(api);
ExtensionsAPIGroupDSL ext = mock(ExtensionsAPIGroupDSL.class);
when(mockClient.extensions()).thenReturn(ext);
// NOTE: deployments are stubbed on extensions(), not apps() — apps().deployments()
// is NOT stubbed here, so callers must use the extensions API group
when(ext.deployments()).thenReturn(mockDep);
when(mockClient.pods()).thenReturn(mockPods);
when(mockClient.endpoints()).thenReturn(mockEndpoints);
when(mockClient.persistentVolumeClaims()).thenReturn(mockPvcs);
return mockClient;
}
Use of io.fabric8.kubernetes.api.model.apps.DeploymentList in the jointware project by isdream.
From the class OpenshiftAPIExample, method main:
/**
 * Example entry point: connects to an OpenShift cluster and prints the
 * Deployments visible through the extensions API group.
 * <p>
 * Fixes over the original: the client is now closed via try-with-resources
 * (it was previously leaked), and the discarded no-op DSL accessor calls
 * ({@code pods()}, {@code replicationControllers()}, {@code secrets()} and a
 * duplicate {@code extensions().deployments()}) have been removed — they only
 * created operation objects whose results were thrown away.
 *
 * @param args command-line arguments (unused)
 */
public static void main(String[] args) {
    // try-with-resources ensures the client's HTTP resources are released
    try (DefaultOpenShiftClient client = createClient()) {
        MixedOperation<Deployment, DeploymentList, DoneableDeployment, ScalableResource<Deployment, DoneableDeployment>> deployment = client.extensions().deployments();
        // a single list() call fetches and prints all Deployments
        System.out.println(deployment.list().getItems());
    }
}
Aggregations