Use of io.fabric8.kubernetes.api.model.Context in project strimzi by strimzi.
The class AbstractResourceOperatorTest, method deleteWhenResourceExistsStillDeletes.
@Test
public void deleteWhenResourceExistsStillDeletes(TestContext context) {
    T resource = resource();
    Resource mockResource = mock(resourceType());
    when(mockResource.get()).thenReturn(resource);

    NonNamespaceOperation mockNameable = mock(NonNamespaceOperation.class);
    when(mockNameable.withName(matches(RESOURCE_NAME))).thenReturn(mockResource);

    MixedOperation mockCms = mock(MixedOperation.class);
    when(mockCms.inNamespace(matches(NAMESPACE))).thenReturn(mockNameable);

    C mockClient = mock(clientType());
    mocker(mockClient, mockCms);

    AbstractResourceOperator<C, T, L, D, R, P> op = createResourceOperations(vertx, mockClient);

    Async async = context.async();
    // Reconciling with a null desired state should delete the existing resource
    op.reconcile(resource.getMetadata().getNamespace(), resource.getMetadata().getName(), null).setHandler(ar -> {
        assertTrue(ar.succeeded());
        verify(mockResource).delete();
        async.complete();
    });
}
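A concrete subclass supplies the hooks that this abstract test relies on: resource(), resourceType(), clientType(), mocker() and createResourceOperations(). Below is a minimal sketch of such a subclass, assuming a ConfigMap-backed operator; the type parameters, the ConfigMapOperator class and the exact hook signatures are illustrative assumptions, not strimzi's actual subclasses.

// A minimal sketch of a concrete subclass, assuming a ConfigMap-backed operator.
// The type parameters, the ConfigMapOperator class and the hook signatures are
// illustrative assumptions, not strimzi's actual code.
public class ConfigMapOperatorTest extends AbstractResourceOperatorTest<KubernetesClient, ConfigMap,
        ConfigMapList, DoneableConfigMap, Resource<ConfigMap, DoneableConfigMap>, ConfigMap> {

    @Override
    protected Class<KubernetesClient> clientType() {
        return KubernetesClient.class;      // client class the abstract test mocks
    }

    @Override
    protected Class<? extends Resource> resourceType() {
        return Resource.class;              // fabric8 Resource handle class to mock
    }

    @Override
    protected ConfigMap resource() {
        // A resource carrying the name and namespace the abstract test expects
        return new ConfigMapBuilder()
                .withNewMetadata()
                    .withName(RESOURCE_NAME)
                    .withNamespace(NAMESPACE)
                .endMetadata()
                .build();
    }

    @Override
    protected void mocker(KubernetesClient mockClient, MixedOperation op) {
        // Wire the mocked MixedOperation into the mocked client
        when(mockClient.configMaps()).thenReturn(op);
    }

    @Override
    protected ConfigMapOperator createResourceOperations(Vertx vertx, KubernetesClient mockClient) {
        // The operator under test; ConfigMapOperator is a placeholder name here
        return new ConfigMapOperator(vertx, mockClient);
    }
}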
Use of io.fabric8.kubernetes.api.model.Context in project strimzi by strimzi.
The class AbstractResourceOperatorTest, method createWhenExistsIsAPatch.
public void createWhenExistsIsAPatch(TestContext context, boolean cascade) {
    T resource = resource();
    Resource mockResource = mock(resourceType());
    when(mockResource.get()).thenReturn(resource);
    when(mockResource.cascading(cascade)).thenReturn(mockResource);

    NonNamespaceOperation mockNameable = mock(NonNamespaceOperation.class);
    when(mockNameable.withName(matches(resource.getMetadata().getName()))).thenReturn(mockResource);

    MixedOperation mockCms = mock(MixedOperation.class);
    when(mockCms.inNamespace(matches(resource.getMetadata().getNamespace()))).thenReturn(mockNameable);

    C mockClient = mock(clientType());
    mocker(mockClient, mockCms);

    AbstractResourceOperator<C, T, L, D, R, P> op = createResourceOperations(vertx, mockClient);

    Async async = context.async();
    Future<ReconcileResult<P>> fut = op.createOrUpdate(resource);
    fut.setHandler(ar -> {
        assertTrue(ar.succeeded());
        // Because the resource already exists, createOrUpdate should patch it rather than create it
        verify(mockResource).get();
        verify(mockResource).patch(any());
        verify(mockResource, never()).create(any());
        verify(mockResource, never()).createNew();
        verify(mockResource, never()).createOrReplace(any());
        verify(mockCms, never()).createOrReplace(any());
        async.complete();
    });
}
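Since createWhenExistsIsAPatch is a helper parameterized on the cascade flag rather than a @Test itself, it is presumably driven from plain test methods. A hedged sketch of such entry points, with illustrative method names:

// Illustrative entry points delegating to the helper above; the actual strimzi
// tests may call it differently.
@Test
public void createWhenExistsIsAPatchCascading(TestContext context) {
    createWhenExistsIsAPatch(context, true);
}

@Test
public void createWhenExistsIsAPatchNotCascading(TestContext context) {
    createWhenExistsIsAPatch(context, false);
}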
Use of io.fabric8.kubernetes.api.model.Context in project strimzi by strimzi.
The class ClusterControllerTest, method startStop.
/**
 * Does the CC start and then stop a verticle per namespace?
 * @param context the test context
 * @param namespaces a comma-separated list of namespaces the cluster controller should watch
 */
private void startStop(TestContext context, String namespaces) {
    AtomicInteger numWatchers = new AtomicInteger(0);
    KubernetesClient client = mock(KubernetesClient.class);
    MixedOperation mockCms = mock(MixedOperation.class);
    when(client.configMaps()).thenReturn(mockCms);

    List<String> namespaceList = asList(namespaces.split(" *,+ *"));
    for (String namespace : namespaceList) {
        MixedOperation mockNamespacedCms = mock(MixedOperation.class);
        when(mockNamespacedCms.watch(any())).thenAnswer(invo -> {
            // Count the watchers and hand back a mock Watch whose close() notifies the Watcher
            numWatchers.incrementAndGet();
            Watch mockWatch = mock(Watch.class);
            doAnswer(invo2 -> {
                ((Watcher) invo.getArgument(0)).onClose(null);
                return null;
            }).when(mockWatch).close();
            return mockWatch;
        });
        when(mockNamespacedCms.withLabels(any())).thenReturn(mockNamespacedCms);
        when(mockCms.inNamespace(namespace)).thenReturn(mockNamespacedCms);
    }

    Async async = context.async();
    Map<String, String> env = new HashMap<>();
    env.put(ClusterControllerConfig.STRIMZI_NAMESPACE, namespaces);
    env.put(ClusterControllerConfig.STRIMZI_CONFIGMAP_LABELS, STRIMZI_IO_KIND_CLUSTER);
    env.put(ClusterControllerConfig.STRIMZI_FULL_RECONCILIATION_INTERVAL_MS, "120000");
    Main.run(vertx, client, true, env).setHandler(ar -> {
        context.assertNull(ar.cause(), "Expected all verticles to start OK");
        async.complete();
    });
    async.await();

    context.assertEquals(namespaceList.size(), vertx.deploymentIDs().size(), "A verticle per namespace");

    // Undeploy every verticle and wait for all undeployments to finish
    List<Async> asyncs = new ArrayList<>();
    for (String deploymentId : vertx.deploymentIDs()) {
        Async async2 = context.async();
        asyncs.add(async2);
        vertx.undeploy(deploymentId, ar -> {
            context.assertNull(ar.cause(), "Didn't expect error when undeploying verticle " + deploymentId);
            async2.complete();
        });
    }
    for (Async async2 : asyncs) {
        async2.await();
    }

    if (numWatchers.get() > namespaceList.size()) {
        context.fail("Looks like there were more watchers than namespaces");
    }
}
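The startStop helper takes the namespaces as a comma-separated string, so the @Test methods driving it would likely look like the following sketch; the method names and namespace values are examples only:

// Illustrative callers of the startStop helper; namespace values are examples only.
@Test
public void startStopSingleNamespace(TestContext context) {
    startStop(context, "namespace");
}

@Test
public void startStopMultipleNamespaces(TestContext context) {
    startStop(context, "namespace1, namespace2, namespace3");
}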
Use of io.fabric8.kubernetes.api.model.Context in project strimzi by strimzi.
The class KafkaAssemblyOperatorMockIT, method assertStorageClass.
private void assertStorageClass(TestContext context, String statefulSetName, String expectedClass) {
    StatefulSet statefulSet = mockClient.apps().statefulSets().inNamespace(NAMESPACE).withName(statefulSetName).get();
    context.assertNotNull(statefulSet);
    // Check that the storage class of the first volume claim template matches the expected class
    List<PersistentVolumeClaim> volumeClaimTemplates = statefulSet.getSpec().getVolumeClaimTemplates();
    context.assertFalse(volumeClaimTemplates.isEmpty());
    context.assertEquals(expectedClass, volumeClaimTemplates.get(0).getSpec().getStorageClassName());
}
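A typical caller would assert the storage class of both the Kafka and Zookeeper StatefulSets; a short usage sketch, with "foo" as an illustrative expected class:

// Hedged usage sketch; "foo" stands in for whatever storage class the cluster ConfigMap declares.
assertStorageClass(context, KafkaCluster.kafkaClusterName(CLUSTER_NAME), "foo");
assertStorageClass(context, ZookeeperCluster.zookeeperClusterName(CLUSTER_NAME), "foo");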
Use of io.fabric8.kubernetes.api.model.Context in project strimzi by strimzi.
The class KafkaAssemblyOperatorMockIT, method testUpdateKafkaWithChangedDeleteClaim.
/**
 * Test that we can change the deleteClaim flag, and that it's honoured
 */
@Test
public void testUpdateKafkaWithChangedDeleteClaim(TestContext context) {
    if (!Storage.StorageType.PERSISTENT_CLAIM.equals(storageType(kafkaStorage))) {
        LOGGER.info("Skipping claim-based test because using storage type {}", kafkaStorage);
        return;
    }

    Set<String> allPvcs = new HashSet<>();
    Set<String> kafkaPvcs = createPvcs(kafkaStorage, kafkaReplicas, podId -> KafkaCluster.getPersistentVolumeClaimName(KafkaCluster.kafkaClusterName(CLUSTER_NAME), podId));
    Set<String> zkPvcs = createPvcs(zkStorage, zkReplicas, podId -> ZookeeperCluster.getPersistentVolumeClaimName(ZookeeperCluster.zookeeperClusterName(CLUSTER_NAME), podId));
    allPvcs.addAll(kafkaPvcs);
    allPvcs.addAll(zkPvcs);

    KafkaAssemblyOperator kco = createCluster(context);
    boolean originalKafkaDeleteClaim = deleteClaim(kafkaStorage);
    // assertDeleteClaim(context, KafkaCluster.kafkaClusterName(CLUSTER_NAME), originalKafkaDeleteClaim);

    // Flip the deleteClaim flag in the cluster ConfigMap and patch it
    boolean changedKafkaDeleteClaim = !originalKafkaDeleteClaim;
    HashMap<String, String> data = new HashMap<>(cluster.getData());
    data.put(KafkaCluster.KEY_STORAGE, new JsonObject(kafkaStorage.toString()).put(Storage.DELETE_CLAIM_FIELD, changedKafkaDeleteClaim).toString());
    ConfigMap changedClusterCm = new ConfigMapBuilder(cluster).withData(data).build();
    mockClient.configMaps().inNamespace(NAMESPACE).withName(CLUSTER_NAME).patch(changedClusterCm);

    LOGGER.info("Updating with changed delete claim");
    Async updateAsync = context.async();
    kco.reconcileAssembly(new Reconciliation("test-trigger", AssemblyType.KAFKA, NAMESPACE, CLUSTER_NAME), ar -> {
        if (ar.failed()) {
            ar.cause().printStackTrace();
        }
        context.assertTrue(ar.succeeded());
        updateAsync.complete();
    });
    updateAsync.await();

    LOGGER.info("Reconciling again -> delete");
    mockClient.configMaps().inNamespace(NAMESPACE).withName(CLUSTER_NAME).delete();
    Async deleteAsync = context.async();
    kco.reconcileAssembly(new Reconciliation("test-trigger", AssemblyType.KAFKA, NAMESPACE, CLUSTER_NAME), ar -> {
        if (ar.failed()) {
            ar.cause().printStackTrace();
        }
        context.assertTrue(ar.succeeded());
        // Only PVCs whose deleteClaim flag is false should survive the deletion
        assertPvcs(context, changedKafkaDeleteClaim ? deleteClaim(zkStorage) ? emptySet() : zkPvcs : deleteClaim(zkStorage) ? kafkaPvcs : allPvcs);
        deleteAsync.complete();
    });
}
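The assertPvcs helper used at the end of the test is not shown in this snippet. A minimal sketch, assuming it lists the PVCs in the namespace through the mock client and compares their names against the expected set; the real strimzi helper may differ:

// Hedged sketch of assertPvcs; requires java.util.stream.Collectors.
// Assumes the expected claims are compared against the PVC names currently in the namespace.
private void assertPvcs(TestContext context, Set<String> expectedClaims) {
    Set<String> actualClaims = mockClient.persistentVolumeClaims().inNamespace(NAMESPACE).list().getItems()
            .stream()
            .map(pvc -> pvc.getMetadata().getName())
            .collect(Collectors.toSet());
    context.assertEquals(expectedClaims, actualClaims);
}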