Use of io.strimzi.api.kafka.model.status.KafkaConnectStatus in project strimzi by strimzi.
The class CustomResourceStatusIsolatedST, method assertKafkaConnectStatus.
void assertKafkaConnectStatus(long expectedObservedGeneration, String expectedUrl) {
KafkaConnectStatus kafkaConnectStatus = KafkaConnectResource.kafkaConnectClient()
    .inNamespace(clusterOperator.getDeploymentNamespace())
    .withName(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME)
    .get().getStatus();
assertThat("Kafka Connect cluster status has incorrect Observed Generation", kafkaConnectStatus.getObservedGeneration(), is(expectedObservedGeneration));
assertThat("Kafka Connect cluster status has incorrect URL", kafkaConnectStatus.getUrl(), is(expectedUrl));
validateConnectPlugins(kafkaConnectStatus.getConnectorPlugins());
}
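The validateConnectPlugins helper called above is not included in this excerpt. A minimal sketch of such a check, under the assumption that it only verifies the built-in FileStream connectors are reported in the status (the expected plugin classes and the hamcrest matchers are assumptions, not the project's exact implementation):
// Hypothetical sketch of the helper referenced above; the expected plugin classes are assumptions.
void validateConnectPlugins(List<ConnectorPlugin> plugins) {
    assertThat("Connector plugins are missing from the KafkaConnect status", plugins, is(notNullValue()));
    List<String> pluginClasses = plugins.stream()
        .map(ConnectorPlugin::getConnectorClass)
        .collect(Collectors.toList());
    assertThat(pluginClasses, hasItems(
        "org.apache.kafka.connect.file.FileStreamSinkConnector",
        "org.apache.kafka.connect.file.FileStreamSourceConnector"));
}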
Use of io.strimzi.api.kafka.model.status.KafkaConnectStatus in project strimzi by strimzi.
The class ConnectIsolatedST, method testScaleConnectWithConnectorToZero.
@ParallelNamespaceTest
@Tag(SCALABILITY)
@Tag(CONNECTOR_OPERATOR)
void testScaleConnectWithConnectorToZero(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(clusterOperator.getDeploymentNamespace(), extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());
resourceManager.createResource(extensionContext, KafkaConnectTemplates.kafkaConnectWithFilePlugin(namespaceName, clusterName, 2)
    .editMetadata()
        .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
    .endMetadata()
    .build());
resourceManager.createResource(extensionContext, KafkaConnectorTemplates.kafkaConnector(clusterName)
    .editSpec()
        .withClassName("org.apache.kafka.connect.file.FileStreamSinkConnector")
        .addToConfig("file", Constants.DEFAULT_SINK_FILE_PATH)
        .addToConfig("key.converter", "org.apache.kafka.connect.storage.StringConverter")
        .addToConfig("value.converter", "org.apache.kafka.connect.storage.StringConverter")
        .addToConfig("topics", topicName)
    .endSpec()
    .build());
String connectDeploymentName = KafkaConnectResources.deploymentName(clusterName);
List<Pod> connectPods = kubeClient(namespaceName).listPods(Labels.STRIMZI_NAME_LABEL, KafkaConnectResources.deploymentName(clusterName));
assertThat(connectPods.size(), is(2));
// scale down
LOGGER.info("Scaling KafkaConnect down to zero");
KafkaConnectResource.replaceKafkaConnectResourceInSpecificNamespace(clusterName, kafkaConnect -> kafkaConnect.getSpec().setReplicas(0), namespaceName);
KafkaConnectUtils.waitForConnectReady(namespaceName, clusterName);
PodUtils.waitForPodsReady(namespaceName, kubeClient(namespaceName).getDeploymentSelectors(namespaceName, connectDeploymentName), 0, true);
connectPods = kubeClient(namespaceName).listPods(Labels.STRIMZI_NAME_LABEL, KafkaConnectResources.deploymentName(clusterName));
KafkaConnectStatus connectStatus = KafkaConnectResource.kafkaConnectClient().inNamespace(namespaceName).withName(clusterName).get().getStatus();
KafkaConnectorStatus connectorStatus = KafkaConnectorResource.kafkaConnectorClient().inNamespace(namespaceName).withName(clusterName).get().getStatus();
assertThat(connectPods.size(), is(0));
assertThat(connectStatus.getConditions().get(0).getType(), is(Ready.toString()));
assertThat(connectorStatus.getConditions().stream().anyMatch(condition -> condition.getType().equals(NotReady.toString())), is(true));
assertThat(connectorStatus.getConditions().stream().anyMatch(condition -> condition.getMessage().contains("has 0 replicas")), is(true));
}
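Both assertions on the connector conditions stream over getConditions() and match on type or message. A small reusable helper, sketched here as an assumption rather than an existing Strimzi utility, would make those checks more direct:
// Hypothetical helper, not part of Strimzi: find the first status condition with the given type.
static Optional<Condition> conditionOfType(List<Condition> conditions, String type) {
    return conditions.stream()
        .filter(condition -> type.equals(condition.getType()))
        .findFirst();
}
// Possible usage in the test above:
// assertThat(conditionOfType(connectorStatus.getConditions(), NotReady.toString())
//     .map(Condition::getMessage).orElse(""), containsString("has 0 replicas"));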
Use of io.strimzi.api.kafka.model.status.KafkaConnectStatus in project strimzi by strimzi.
The class ConnectIsolatedST, method testScaleConnectWithoutConnectorToZero.
@ParallelNamespaceTest
@Tag(SCALABILITY)
void testScaleConnectWithoutConnectorToZero(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(clusterOperator.getDeploymentNamespace(), extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());
resourceManager.createResource(extensionContext, KafkaConnectTemplates.kafkaConnectWithFilePlugin(namespaceName, clusterName, 2).build());
final String connectDeploymentName = KafkaConnectResources.deploymentName(clusterName);
List<Pod> connectPods = kubeClient(namespaceName).listPods(Labels.STRIMZI_NAME_LABEL, KafkaConnectResources.deploymentName(clusterName));
assertThat(connectPods.size(), is(2));
// scale down
LOGGER.info("Scaling KafkaConnect down to zero");
KafkaConnectResource.replaceKafkaConnectResourceInSpecificNamespace(clusterName, kafkaConnect -> kafkaConnect.getSpec().setReplicas(0), namespaceName);
KafkaConnectUtils.waitForConnectReady(namespaceName, clusterName);
PodUtils.waitForPodsReady(kubeClient(namespaceName).getDeploymentSelectors(namespaceName, connectDeploymentName), 0, true);
connectPods = kubeClient(namespaceName).listPods(Labels.STRIMZI_NAME_LABEL, KafkaConnectResources.deploymentName(clusterName));
KafkaConnectStatus connectStatus = KafkaConnectResource.kafkaConnectClient().inNamespace(namespaceName).withName(clusterName).get().getStatus();
assertThat(connectPods.size(), is(0));
assertThat(connectStatus.getConditions().get(0).getType(), is(Ready.toString()));
}
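KafkaConnectResource.kafkaConnectClient() used in these tests wraps the Fabric8 typed client. Reading the same status outside the test wrapper is also possible through Strimzi's Crds entry point; the following is only a sketch, assuming a KubernetesClient is already available (the client accessor, namespace, and resource name are placeholders):
// Sketch, not the project's wrapper: read KafkaConnectStatus through the Fabric8 typed client
// using Strimzi's Crds entry point. The client accessor below is an assumption.
KubernetesClient client = kubeClient().getClient(); // assumption: the test framework exposes the underlying client
KafkaConnectStatus status = Crds.kafkaConnectOperation(client)
    .inNamespace("my-namespace")
    .withName("my-connect")
    .get()
    .getStatus();
assertThat(status.getConditions().get(0).getType(), is(Ready.toString()));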
Use of io.strimzi.api.kafka.model.status.KafkaConnectStatus in project strimzi by strimzi.
The class KafkaConnectAssemblyOperatorTest, method createKafkaConnectCluster.
public void createKafkaConnectCluster(VertxTestContext context, KafkaConnect kc, boolean connectorOperator) {
ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(true);
var mockConnectOps = supplier.connectOperator;
DeploymentOperator mockDcOps = supplier.deploymentOperations;
PodDisruptionBudgetOperator mockPdbOps = supplier.podDisruptionBudgetOperator;
ConfigMapOperator mockCmOps = supplier.configMapOperations;
ServiceOperator mockServiceOps = supplier.serviceOperations;
NetworkPolicyOperator mockNetPolOps = supplier.networkPolicyOperator;
PodOperator mockPodOps = supplier.podOperations;
BuildConfigOperator mockBcOps = supplier.buildConfigOperations;
SecretOperator mockSecretOps = supplier.secretOperations;
CrdOperator<KubernetesClient, KafkaConnector, KafkaConnectorList> mockConnectorOps = supplier.kafkaConnectorOperator;
when(mockConnectorOps.listAsync(anyString(), any(Optional.class))).thenReturn(Future.succeededFuture(emptyList()));
when(mockConnectOps.get(kc.getMetadata().getNamespace(), kc.getMetadata().getName())).thenReturn(kc);
when(mockConnectOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kc));
ArgumentCaptor<Service> serviceCaptor = ArgumentCaptor.forClass(Service.class);
when(mockServiceOps.reconcile(any(), anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture());
ArgumentCaptor<Deployment> dcCaptor = ArgumentCaptor.forClass(Deployment.class);
when(mockDcOps.reconcile(any(), anyString(), anyString(), dcCaptor.capture())).thenReturn(Future.succeededFuture());
when(mockDcOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture());
when(mockDcOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
when(mockDcOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture());
when(mockSecretOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture());
when(mockPodOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildPodName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
when(mockBcOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildConfigName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
ArgumentCaptor<NetworkPolicy> npCaptor = ArgumentCaptor.forClass(NetworkPolicy.class);
when(mockNetPolOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.deploymentName(kc.getMetadata().getName())), npCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy())));
ArgumentCaptor<PodDisruptionBudget> pdbCaptor = ArgumentCaptor.forClass(PodDisruptionBudget.class);
when(mockPdbOps.reconcile(any(), anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture());
ArgumentCaptor<KafkaConnect> connectCaptor = ArgumentCaptor.forClass(KafkaConnect.class);
when(mockConnectOps.updateStatusAsync(any(), connectCaptor.capture())).thenReturn(Future.succeededFuture());
KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class);
when(mockConnectClient.list(anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList()));
ConnectorPlugin plugin1 = new ConnectorPluginBuilder().withConnectorClass("io.strimzi.MyClass").withType("sink").withVersion("1.0.0").build();
when(mockConnectClient.listConnectorPlugins(any(), anyString(), anyInt())).thenReturn(Future.succeededFuture(singletonList(plugin1)));
when(mockConnectClient.updateConnectLoggers(any(), anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture());
KafkaConnectAssemblyOperator ops = new KafkaConnectAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient);
KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS);
Checkpoint async = context.checkpoint();
ops.reconcile(new Reconciliation("test-trigger", KafkaConnect.RESOURCE_KIND, kc.getMetadata().getNamespace(), kc.getMetadata().getName())).onComplete(context.succeeding(v -> context.verify(() -> {
// Verify service
List<Service> capturedServices = serviceCaptor.getAllValues();
assertThat(capturedServices, hasSize(1));
Service service = capturedServices.get(0);
assertThat(service.getMetadata().getName(), is(connect.getServiceName()));
assertThat(service, is(connect.generateService()));
// Verify Deployment
List<Deployment> capturedDc = dcCaptor.getAllValues();
assertThat(capturedDc, hasSize(1));
Deployment dc = capturedDc.get(0);
assertThat(dc.getMetadata().getName(), is(connect.getName()));
Map<String, String> annotations = new HashMap<>();
annotations.put(Annotations.ANNO_STRIMZI_LOGGING_DYNAMICALLY_UNCHANGEABLE_HASH, Util.hashStub(Util.getLoggingDynamicallyUnmodifiableEntries(LOGGING_CONFIG)));
annotations.put(Annotations.ANNO_STRIMZI_AUTH_HASH, "0");
assertThat(dc, is(connect.generateDeployment(annotations, true, null, null)));
// Verify PodDisruptionBudget
List<PodDisruptionBudget> capturedPdb = pdbCaptor.getAllValues();
assertThat(capturedPdb.size(), is(1));
PodDisruptionBudget pdb = capturedPdb.get(0);
assertThat(pdb.getMetadata().getName(), is(connect.getName()));
assertThat(pdb, is(connect.generatePodDisruptionBudget()));
// Verify status
List<KafkaConnect> capturedConnects = connectCaptor.getAllValues();
assertThat(capturedConnects, hasSize(1));
KafkaConnectStatus connectStatus = capturedConnects.get(0).getStatus();
assertThat(connectStatus.getUrl(), is("http://foo-connect-api.test.svc:8083"));
assertThat(connectStatus.getReplicas(), is(connect.getReplicas()));
assertThat(connectStatus.getLabelSelector(), is(connect.getSelectorLabels().toSelectorString()));
assertThat(connectStatus.getConditions().get(0).getStatus(), is("True"));
assertThat(connectStatus.getConditions().get(0).getType(), is("Ready"));
if (connectorOperator) {
assertThat(npCaptor.getValue(), is(notNullValue()));
assertThat(connectStatus.getConnectorPlugins(), hasSize(1));
assertThat(connectStatus.getConnectorPlugins().get(0).getConnectorClass(), is("io.strimzi.MyClass"));
assertThat(connectStatus.getConnectorPlugins().get(0).getType(), is("sink"));
assertThat(connectStatus.getConnectorPlugins().get(0).getVersion(), is("1.0.0"));
} else {
assertThat(npCaptor.getValue(), is(nullValue()));
assertThat(connectStatus.getConnectorPlugins(), nullValue());
}
async.flag();
})));
}
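createKafkaConnectCluster above is a parameterized helper rather than a test method. A hypothetical entry point (the method name and the KafkaConnect fixture are assumptions; the real test class may build its resource differently) could invoke it with the connector operator enabled:
// Hypothetical @Test entry point for the helper above. The name and namespace match the URL
// asserted in the helper ("http://foo-connect-api.test.svc:8083"); the annotation enables the
// connector operator path that the connectorOperator=true branch verifies.
@Test
public void testCreateClusterWithConnectorOperator(VertxTestContext context) {
    KafkaConnect kc = new KafkaConnectBuilder()
        .withNewMetadata()
            .withName("foo")
            .withNamespace("test")
            .addToAnnotations("strimzi.io/use-connector-resources", "true")
        .endMetadata()
        .withNewSpec()
            .withReplicas(3)
            .withBootstrapServers("my-cluster-kafka-bootstrap:9092")
        .endSpec()
        .build();
    createKafkaConnectCluster(context, kc, true);
}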
Use of io.strimzi.api.kafka.model.status.KafkaConnectStatus in project strimzi by strimzi.
The class KafkaConnectBuildAssemblyOperatorKubeTest, method testUpdateWithBuildImageChangeOnKube.
@SuppressWarnings({ "checkstyle:MethodLength" })
@Test
public void testUpdateWithBuildImageChangeOnKube(VertxTestContext context) {
Plugin plugin1 = new PluginBuilder()
    .withName("plugin1")
    .withArtifacts(new JarArtifactBuilder().withUrl("https://my-domain.tld/my.jar").build())
    .build();
KafkaConnect oldKc = new KafkaConnectBuilder()
    .withNewMetadata().withName(NAME).withNamespace(NAMESPACE).endMetadata()
    .withNewSpec()
        .withReplicas(1)
        .withBootstrapServers("my-cluster-kafka-bootstrap:9092")
        .withNewBuild()
            .withNewDockerOutput().withImage(OUTPUT_IMAGE).withPushSecret("my-docker-credentials").endDockerOutput()
            .withPlugins(plugin1)
        .endBuild()
    .endSpec()
    .build();
KafkaConnectCluster oldConnect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKc, VERSIONS);
KafkaConnectBuild oldBuild = KafkaConnectBuild.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKc, VERSIONS);
KafkaConnect kc = new KafkaConnectBuilder(oldKc)
    .editSpec()
        .editBuild()
            .withNewDockerOutput().withImage("my-connect-build-2:blah").withPushSecret("my-docker-credentials").endDockerOutput()
            .withPlugins(plugin1)
        .endBuild()
    .endSpec()
    .build();
KafkaConnectBuild build = KafkaConnectBuild.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS);
// Prepare and get mocks
ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(true);
CrdOperator mockConnectOps = supplier.connectOperator;
DeploymentOperator mockDepOps = supplier.deploymentOperations;
PodDisruptionBudgetOperator mockPdbOps = supplier.podDisruptionBudgetOperator;
PodDisruptionBudgetV1Beta1Operator mockPdbOpsV1Beta1 = supplier.podDisruptionBudgetV1Beta1Operator;
ConfigMapOperator mockCmOps = supplier.configMapOperations;
ServiceOperator mockServiceOps = supplier.serviceOperations;
NetworkPolicyOperator mockNetPolOps = supplier.networkPolicyOperator;
PodOperator mockPodOps = supplier.podOperations;
BuildConfigOperator mockBcOps = supplier.buildConfigOperations;
SecretOperator mockSecretOps = supplier.secretOperations;
CrdOperator<KubernetesClient, KafkaConnector, KafkaConnectorList> mockConnectorOps = supplier.kafkaConnectorOperator;
// Mock KafkaConnector ops
when(mockConnectorOps.listAsync(anyString(), any(Optional.class))).thenReturn(Future.succeededFuture(emptyList()));
// Mock KafkaConnect ops
when(mockConnectOps.get(NAMESPACE, NAME)).thenReturn(kc);
when(mockConnectOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kc));
// Mock and capture service ops
ArgumentCaptor<Service> serviceCaptor = ArgumentCaptor.forClass(Service.class);
when(mockServiceOps.reconcile(any(), anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture());
// Mock and capture deployment ops
ArgumentCaptor<Deployment> depCaptor = ArgumentCaptor.forClass(Deployment.class);
when(mockDepOps.reconcile(any(), anyString(), anyString(), depCaptor.capture())).thenReturn(Future.succeededFuture());
when(mockDepOps.getAsync(eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)))).thenAnswer(inv -> {
Deployment dep = oldConnect.generateDeployment(emptyMap(), false, null, null);
dep.getSpec().getTemplate().getMetadata().getAnnotations().put(Annotations.STRIMZI_IO_CONNECT_BUILD_REVISION, oldBuild.generateDockerfile().hashStub() + Util.hashStub(oldBuild.getBuild().getOutput().getImage()));
dep.getSpec().getTemplate().getSpec().getContainers().get(0).setImage("my-connect-build-2@sha256:olddigest");
return Future.succeededFuture(dep);
});
when(mockDepOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
when(mockDepOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
when(mockDepOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
when(mockDepOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture());
// Mock and capture CM ops
when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
ArgumentCaptor<ConfigMap> dockerfileCaptor = ArgumentCaptor.forClass(ConfigMap.class);
when(mockCmOps.reconcile(any(), anyString(), eq(KafkaConnectResources.dockerFileConfigMapName(NAME)), dockerfileCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
// Mock and capture Pod ops
ArgumentCaptor<Pod> builderPodCaptor = ArgumentCaptor.forClass(Pod.class);
when(mockPodOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), builderPodCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
Pod terminatedPod = new PodBuilder()
    .withNewMetadata().withName(KafkaConnectResources.buildPodName(NAME)).withNamespace(NAMESPACE).endMetadata()
    .withNewSpec().endSpec()
    .withNewStatus()
        .withContainerStatuses(new ContainerStatusBuilder()
            .withNewState()
                .withNewTerminated().withExitCode(0).withMessage("my-connect-build-2@sha256:blablabla").endTerminated()
            .endState()
            .build())
    .endStatus()
    .build();
when(mockPodOps.waitFor(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture((Void) null));
when(mockPodOps.getAsync(eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)))).thenReturn(Future.succeededFuture(null), Future.succeededFuture(terminatedPod));
// Mock and capture BuildConfig ops
when(mockBcOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
// Mock and capture NP ops
when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy())));
// Mock and capture PDB ops
when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture());
when(mockPdbOpsV1Beta1.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture());
// Mock and capture KafkaConnect ops for status update
ArgumentCaptor<KafkaConnect> connectCaptor = ArgumentCaptor.forClass(KafkaConnect.class);
when(mockConnectOps.updateStatusAsync(any(), connectCaptor.capture())).thenReturn(Future.succeededFuture());
// Mock KafkaConnect API client
KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class);
// Prepare and run reconciliation
KafkaConnectAssemblyOperator ops = new KafkaConnectAssemblyOperator(vertx, new PlatformFeaturesAvailability(false, kubernetesVersion), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient);
KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS);
Checkpoint async = context.checkpoint();
ops.reconcile(new Reconciliation("test-trigger", KafkaConnect.RESOURCE_KIND, NAMESPACE, NAME)).onComplete(context.succeeding(v -> context.verify(() -> {
// Verify Deployment
List<Deployment> capturedDeps = depCaptor.getAllValues();
assertThat(capturedDeps, hasSize(1));
Deployment dep = capturedDeps.get(0);
assertThat(dep.getMetadata().getName(), is(connect.getName()));
assertThat(dep.getSpec().getTemplate().getSpec().getContainers().get(0).getImage(), is("my-connect-build-2@sha256:blablabla"));
assertThat(Annotations.stringAnnotation(dep.getSpec().getTemplate(), Annotations.STRIMZI_IO_CONNECT_BUILD_REVISION, null), is(build.generateDockerfile().hashStub() + Util.hashStub(build.getBuild().getOutput().getImage())));
// Verify ConfigMap
List<ConfigMap> capturedCms = dockerfileCaptor.getAllValues();
assertThat(capturedCms, hasSize(1));
ConfigMap dockerfileCm = capturedCms.get(0);
assertThat(dockerfileCm.getData().containsKey("Dockerfile"), is(true));
assertThat(dockerfileCm.getData().get("Dockerfile"), is(build.generateDockerfile().getDockerfile()));
// Verify builder Pod
List<Pod> capturedBuilderPods = builderPodCaptor.getAllValues();
assertThat(capturedBuilderPods, hasSize(2));
assertThat(capturedBuilderPods.stream().filter(pod -> pod != null).collect(Collectors.toList()), hasSize(1));
// Verify status
List<KafkaConnect> capturedConnects = connectCaptor.getAllValues();
assertThat(capturedConnects, hasSize(1));
KafkaConnectStatus connectStatus = capturedConnects.get(0).getStatus();
assertThat(connectStatus.getConditions().get(0).getStatus(), is("True"));
assertThat(connectStatus.getConditions().get(0).getType(), is("Ready"));
async.flag();
})));
}
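The STRIMZI_IO_CONNECT_BUILD_REVISION assertion above shows how the build revision annotation is composed: the short hash of the generated Dockerfile concatenated with the short hash of the configured output image. Restated as a small illustration reusing the same calls from the test (the variable names are placeholders):
// The expected revision combines the Dockerfile hash stub with the output image hash stub,
// matching the value asserted against the deployment annotation above.
String dockerfileHash = build.generateDockerfile().hashStub();
String outputImageHash = Util.hashStub(build.getBuild().getOutput().getImage());
String expectedBuildRevision = dockerfileHash + outputImageHash;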