Example 1 with Condition

use of io.fabric8.kubernetes.api.model.Condition in project fabric8 by jboss-fuse.

the class SubsystemResolveContext method findProviders.

@Override
public List<Capability> findProviders(Requirement requirement) {
    List<Capability> caps = new ArrayList<Capability>();
    Region requirerRegion = getRegion(requirement.getResource());
    if (requirerRegion != null) {
        Map<Requirement, Collection<Capability>> resMap = repository.findProviders(Collections.singleton(requirement));
        Collection<Capability> res = resMap != null ? resMap.get(requirement) : null;
        if (res != null && !res.isEmpty()) {
            caps.addAll(res);
        } else if (globalRepository != null) {
            // Only bring in external resources for non-optional requirements
            if (!RESOLUTION_OPTIONAL.equals(requirement.getDirectives().get(RESOLUTION_DIRECTIVE))) {
                resMap = globalRepository.findProviders(Collections.singleton(requirement));
                res = resMap != null ? resMap.get(requirement) : null;
                if (res != null && !res.isEmpty()) {
                    caps.addAll(res);
                }
            }
        }
        // Use the digraph to prune non visible capabilities
        Visitor visitor = new Visitor(caps);
        requirerRegion.visitSubgraph(visitor);
        Collection<Capability> allowed = visitor.getAllowed();
        caps.retainAll(allowed);
        // If the same resource (same symbolic name and version) is provided
        // from multiple regions, keep only the preferred provider for each identity
        if (caps.size() > 1) {
            Map<String, Resource> providers = new HashMap<String, Resource>();
            for (Capability cap : caps) {
                Resource resource = cap.getResource();
                String id = getSymbolicName(resource) + "|" + getVersion(resource);
                Resource prev = providers.get(id);
                if (prev != null && prev != resource) {
                    Region r1 = getRegion(prev);
                    Region r2 = getRegion(resource);
                    boolean r2canSeeR1 = isResourceVisibleFromRegion(prev, r2);
                    boolean r1canSeeR2 = isResourceVisibleFromRegion(resource, r1);
                    if (r1canSeeR2 && r2canSeeR1) {
                        // r1 and r2 can see each other
                        int reqDiff = prev.getRequirements(null).size() - resource.getRequirements(null).size();
                        if (reqDiff == 0) {
                            String r1Name = getRegion(prev).getName();
                            String r2Name = getRegion(resource).getName();
                            int c = r1Name.compareTo(r2Name);
                            if (c == 0) {
                                // One of the resources has to be a bundle, use that one
                                c = (prev instanceof BundleRevision) ? -1 : +1;
                            }
                            resource = c < 0 ? prev : resource;
                        } else {
                            // one of the resources has fewer requirements, so use that one.
                            // This can be the case when one resource has conditionals, which add further
                            // requirements to the conditional feature.
                            resource = reqDiff < 0 ? prev : resource;
                        }
                    } else {
                        // only one region can see the other, grab the correct one
                        resource = r1canSeeR2 ? prev : resource;
                    }
                }
                providers.put(id, resource);
            }
            for (Iterator<Capability> it = caps.iterator(); it.hasNext(); ) {
                Capability cap = it.next();
                if (!providers.values().contains(cap.getResource())) {
                    it.remove();
                }
            }
        }
        // Sort caps
        Collections.sort(caps, candidateComparator);
    }
    return caps;
}
Also used : HostedCapability(org.osgi.service.resolver.HostedCapability) Capability(org.osgi.resource.Capability) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) Resource(org.osgi.resource.Resource) Requirement(org.osgi.resource.Requirement) ResourceUtils.addIdentityRequirement(io.fabric8.agent.resolver.ResourceUtils.addIdentityRequirement) BundleRevision(org.osgi.framework.wiring.BundleRevision) Region(org.eclipse.equinox.region.Region) Collection(java.util.Collection)
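
The tie-breaking logic above can be read in isolation: when two providers share the same symbolic name and version, the resolver prefers the one with fewer requirements and otherwise falls back to comparing region names. The following is an illustrative sketch of that policy as a standalone Comparator, not the project's code; getRegionName is a hypothetical stand-in for the region lookup performed in findProviders.

import java.util.Comparator;
import org.osgi.resource.Resource;

final class ProviderPreference implements Comparator<Resource> {

    @Override
    public int compare(Resource a, Resource b) {
        // the resource with fewer requirements sorts first
        int reqDiff = a.getRequirements(null).size() - b.getRequirements(null).size();
        if (reqDiff != 0) {
            return reqDiff;
        }
        // otherwise fall back to a stable ordering by region name
        return getRegionName(a).compareTo(getRegionName(b));
    }

    private String getRegionName(Resource resource) {
        return "root"; // hypothetical; the real code resolves this through the region digraph
    }
}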

Example 2 with Condition

use of io.fabric8.kubernetes.api.model.Condition in project fabric8-maven-plugin by fabric8io.

the class AbstractResourceMojo method writeResourcesIndividualAndComposite.

public static File writeResourcesIndividualAndComposite(KubernetesList resources, File resourceFileBase, ResourceFileType resourceFileType, Logger log, Boolean generateRoute) throws MojoExecutionException {
    // Create a new item list; it will be used to generate openshift.yml
    List<HasMetadata> newItemList = new ArrayList<>();
    if (!generateRoute) {
        // if the flag is set to false, remove the Route resources from the item list
        for (HasMetadata item : resources.getItems()) {
            if (item.getKind().equalsIgnoreCase("Route")) {
                continue;
            }
            newItemList.add(item);
        }
        // update the resources with the new list
        resources.setItems(newItemList);
    }
    // entity is the object that will be passed to writeResource for openshift.yml;
    // if generateRoute is false it refers to resources with the new (filtered) item list,
    // otherwise it refers to resources with the original item list.
    Object entity = resources;
    // if the list contains a single Template, unwrap it;
    // resources already holds the new or old item list depending on the condition above,
    // so the Route removal needs no special handling here.
    Template template = getSingletonTemplate(resources);
    if (template != null) {
        entity = template;
    }
    File file = writeResource(resourceFileBase, entity, resourceFileType);
    // write separate files, one for each resource item;
    // the resources passed to writeIndividualResources also carry the updated item list.
    writeIndividualResources(resources, resourceFileBase, resourceFileType, log, generateRoute);
    return file;
}
Also used : HasMetadata(io.fabric8.kubernetes.api.model.HasMetadata) ArrayList(java.util.ArrayList) File(java.io.File) Template(io.fabric8.openshift.api.model.Template)
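
The Route handling above is easy to exercise on its own. Below is a minimal, self-contained sketch of the same filtering step, using the standard fabric8 kubernetes-model builders; the resource names are made up for illustration, and the stream-based filter is just an equivalent way of writing the loop in the Mojo.

import java.util.List;
import java.util.stream.Collectors;
import io.fabric8.kubernetes.api.model.HasMetadata;
import io.fabric8.kubernetes.api.model.KubernetesList;
import io.fabric8.kubernetes.api.model.KubernetesListBuilder;
import io.fabric8.kubernetes.api.model.ServiceBuilder;
import io.fabric8.openshift.api.model.RouteBuilder;

public class RouteFilterSketch {
    public static void main(String[] args) {
        KubernetesList resources = new KubernetesListBuilder()
                .addToItems(new ServiceBuilder().withNewMetadata().withName("my-svc").endMetadata().build())
                .addToItems(new RouteBuilder().withNewMetadata().withName("my-route").endMetadata().build())
                .build();

        boolean generateRoute = false;
        if (!generateRoute) {
            // drop every item whose kind is "Route", as the Mojo does when the flag is false
            List<HasMetadata> filtered = resources.getItems().stream()
                    .filter(item -> !"Route".equalsIgnoreCase(item.getKind()))
                    .collect(Collectors.toList());
            resources.setItems(filtered);
        }
        // only the Service remains and the list can be passed on to writeResource
        System.out.println(resources.getItems().size()); // prints 1
    }
}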

Example 3 with Condition

use of io.fabric8.kubernetes.api.model.Condition in project fabric8 by fabric8io.

the class BrokerProducerConsumerIT method testMQConsumer.

@Test
public void testMQConsumer() throws Exception {
    assertThat(client).replicationController(brokerReplicationControllerId).isNotNull();
    assertThat(client).replicationController(consumerReplicationControllerId).isNotNull();
    assertThat(client).pods().runningStatus().filterNamespace(session.getNamespace()).haveAtLeast(1, new Condition<Pod>() {

        @Override
        public boolean matches(Pod podSchema) {
            return true;
        }
    });
    Asserts.assertWaitFor(10 * 60 * 1000, new Block() {

        @Override
        public void invoke() throws Exception {
            J4pClient brokerClient = jolokiaClients.assertClientForReplicationController(brokerReplicationControllerId);
            J4pClient consumerClient = jolokiaClients.assertClientForReplicationController(consumerReplicationControllerId);
            assertThat(consumerClient).stringAttribute("org.apache.camel:context=camel-1,type=context,name=\"camel-1\"", "State").isEqualTo("Started");
            assertThat(brokerClient).longAttribute("org.apache.activemq:type=Broker,brokerName=default,destinationType=Queue,destinationName=TEST.FOO", "EnqueueCount").isGreaterThan(1000);
            assertThat(brokerClient).longAttribute("org.apache.activemq:type=Broker,brokerName=default,destinationType=Queue,destinationName=TEST.FOO", "DequeueCount").isGreaterThan(1000);
        }
    });
}
Also used : Pod(io.fabric8.kubernetes.api.model.Pod) J4pClient(org.jolokia.client.J4pClient) Block(io.fabric8.utils.Block) Test(org.junit.Test)
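
The anonymous Condition<Pod> in this test matches every pod and only relies on the running-status filter applied earlier in the chain. A slightly more selective variant is sketched below; it assumes the Condition in play is AssertJ's org.assertj.core.api.Condition, as commonly used by the fabric8 kubernetes-assertions DSL, and checks the pod's reported phase instead.

import org.assertj.core.api.Condition;
import io.fabric8.kubernetes.api.model.Pod;

final class PodConditions {

    // Only match pods whose status phase reports "Running".
    static Condition<Pod> runningPod() {
        return new Condition<Pod>("running pod") {
            @Override
            public boolean matches(Pod pod) {
                return pod.getStatus() != null
                        && "Running".equals(pod.getStatus().getPhase());
            }
        };
    }
}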

Example 4 with Condition

use of io.fabric8.kubernetes.api.model.Condition in project fabric8-maven-plugin by fabric8io.

the class ResourceMojo method writeResourcesIndividualAndComposite.

public static File writeResourcesIndividualAndComposite(KubernetesList resources, File resourceFileBase, ResourceFileType resourceFileType, Logger log) throws MojoExecutionException {
    // Creating a new item list (unused in this overload, which has no generateRoute flag
    // and therefore does not filter Route resources)
    List<HasMetadata> newItemList = new ArrayList<>();
    // entity is the object that will be passed to writeResource for openshift.yml
    Object entity = resources;
    // if the list contains a single Template, unwrap it before writing openshift.yml
    Template template = getSingletonTemplate(resources);
    if (template != null) {
        entity = template;
    }
    File file = writeResource(resourceFileBase, entity, resourceFileType);
    // write separate files, one for each resource item
    writeIndividualResources(resources, resourceFileBase, resourceFileType, log);
    return file;
}
Also used : HasMetadata(io.fabric8.kubernetes.api.model.HasMetadata) ArrayList(java.util.ArrayList) File(java.io.File) ResourceMojoUtil.getSingletonTemplate(io.fabric8.maven.plugin.mojo.build.ResourceMojoUtil.getSingletonTemplate) Template(io.fabric8.openshift.api.model.Template)
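
getSingletonTemplate is not shown on this page; the helper below is a hypothetical mirror of what the surrounding comments describe (unwrap the list when it holds exactly one Template so that the Template itself, not the wrapping KubernetesList, ends up in openshift.yml). The name singletonTemplateOrNull and the exact checks are assumptions made for illustration, not the plugin's implementation.

import java.util.List;
import io.fabric8.kubernetes.api.model.HasMetadata;
import io.fabric8.kubernetes.api.model.KubernetesList;
import io.fabric8.openshift.api.model.Template;

final class SingletonTemplateSketch {

    // Return the single Template when the list wraps exactly one item of that type,
    // otherwise null so the caller keeps writing the whole KubernetesList.
    static Template singletonTemplateOrNull(KubernetesList resources) {
        List<HasMetadata> items = resources.getItems();
        if (items != null && items.size() == 1 && items.get(0) instanceof Template) {
            return (Template) items.get(0);
        }
        return null;
    }
}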

Example 5 with Condition

use of io.strimzi.api.kafka.model.status.Condition in project strimzi by strimzi.

the class KafkaConnectAssemblyOperatorMockTest method testPauseReconcileUnpause.

@Test
public void testPauseReconcileUnpause(VertxTestContext context) {
    setConnectResource(new KafkaConnectBuilder().withMetadata(new ObjectMetaBuilder().withName(CLUSTER_NAME).withNamespace(NAMESPACE).withLabels(TestUtils.map("foo", "bar")).withAnnotations(singletonMap("strimzi.io/pause-reconciliation", "true")).build()).withNewSpec().withReplicas(replicas).endSpec().build());
    KafkaConnectApi mock = mock(KafkaConnectApi.class);
    when(mock.list(anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList()));
    when(mock.listConnectorPlugins(any(), anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList()));
    Checkpoint async = context.checkpoint();
    createConnectCluster(context, mock, true).onComplete(context.succeeding()).compose(v -> {
        LOGGER.info("Reconciling again -> update");
        return kco.reconcile(new Reconciliation("test-trigger", KafkaConnect.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME));
    }).onComplete(context.succeeding(v -> context.verify(() -> {
        Resource<KafkaConnect> resource = Crds.kafkaConnectOperation(mockClient).inNamespace(NAMESPACE).withName(CLUSTER_NAME);
        if (resource.get().getStatus() == null) {
            fail();
        }
        List<Condition> conditions = resource.get().getStatus().getConditions();
        boolean conditionFound = false;
        if (conditions != null && !conditions.isEmpty()) {
            for (Condition condition : conditions) {
                if ("ReconciliationPaused".equals(condition.getType())) {
                    conditionFound = true;
                    break;
                }
            }
        }
        assertTrue(conditionFound);
        async.flag();
    }))).compose(v -> {
        setConnectResource(new KafkaConnectBuilder().withMetadata(new ObjectMetaBuilder().withName(CLUSTER_NAME).withNamespace(NAMESPACE).withLabels(TestUtils.map("foo", "bar")).withAnnotations(singletonMap("strimzi.io/pause-reconciliation", "false")).build()).withNewSpec().withReplicas(replicas).endSpec().build());
        LOGGER.info("Reconciling again -> update");
        return kco.reconcile(new Reconciliation("test-trigger", KafkaConnect.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME));
    }).onComplete(context.succeeding(v -> context.verify(() -> {
        Resource<KafkaConnect> resource = Crds.kafkaConnectOperation(mockClient).inNamespace(NAMESPACE).withName(CLUSTER_NAME);
        if (resource.get().getStatus() == null) {
            fail();
        }
        List<Condition> conditions = resource.get().getStatus().getConditions();
        boolean conditionFound = false;
        if (conditions != null && !conditions.isEmpty()) {
            for (Condition condition : conditions) {
                if ("ReconciliationPaused".equals(condition.getType())) {
                    conditionFound = true;
                    break;
                }
            }
        }
        assertFalse(conditionFound);
        async.flag();
    })));
}
Also used : CoreMatchers.is(org.hamcrest.CoreMatchers.is) KafkaConnectorList(io.strimzi.api.kafka.KafkaConnectorList) KafkaConnector(io.strimzi.api.kafka.model.KafkaConnector) MockKube(io.strimzi.test.mockkube.MockKube) CoreMatchers.notNullValue(org.hamcrest.CoreMatchers.notNullValue) AfterAll(org.junit.jupiter.api.AfterAll) Resource(io.fabric8.kubernetes.client.dsl.Resource) DefaultAdminClientProvider(io.strimzi.operator.common.DefaultAdminClientProvider) ExtendWith(org.junit.jupiter.api.extension.ExtendWith) Assertions.assertFalse(org.junit.jupiter.api.Assertions.assertFalse) BeforeAll(org.junit.jupiter.api.BeforeAll) ResourceOperatorSupplier(io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier) ResourceUtils(io.strimzi.operator.cluster.ResourceUtils) KafkaConnect(io.strimzi.api.kafka.model.KafkaConnect) Collections.emptyList(java.util.Collections.emptyList) KafkaVersion(io.strimzi.operator.cluster.model.KafkaVersion) KafkaConnectBuilder(io.strimzi.api.kafka.model.KafkaConnectBuilder) VertxExtension(io.vertx.junit5.VertxExtension) Future(io.vertx.core.Future) Test(org.junit.jupiter.api.Test) List(java.util.List) Logger(org.apache.logging.log4j.Logger) Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue) Checkpoint(io.vertx.junit5.Checkpoint) Condition(io.strimzi.api.kafka.model.status.Condition) KafkaConnectList(io.strimzi.api.kafka.KafkaConnectList) PlatformFeaturesAvailability(io.strimzi.operator.PlatformFeaturesAvailability) ClusterOperatorConfig(io.strimzi.operator.cluster.ClusterOperatorConfig) Mockito.mock(org.mockito.Mockito.mock) VertxTestContext(io.vertx.junit5.VertxTestContext) Assertions.fail(org.junit.jupiter.api.Assertions.fail) ArgumentMatchers.any(org.mockito.ArgumentMatchers.any) BackOff(io.strimzi.operator.common.BackOff) Crds(io.strimzi.api.kafka.Crds) FeatureGates(io.strimzi.operator.cluster.FeatureGates) KafkaVersionTestUtils(io.strimzi.operator.cluster.KafkaVersionTestUtils) ZookeeperLeaderFinder(io.strimzi.operator.cluster.operator.resource.ZookeeperLeaderFinder) TestUtils(io.strimzi.test.TestUtils) Collections.singletonMap(java.util.Collections.singletonMap) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) ArgumentMatchers.anyInt(org.mockito.ArgumentMatchers.anyInt) CoreMatchers.nullValue(org.hamcrest.CoreMatchers.nullValue) ObjectMetaBuilder(io.fabric8.kubernetes.api.model.ObjectMetaBuilder) Promise(io.vertx.core.Promise) KubernetesVersion(io.strimzi.operator.KubernetesVersion) Vertx(io.vertx.core.Vertx) Mockito.when(org.mockito.Mockito.when) Mockito.verify(org.mockito.Mockito.verify) Reconciliation(io.strimzi.operator.common.Reconciliation) AfterEach(org.junit.jupiter.api.AfterEach) Mockito.never(org.mockito.Mockito.never) KubernetesClient(io.fabric8.kubernetes.client.KubernetesClient) DefaultZookeeperScalerProvider(io.strimzi.operator.cluster.operator.resource.DefaultZookeeperScalerProvider) LogManager(org.apache.logging.log4j.LogManager) Collections(java.util.Collections) KafkaConnectResources(io.strimzi.api.kafka.model.KafkaConnectResources) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) Condition(io.strimzi.api.kafka.model.status.Condition) KafkaConnectBuilder(io.strimzi.api.kafka.model.KafkaConnectBuilder) Checkpoint(io.vertx.junit5.Checkpoint) Reconciliation(io.strimzi.operator.common.Reconciliation) KafkaConnect(io.strimzi.api.kafka.model.KafkaConnect) ObjectMetaBuilder(io.fabric8.kubernetes.api.model.ObjectMetaBuilder) Test(org.junit.jupiter.api.Test)
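
The null-safe search for a condition of a given type appears twice in the verify blocks above. As an equivalent formulation (illustration only, not a change to the test's logic), it can be folded into a small stream-based helper:

import java.util.List;
import io.strimzi.api.kafka.model.status.Condition;

final class ConditionChecks {

    // True when the list is non-null and contains a condition of the given type.
    static boolean hasCondition(List<Condition> conditions, String type) {
        return conditions != null
                && conditions.stream().anyMatch(c -> type.equals(c.getType()));
    }
    // e.g. assertTrue(hasCondition(resource.get().getStatus().getConditions(), "ReconciliationPaused"));
}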

Aggregations

Condition (io.strimzi.api.kafka.model.status.Condition): 46
Collections (java.util.Collections): 36
MatcherAssert.assertThat (org.hamcrest.MatcherAssert.assertThat): 32
Vertx (io.vertx.core.Vertx): 30
Labels (io.strimzi.operator.common.model.Labels): 28
Future (io.vertx.core.Future): 28
BeforeAll (org.junit.jupiter.api.BeforeAll): 28
Map (java.util.Map): 27
Test (org.junit.jupiter.api.Test): 25
Promise (io.vertx.core.Promise): 22
List (java.util.List): 21
Checkpoint (io.vertx.junit5.Checkpoint): 20
VertxExtension (io.vertx.junit5.VertxExtension): 20
VertxTestContext (io.vertx.junit5.VertxTestContext): 20
CoreMatchers.is (org.hamcrest.CoreMatchers.is): 20
AfterAll (org.junit.jupiter.api.AfterAll): 20
ExtendWith (org.junit.jupiter.api.extension.ExtendWith): 20
Annotations (io.strimzi.operator.common.Annotations): 18
HasMetadata (io.fabric8.kubernetes.api.model.HasMetadata): 16
KubernetesClient (io.fabric8.kubernetes.client.KubernetesClient): 16