
Example 6 with ClusterInfoException

Use of net.dempsy.cluster.ClusterInfoException in project dempsy-commons by Dempsy.

In the class LocalClusterSessionFactory, the method doormdir:

private static synchronized EntryAndParent doormdir(final String path) throws ClusterInfoException {
    final Entry ths = entries.get(path);
    if (ths == null)
        throw new ClusterInfoException("rmdir of non existant node \"" + path + "\"");
    final Entry parent = entries.get(parent(path));
    entries.remove(path);
    if (parent != null) {
        final int lastSlash = path.lastIndexOf('/');
        parent.children.remove(path.substring(lastSlash + 1));
    }
    return new EntryAndParent(ths, parent);
}
Also used : ClusterInfoException(net.dempsy.cluster.ClusterInfoException)
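
For orientation, here is a minimal sketch (not part of the original example) of how this exception typically surfaces through the public session API rather than through the private helper. It assumes LocalClusterSessionFactory lives in net.dempsy.cluster.local and that createSession() and rmdir(String) carry the signatures used elsewhere in dempsy-commons; the class name and path are purely illustrative.

import net.dempsy.cluster.ClusterInfoException;
import net.dempsy.cluster.ClusterInfoSession;
import net.dempsy.cluster.local.LocalClusterSessionFactory;

public class RmdirMissingNodeSketch {
    public static void main(final String[] args) throws Exception {
        final ClusterInfoSession session = new LocalClusterSessionFactory().createSession();
        try {
            // the path was never created, so doormdir (above) throws
            session.rmdir("/application/never-created");
        } catch (final ClusterInfoException expected) {
            // the message reads "rmdir of non existant node ..." as built in doormdir
            System.out.println("expected: " + expected.getMessage());
        }
    }
}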

Example 7 with ClusterInfoException

Use of net.dempsy.cluster.ClusterInfoException in project Dempsy by Dempsy.

In the class OutgoingDispatcher, the method start:

@Override
public void start(final Infrastructure infra) {
    final ClusterInfoSession session = infra.getCollaborator();
    final String nodesDir = infra.getRootPaths().nodesDir;
    checkup = new PersistentTask(LOGGER, isRunning, infra.getScheduler(), RETRY_TIMEOUT) {

        @Override
        public boolean execute() {
            try {
                // collect up all NodeInfo's known about.
                session.recursiveMkdir(nodesDir, null, DirMode.PERSISTENT, DirMode.PERSISTENT);
                final Collection<String> nodeDirs = session.getSubdirs(nodesDir, this);
                final Set<NodeInformation> alreadySeen = new HashSet<>();
                // get all of the subdirectories NodeInformations
                for (final String subdir : nodeDirs) {
                    final NodeInformation ni = (NodeInformation) session.getData(nodesDir + "/" + subdir, null);
                    if (ni == null) {
                        LOGGER.warn("A node directory was empty at " + subdir);
                        return false;
                    }
                    // see if node info is dupped.
                    if (alreadySeen.contains(ni)) {
                        LOGGER.warn("The node " + ni.nodeAddress + " seems to be registed more than once.");
                        continue;
                    }
                    if (ni.clusterInfoByClusterId.size() == 0) {
                        // it's ALL adaptor so there's no sense in dealing with it
                        LOGGER.trace("NodeInformation {} appears to be only an Adaptor.", ni);
                        continue;
                    }
                    alreadySeen.add(ni);
                }
                // check to see if there's new nodes.
                final ApplicationState.Update ud = outbounds.get().update(alreadySeen, thisNode, thisNodeId);
                if (!ud.change()) {
                    isReady.set(true);
                    // nothing to update.
                    return true;
                } else if (LOGGER.isTraceEnabled())
                    LOGGER.trace("Updating for " + thisNodeId);
                // otherwise we will be making changes so remove the current ApplicationState
                // this can cause instability.
                final ApplicationState obs = outbounds.getAndSet(null);
                try {
                    final ApplicationState newState = obs.apply(ud, tmanager, statsCollector, manager);
                    outbounds.set(newState);
                    isReady.set(true);
                    return true;
                } catch (final RuntimeException rte) {
                    // if we threw an exception after clearing the outbounds we need to restore it.
                    // This is likely a configuration error so we should probably warn about it.
                    LOGGER.warn("Unexpected exception while applying a topology update", rte);
                    outbounds.set(obs);
                    throw rte;
                }
            } catch (final ClusterInfoException e) {
                final String message = "Failed to find outgoing route information. Will retry shortly.";
                if (LOGGER.isTraceEnabled())
                    LOGGER.debug(message, e);
                else
                    LOGGER.debug(message);
                return false;
            }
        }

        @Override
        public String toString() {
            return "find nodes to route to";
        }
    };
    outbounds.set(new ApplicationState(tmanager, thisNode));
    isRunning.set(true);
    checkup.process();
}
Also used : HashSet(java.util.HashSet) Set(java.util.Set) ApplicationState(net.dempsy.intern.ApplicationState) ClusterInfoException(net.dempsy.cluster.ClusterInfoException) Collection(java.util.Collection) ClusterInfoSession(net.dempsy.cluster.ClusterInfoSession) SafeString(net.dempsy.util.SafeString) PersistentTask(net.dempsy.utils.PersistentTask)
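
The retry idiom above is the part worth studying: execute() returning false (for example after catching a ClusterInfoException) tells the PersistentTask to reschedule itself, while passing `this` to getSubdirs(...) registers the task as the watcher so any topology change re-runs it. Below is a stripped-down sketch of that shape with the routing work elided; the class and method names and the RETRY_TIMEOUT value are made up, and it assumes the scheduler handed to PersistentTask is the AutoDisposeSingleThreadScheduler that appears in Example 10's import list.

import java.util.Collection;
import java.util.concurrent.atomic.AtomicBoolean;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import net.dempsy.cluster.ClusterInfoException;
import net.dempsy.cluster.ClusterInfoSession;
import net.dempsy.cluster.DirMode;
import net.dempsy.util.executor.AutoDisposeSingleThreadScheduler;
import net.dempsy.utils.PersistentTask;

public class RouteDiscoverySketch {
    private static final Logger LOGGER = LoggerFactory.getLogger(RouteDiscoverySketch.class);
    private static final long RETRY_TIMEOUT = 500L; // illustrative retry period in milliseconds

    public static PersistentTask routeCheckup(final ClusterInfoSession session, final AutoDisposeSingleThreadScheduler scheduler,
            final String nodesDir, final AtomicBoolean isRunning) {
        return new PersistentTask(LOGGER, isRunning, scheduler, RETRY_TIMEOUT) {
            @Override
            public boolean execute() {
                try {
                    // make sure the parent path exists before listing it
                    session.recursiveMkdir(nodesDir, null, DirMode.PERSISTENT, DirMode.PERSISTENT);
                    // passing 'this' registers the task as the watcher: a change under
                    // nodesDir triggers another call to execute()
                    final Collection<String> nodeDirs = session.getSubdirs(nodesDir, this);
                    LOGGER.trace("Found {} node directories.", nodeDirs.size());
                    // ... read each node's data and rebuild the routing state here ...
                    return true; // done until the watcher fires again
                } catch (final ClusterInfoException e) {
                    LOGGER.debug("Cluster info not available yet. Will retry shortly.", e);
                    return false; // reschedules the task after RETRY_TIMEOUT
                }
            }
        };
    }
}

As in start() above, the task only begins doing work once process() is called on it.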

Example 8 with ClusterInfoException

Use of net.dempsy.cluster.ClusterInfoException in project Dempsy by Dempsy.

In the class SimpleInboundSide, the method start:

@Override
public void start(final Infrastructure infra) {
    this.session = infra.getCollaborator();
    this.rootDir = infra.getRootPaths().clustersDir + "/" + clusterId.clusterName;
    this.registerer = new PersistentTask(LOGGER, isRunning, infra.getScheduler(), RETRY_TIMEOUT) {

        @Override
        public boolean execute() {
            try {
                // check if we're still here.
                if (actualDir != null) {
                    // is actualDir still there?
                    if (session.exists(actualDir, null)) {
                        isReady.set(true);
                        return true;
                    }
                }
                session.recursiveMkdir(rootDir, null, DirMode.PERSISTENT, DirMode.PERSISTENT);
                actualDir = session.mkdir(rootDir + "/" + SIMPLE_SUBDIR, address, DirMode.EPHEMERAL_SEQUENTIAL);
                session.exists(actualDir, this);
                LOGGER.debug("Registed " + SimpleInboundSide.class.getSimpleName() + " at " + actualDir);
                isReady.set(true);
                return true;
            } catch (final ClusterInfoException e) {
                final String message = "Failed to register " + SimpleInboundSide.class.getSimpleName() + " for cluster " + clusterId + ". Will retry shortly.";
                if (LOGGER.isTraceEnabled())
                    LOGGER.debug(message, e);
                else
                    LOGGER.debug(message);
                return false;
            }
        }

        @Override
        public String toString() {
            return "register " + SimpleInboundSide.class.getSimpleName() + " for cluster " + clusterId;
        }
    };
    isRunning.set(true);
    registerer.process();
}
Also used : ClusterInfoException(net.dempsy.cluster.ClusterInfoException) PersistentTask(net.dempsy.utils.PersistentTask)
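
Condensed, the registration idiom here is: make sure the persistent parent path exists, create an ephemeral-sequential child carrying this node's address, then watch that child so its disappearance (for example after a session expiry) re-runs the registration. The sketch below covers just that part; it assumes ClusterInfoWatcher is the single-method callback interface behind the `this` argument above, and the path and helper name are illustrative.

import net.dempsy.cluster.ClusterInfoException;
import net.dempsy.cluster.ClusterInfoSession;
import net.dempsy.cluster.ClusterInfoWatcher;
import net.dempsy.cluster.DirMode;

public class EphemeralRegistrationSketch {
    public static String register(final ClusterInfoSession session, final Object address,
            final ClusterInfoWatcher onChange) throws ClusterInfoException {
        final String rootDir = "/application/clusters/my-cluster"; // illustrative path
        // the parent chain is persistent: it must outlive any single session
        session.recursiveMkdir(rootDir, null, DirMode.PERSISTENT, DirMode.PERSISTENT);
        // the child is ephemeral-sequential: it disappears with the session and gets a unique suffix
        final String actualDir = session.mkdir(rootDir + "/" + "simple", address, DirMode.EPHEMERAL_SEQUENTIAL);
        // watching the node we just created lets the caller re-register if it vanishes
        session.exists(actualDir, onChange);
        return actualDir;
    }
}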

Example 9 with ClusterInfoException

Use of net.dempsy.cluster.ClusterInfoException in project Dempsy by Dempsy.

In the class OutgoingDispatcher, the method start:

@Override
public void start(final Infrastructure infra) {
    final ClusterInfoSession session = infra.getCollaborator();
    final String nodesDir = infra.getRootPaths().nodesDir;
    checkup = new PersistentTask(LOGGER_SESSION, isRunning, infra.getScheduler(), RETRY_TIMEOUT) {

        @Override
        public boolean execute() {
            try {
                // collect up all NodeInfo's known about.
                session.recursiveMkdir(nodesDir, null, DirMode.PERSISTENT, DirMode.PERSISTENT);
                final Collection<String> nodeDirs = session.getSubdirs(nodesDir, this);
                final Set<NodeInformation> alreadySeen = new HashSet<>();
                // get all of the subdirectories NodeInformations
                for (final String subdir : nodeDirs) {
                    final NodeInformation ni = (NodeInformation) session.getData(nodesDir + "/" + subdir, null);
                    if (ni == null) {
                        LOGGER_SESSION.warn("[{}] A node directory was empty at " + subdir, thisNodeId);
                        return false;
                    }
                    // see if node info is dupped.
                    if (alreadySeen.contains(ni)) {
                        LOGGER_SESSION.warn("[{}] The node " + ni.nodeAddress + " seems to be registed more than once.", thisNodeId);
                        continue;
                    }
                    if (ni.clusterInfoByClusterId.size() == 0) {
                        // it's ALL adaptor so there's no sense in dealing with it
                        LOGGER_SESSION.trace("[{}] NodeInformation {} appears to be only an Adaptor.", thisNodeId, ni);
                        continue;
                    }
                    alreadySeen.add(ni);
                }
                // check to see if there's new nodes.
                final ApplicationState.Update ud = outbounds.get().update(alreadySeen, thisNode, thisNodeId);
                if (!ud.change()) {
                    LOGGER_SESSION.info("[{}] Topology change notification resulted in no changes.", thisNodeId);
                    isReady.set(true);
                    // nothing to update.
                    return true;
                } else if (LOGGER_SESSION.isTraceEnabled())
                    LOGGER_SESSION.info("[{}] Topology change notification resulted in changes ", thisNodeId);
                // otherwise we will be making changes so remove the current ApplicationState
                // this can cause instability.
                final ApplicationState obs = outbounds.getAndSet(null);
                try {
                    final ApplicationState newState = obs.apply(ud, tmanager, statsCollector, manager, thisNodeId);
                    outbounds.set(newState);
                    isReady.set(true);
                    return true;
                } catch (final RuntimeException rte) {
                    // if we threw an exception after clearing the outbounds we need to restore it.
                    // This is likely a configuration error so we should probably warn about it.
                    LOGGER_SESSION.warn("[{}] Unexpected exception while applying a topology update", thisNodeId, rte);
                    outbounds.set(obs);
                    throw rte;
                }
            } catch (final ClusterInfoException e) {
                final String message = "Failed to find outgoing route information. Will retry shortly.";
                if (LOGGER_SESSION.isTraceEnabled())
                    LOGGER_SESSION.debug("[{}] {}", new Object[] { thisNodeId, message, e });
                else
                    LOGGER_SESSION.debug("[{}] {}", thisNodeId, message);
                return false;
            }
        }

        @Override
        public String toString() {
            return thisNodeId + " find nodes to route to";
        }
    };
    outbounds.set(new ApplicationState(tmanager, thisNode, new ConcurrentHashMap<>()));
    isRunning.set(true);
    checkup.process();
}
Also used : HashSet(java.util.HashSet) Set(java.util.Set) NodeInformation(net.dempsy.NodeInformation) ClusterInfoException(net.dempsy.cluster.ClusterInfoException) SafeString(net.dempsy.util.SafeString) PersistentTask(net.dempsy.utils.PersistentTask) Collection(java.util.Collection) ClusterInfoSession(net.dempsy.cluster.ClusterInfoSession) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap)
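
One detail in this variant that can trip readers up is the debug call that wraps its arguments in an Object[]. With slf4j, when the format string has fewer placeholders than arguments and the final argument is a Throwable, the logger treats that trailing argument as the exception and prints its stack trace, which is exactly what the trace-enabled branch above relies on. A tiny standalone illustration (the class name is made up):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class TrailingThrowableSketch {
    private static final Logger LOG = LoggerFactory.getLogger(TrailingThrowableSketch.class);

    public static void main(final String[] args) {
        final Exception cause = new Exception("simulated cluster failure");
        // two placeholders, three arguments: slf4j fills the placeholders with the first
        // two values and logs the trailing Throwable together with its stack trace
        LOG.debug("[{}] {}", "node-1", "Failed to find outgoing route information.", cause);
    }
}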

Example 10 with ClusterInfoException

Use of net.dempsy.cluster.ClusterInfoException in project Dempsy by Dempsy.

In the class NodeManager, the method start:

public NodeManager start() throws DempsyException {
    validate();
    nodeStatsCollector = tr.track((NodeStatsCollector) node.getNodeStatsCollector());
    // TODO: cleaner?
    statsCollectorFactory = tr.track(new Manager<>(ClusterStatsCollectorFactory.class).getAssociatedInstance(node.getClusterStatsCollectorFactoryId()));
    // =====================================
    // set the dispatcher on adaptors and create containers for mp clusters
    final AtomicReference<String> firstAdaptorClusterName = new AtomicReference<>(null);
    node.getClusters().forEach(c -> {
        if (c.isAdaptor()) {
            if (firstAdaptorClusterName.get() == null)
                firstAdaptorClusterName.set(c.getClusterId().clusterName);
            final Adaptor adaptor = c.getAdaptor();
            adaptors.put(c.getClusterId(), adaptor);
            if (c.getRoutingStrategyId() != null && !"".equals(c.getRoutingStrategyId().trim()) && !" ".equals(c.getRoutingStrategyId().trim()))
                LOGGER.warn("The cluster " + c.getClusterId() + " contains an adaptor but also has the routingStrategy set. The routingStrategy will be ignored.");
            if (c.getOutputScheduler() != null)
                LOGGER.warn("The cluster " + c.getClusterId() + " contains an adaptor but also has an output executor set. The output executor will never be used.");
        } else {
            String containerTypeId = c.getContainerTypeId();
            if (containerTypeId == null)
                // can't be null
                containerTypeId = node.getContainerTypeId();
            final Container con = makeContainer(containerTypeId).setMessageProcessor(c.getMessageProcessor()).setClusterId(c.getClusterId()).setMaxPendingMessagesPerContainer(c.getMaxPendingMessagesPerContainer());
            // TODO: This is a hack for now.
            final Manager<RoutingStrategy.Inbound> inboundManager = new RoutingInboundManager();
            final RoutingStrategy.Inbound is = inboundManager.getAssociatedInstance(c.getRoutingStrategyId());
            final boolean outputSupported = c.getMessageProcessor().isOutputSupported();
            final Object outputScheduler = c.getOutputScheduler();
            // if the mp handles output but there's no output scheduler then we should warn.
            if (outputSupported && outputScheduler == null)
                LOGGER.warn("The cluster " + c.getClusterId() + " contains a message processor that supports an output cycle but there's no executor set so it will never be invoked.");
            if (!outputSupported && outputScheduler != null)
                LOGGER.warn("The cluster " + c.getClusterId() + " contains a message processor that doesn't support an output cycle but there's an output cycle executor set. The output cycle executor will never be used.");
            final OutputScheduler os = (outputSupported && outputScheduler != null) ? (OutputScheduler) outputScheduler : null;
            containers.add(new PerContainer(con, is, c, os));
        }
    });
    // if we're all adaptor then don't bother to get the receiver.
    if (containers.size() == 0) {
        // here there's no point in a receiver since there's nothing to receive.
        if (firstAdaptorClusterName.get() == null)
            throw new IllegalStateException("There seems to be no clusters or adaptors defined for this node \"" + node.toString() + "\"");
    } else {
        receiver = (Receiver) node.getReceiver();
        if (receiver != null) // otherwise we're all adaptor
            nodeAddress = receiver.getAddress(this);
        else if (firstAdaptorClusterName.get() == null)
            throw new IllegalStateException("There seems to be no clusters or adaptors defined for this node \"" + node.toString() + "\"");
    }
    nodeId = Optional.ofNullable(nodeAddress).map(n -> n.getGuid()).orElse(firstAdaptorClusterName.get());
    if (nodeStatsCollector == null) {
        LOGGER.warn("There is no {} set for the the application '{}'", StatsCollector.class.getSimpleName(), node.application);
        nodeStatsCollector = new DummyNodeStatsCollector();
    }
    nodeStatsCollector.setNodeId(nodeId);
    if (nodeAddress == null && node.getReceiver() != null)
        LOGGER.warn("The node at " + nodeId + " contains no message processors but has a Reciever set. The receiver will never be started.");
    if (nodeAddress == null && node.getDefaultRoutingStrategyId() != null)
        LOGGER.warn("The node at " + nodeId + " contains no message processors but has a defaultRoutingStrategyId set. The routingStrategyId will never be used.");
    if (threading == null)
        threading = tr.track(new DefaultThreadingModel(nodeId)).configure(node.getConfiguration()).start(nodeId);
    else if (!threading.isStarted())
        threading.start(nodeId);
    nodeStatsCollector.setMessagesPendingGauge(() -> threading.getNumberLimitedPending());
    final NodeReceiver nodeReciever = receiver == null ? null : tr.track(new NodeReceiver(containers.stream().map(pc -> pc.container).collect(Collectors.toList()), threading, nodeStatsCollector));
    final Map<ClusterId, ClusterInformation> messageTypesByClusterId = new HashMap<>();
    containers.stream().map(pc -> pc.clusterDefinition).forEach(c -> {
        messageTypesByClusterId.put(c.getClusterId(), new ClusterInformation(c.getRoutingStrategyId(), c.getClusterId(), c.getMessageProcessor().messagesTypesHandled()));
    });
    final NodeInformation nodeInfo = nodeAddress != null ? new NodeInformation(receiver.transportTypeId(), nodeAddress, messageTypesByClusterId) : null;
    // Then actually register the Node
    if (nodeInfo != null) {
        keepNodeRegstered = new PersistentTask(LOGGER, isRunning, persistenceScheduler, RETRY_PERIOND_MILLIS) {

            @Override
            public boolean execute() {
                try {
                    session.recursiveMkdir(rootPaths.clustersDir, null, DirMode.PERSISTENT, DirMode.PERSISTENT);
                    session.recursiveMkdir(rootPaths.nodesDir, null, DirMode.PERSISTENT, DirMode.PERSISTENT);
                    final String nodePath = rootPaths.nodesDir + "/" + nodeId;
                    session.mkdir(nodePath, nodeInfo, DirMode.EPHEMERAL);
                    final NodeInformation reread = (NodeInformation) session.getData(nodePath, this);
                    final boolean ret = nodeInfo.equals(reread);
                    if (ret == true)
                        ptaskReady.set(true);
                    return ret;
                } catch (final ClusterInfoException e) {
                    final String logmessage = "Failed to register the node. Retrying in " + RETRY_PERIOND_MILLIS + " milliseconds.";
                    if (LOGGER.isDebugEnabled())
                        LOGGER.info(logmessage, e);
                    else
                        LOGGER.info(logmessage);
                }
                return false;
            }

            @Override
            public String toString() {
                return "register node information";
            }
        };
    }
    // =====================================
    // The layering works this way.
    // 
    // Receiver -> NodeReceiver -> adaptor -> container -> OutgoingDispatcher -> RoutingStrategyOB -> Transport
    // 
    // starting needs to happen in reverse.
    // =====================================
    isRunning.set(true);
    this.tManager = tr.start(new TransportManager(), this);
    this.rsManager = tr.start(new RoutingStrategyManager(), this);
    // create the router but don't start it yet.
    this.router = new OutgoingDispatcher(rsManager, nodeAddress, nodeId, nodeReciever, tManager, nodeStatsCollector);
    // set up containers
    containers.forEach(pc -> pc.container.setDispatcher(router).setEvictionCycle(pc.clusterDefinition.getEvictionFrequency().evictionFrequency, pc.clusterDefinition.getEvictionFrequency().evictionTimeUnit));
    // IB routing strategy
    final int numContainers = containers.size();
    for (int i = 0; i < numContainers; i++) {
        final PerContainer c = containers.get(i);
        c.inboundStrategy.setContainerDetails(c.clusterDefinition.getClusterId(), new ContainerAddress(nodeAddress, i), c.container);
    }
    // setup the output executors by passing the containers
    containers.stream().filter(pc -> pc.outputScheduler != null).forEach(pc -> pc.outputScheduler.setOutputInvoker(pc.container));
    // set up adaptors
    adaptors.values().forEach(a -> a.setDispatcher(router));
    // start containers after setting inbound
    containers.forEach(pc -> tr.start(pc.container.setInbound(pc.inboundStrategy), this));
    // start the output schedulers now that the containers have been started.
    containers.stream().map(pc -> pc.outputScheduler).filter(os -> os != null).forEach(os -> tr.start(os, this));
    // start IB routing strategy
    containers.forEach(pc -> tr.start(pc.inboundStrategy, this));
    // start router
    tr.start(this.router, this);
    final PersistentTask startAdaptorAfterRouterIsRunning = new PersistentTask(LOGGER, isRunning, this.persistenceScheduler, 500) {

        @Override
        public boolean execute() {
            if (!router.isReady())
                return false;
            adaptors.entrySet().forEach(e -> threading.runDaemon(() -> tr.track(e.getValue()).start(), "Adaptor-" + e.getKey().clusterName));
            return true;
        }
    };
    // make sure the router is running. Once it is, start the adaptor(s)
    startAdaptorAfterRouterIsRunning.process();
    if (receiver != null)
        tr.track(receiver).start(nodeReciever, this);
    // close this session when we're done.
    tr.track(session);
    // make it known we're here and ready
    if (keepNodeRegstered != null)
        keepNodeRegstered.process();
    else
        ptaskReady.set(true);
    return this;
}
Also used : Adaptor(net.dempsy.messages.Adaptor) ClusterInfoException(net.dempsy.cluster.ClusterInfoException) TransportManager(net.dempsy.transport.TransportManager) ClusterInfoSession(net.dempsy.cluster.ClusterInfoSession) Node(net.dempsy.config.Node) ContainerAddress(net.dempsy.router.RoutingStrategy.ContainerAddress) DefaultThreadingModel(net.dempsy.threading.DefaultThreadingModel) NodeAddress(net.dempsy.transport.NodeAddress) LoggerFactory(org.slf4j.LoggerFactory) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) DirMode(net.dempsy.cluster.DirMode) AtomicReference(java.util.concurrent.atomic.AtomicReference) ArrayList(java.util.ArrayList) OutputScheduler(net.dempsy.output.OutputScheduler) RoutingStrategyManager(net.dempsy.router.RoutingStrategyManager) Functional.ignore(net.dempsy.util.Functional.ignore) RoutingStrategy(net.dempsy.router.RoutingStrategy) Map(java.util.Map) ThreadingModel(net.dempsy.threading.ThreadingModel) RoutingInboundManager(net.dempsy.router.RoutingInboundManager) Receiver(net.dempsy.transport.Receiver) ClusterId(net.dempsy.config.ClusterId) StatsCollector(net.dempsy.monitoring.StatsCollector) Cluster(net.dempsy.config.Cluster) ClusterStatsCollectorFactory(net.dempsy.monitoring.ClusterStatsCollectorFactory) DummyNodeStatsCollector(net.dempsy.monitoring.dummy.DummyNodeStatsCollector) Logger(org.slf4j.Logger) SafeString(net.dempsy.util.SafeString) ClusterInfoSessionFactory(net.dempsy.cluster.ClusterInfoSessionFactory) Collection(java.util.Collection) OutgoingDispatcher(net.dempsy.intern.OutgoingDispatcher) Collectors(java.util.stream.Collectors) AutoDisposeSingleThreadScheduler(net.dempsy.util.executor.AutoDisposeSingleThreadScheduler) TimeUnit(java.util.concurrent.TimeUnit) ClusterStatsCollector(net.dempsy.monitoring.ClusterStatsCollector) NodeStatsCollector(net.dempsy.monitoring.NodeStatsCollector) List(java.util.List) PersistentTask(net.dempsy.utils.PersistentTask) Optional(java.util.Optional) Container(net.dempsy.container.Container) Inbound(net.dempsy.router.RoutingStrategy.Inbound) MessageProcessorLifecycle(net.dempsy.messages.MessageProcessorLifecycle) Collections(java.util.Collections)
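
The node-registration task in this example follows the same shape as Example 8, with two differences worth noting: the entry is plain EPHEMERAL (no sequence suffix, since the node id is already unique) and the write is verified by reading the data back and comparing it with equals(). Below is a condensed sketch of that verification step using only the session calls shown above; the helper name registerAndVerify and its parameter list are made up for illustration.

import net.dempsy.NodeInformation;
import net.dempsy.cluster.ClusterInfoException;
import net.dempsy.cluster.ClusterInfoSession;
import net.dempsy.cluster.ClusterInfoWatcher;
import net.dempsy.cluster.DirMode;

public class NodeRegistrationSketch {
    public static boolean registerAndVerify(final ClusterInfoSession session, final String nodesDir,
            final String nodeId, final NodeInformation nodeInfo, final ClusterInfoWatcher onChange) throws ClusterInfoException {
        // parents are persistent so they survive individual node sessions
        session.recursiveMkdir(nodesDir, null, DirMode.PERSISTENT, DirMode.PERSISTENT);
        final String nodePath = nodesDir + "/" + nodeId;
        // EPHEMERAL: the registration disappears automatically if this node's session dies
        session.mkdir(nodePath, nodeInfo, DirMode.EPHEMERAL);
        // read the entry back, watching it so the caller can re-register if it goes away,
        // and confirm the data round-tripped intact
        final NodeInformation reread = (NodeInformation) session.getData(nodePath, onChange);
        return nodeInfo.equals(reread);
    }
}

Returning false from the enclosing PersistentTask when this check fails gives the same retry behavior as the keepNodeRegstered task above.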

Aggregations

ClusterInfoException (net.dempsy.cluster.ClusterInfoException) 14
IOException (java.io.IOException) 5
TimeUnit (java.util.concurrent.TimeUnit) 5
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean) 5
DirMode (net.dempsy.cluster.DirMode) 5
Properties (java.util.Properties) 4
CountDownLatch (java.util.concurrent.CountDownLatch) 4
ClusterInfoWatcher (net.dempsy.cluster.ClusterInfoWatcher) 4
TestClusterImpls.recurseCreate (net.dempsy.cluster.TestClusterImpls.recurseCreate) 4
JsonSerializer (net.dempsy.serialization.jackson.JsonSerializer) 4
PersistentTask (net.dempsy.utils.PersistentTask) 4
Condition (net.dempsy.utils.test.ConditionPoll.Condition) 4
ConditionPoll.baseTimeoutMillis (net.dempsy.utils.test.ConditionPoll.baseTimeoutMillis) 4
ConditionPoll.poll (net.dempsy.utils.test.ConditionPoll.poll) 4
ZooKeeper (org.apache.zookeeper.ZooKeeper) 4
Assert.assertEquals (org.junit.Assert.assertEquals) 4
Assert.assertFalse (org.junit.Assert.assertFalse) 4
Assert.assertTrue (org.junit.Assert.assertTrue) 4
Test (org.junit.Test) 4
Logger (org.slf4j.Logger) 4