Use of org.opennms.core.criteria.CriteriaBuilder in project opennms by OpenNMS.
The class BusinessServiceSearchProvider, method queryVertices.
@Override
public List<? extends VertexRef> queryVertices(SearchQuery searchQuery, GraphContainer container) {
    List<BusinessServiceVertex> results = Lists.newArrayList();
    String queryString = searchQuery.getQueryString();
    CriteriaBuilder bldr = new CriteriaBuilder(BusinessService.class);
    if (queryString != null && queryString.length() > 0) {
        bldr.ilike("name", String.format("%%%s%%", queryString));
    }
    bldr.orderBy("name", true);
    bldr.limit(10);
    Criteria dbQueryCriteria = bldr.toCriteria();
    for (BusinessService bs : businessServiceManager.findMatching(dbQueryCriteria)) {
        final BusinessServiceVertex businessServiceVertex = new BusinessServiceVertex(bs, 0);
        // Only consider results which are available in the Topology Provider, see BSM-191
        if (container.getTopologyServiceClient().getVertex(businessServiceVertex) != null) {
            results.add(businessServiceVertex);
        }
    }
    return results;
}
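The query above shows the typical CriteriaBuilder flow: an optional ilike restriction wrapped in % wildcards for a case-insensitive substring match, an ascending orderBy, a limit, and finally toCriteria(). Below is a minimal sketch of that flow in isolation; the BusinessServiceManager/BusinessService import paths and the searchByName helper are illustrative assumptions, only the CriteriaBuilder calls are taken from the snippet.

import java.util.List;

import org.opennms.core.criteria.Criteria;
import org.opennms.core.criteria.CriteriaBuilder;
import org.opennms.netmgt.bsm.service.BusinessServiceManager;
import org.opennms.netmgt.bsm.service.model.BusinessService;

public class BusinessServiceSearchSketch {

    private final BusinessServiceManager businessServiceManager;

    public BusinessServiceSearchSketch(BusinessServiceManager businessServiceManager) {
        this.businessServiceManager = businessServiceManager;
    }

    /**
     * Case-insensitive substring search over business service names,
     * sorted ascending by name and capped at maxResults rows.
     */
    public List<BusinessService> searchByName(String query, int maxResults) {
        CriteriaBuilder bldr = new CriteriaBuilder(BusinessService.class);
        if (query != null && !query.isEmpty()) {
            // %...% makes ilike match the query anywhere inside the name
            bldr.ilike("name", String.format("%%%s%%", query));
        }
        bldr.orderBy("name", true);
        bldr.limit(maxResults);
        Criteria criteria = bldr.toCriteria();
        return businessServiceManager.findMatching(criteria);
    }
}

Passing the resulting Criteria to findMatching keeps the filtering, ordering, and paging in the database query rather than in Java.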
Use of org.opennms.core.criteria.CriteriaBuilder in project opennms by OpenNMS.
The class BusinessServicesTopologyProvider, method getDefaults.
@Override
public Defaults getDefaults() {
    return new Defaults().withPreferredLayout("Hierarchy Layout").withCriteria(() -> {
        // Grab the business service with the smallest id
        List<BusinessService> businessServices = businessServiceManager.findMatching(new CriteriaBuilder(BusinessService.class).orderBy("id", true).limit(1).toCriteria());
        // If one was found, use it for the default focus
        if (!businessServices.isEmpty()) {
            BusinessService businessService = businessServices.iterator().next();
            BusinessServiceVertex businessServiceVertex = new BusinessServiceVertex(businessService, 0);
            return Lists.newArrayList(new VertexHopGraphProvider.DefaultVertexHopCriteria(businessServiceVertex));
        }
        return null;
    });
}
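The supplier passed to withCriteria reduces to a single one-row query: order ascending by id and limit the result to one row. A compact sketch of just that query, assuming the same BusinessServiceManager API used above (the helper class and method name are illustrative):

import java.util.List;

import org.opennms.core.criteria.CriteriaBuilder;
import org.opennms.netmgt.bsm.service.BusinessServiceManager;
import org.opennms.netmgt.bsm.service.model.BusinessService;

public final class DefaultFocusSketch {

    private DefaultFocusSketch() {
    }

    /** Returns the business service with the smallest id, or null when none exist. */
    public static BusinessService findFirstBusinessService(BusinessServiceManager manager) {
        // Ascending order on "id" plus limit(1) yields at most one row: the smallest id
        List<BusinessService> matches = manager.findMatching(
                new CriteriaBuilder(BusinessService.class).orderBy("id", true).limit(1).toCriteria());
        return matches.isEmpty() ? null : matches.get(0);
    }
}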
Use of org.opennms.core.criteria.CriteriaBuilder in project opennms by OpenNMS.
The class SyslogOverlappingIpAddressIT, method testAssociateSyslogsWithNodesWithOverlappingIpAddresses.
/**
 * @see https://issues.opennms.org/browse/NMS-8798
 *
 * @throws Exception
 */
@Test
public void testAssociateSyslogsWithNodesWithOverlappingIpAddresses() throws Exception {
    final Date startOfTest = new Date();
    final String hostIpAddress = "1.2.3.4";
    // Create a requisition with two nodes in different locations but with the same IP address
    final RestClient client = new RestClient(testEnvironment.getServiceAddress(ContainerAlias.OPENNMS, 8980));
    final Requisition requisition = new Requisition("overlapping");
    final RequisitionNode node1 = new RequisitionNode();
    node1.setNodeLabel("node_1");
    node1.setLocation("MINION");
    final RequisitionInterface interface1 = new RequisitionInterface();
    interface1.setIpAddr(hostIpAddress);
    interface1.setManaged(true);
    interface1.setSnmpPrimary(PrimaryType.PRIMARY);
    node1.setInterfaces(ImmutableList.of(interface1));
    node1.setForeignId("node_1");
    requisition.insertNode(node1);
    final RequisitionNode node2 = new RequisitionNode();
    node2.setNodeLabel("node_2");
    node2.setLocation("BANANA");
    final RequisitionInterface interface2 = new RequisitionInterface();
    interface2.setIpAddr(hostIpAddress);
    interface2.setManaged(true);
    interface2.setSnmpPrimary(PrimaryType.PRIMARY);
    node2.setInterfaces(ImmutableList.of(interface2));
    node2.setForeignId("node_2");
    requisition.insertNode(node2);
    client.addOrReplaceRequisition(requisition);
    client.importRequisition("overlapping");
    // Wait for the nodes to be provisioned
    final OnmsNode onmsNode1 = await().atMost(1, MINUTES).pollInterval(5, SECONDS).until(DaoUtils.findMatchingCallable(getDaoFactory().getDao(NodeDaoHibernate.class), new CriteriaBuilder(OnmsNode.class).eq("label", "node_1").toCriteria()), notNullValue());
    final OnmsNode onmsNode2 = await().atMost(1, MINUTES).pollInterval(5, SECONDS).until(DaoUtils.findMatchingCallable(getDaoFactory().getDao(NodeDaoHibernate.class), new CriteriaBuilder(OnmsNode.class).eq("label", "node_2").toCriteria()), notNullValue());
    // Send a syslog message from each Minion location and expect it to be associated with the corresponding node
    sendMessage(ContainerAlias.MINION, hostIpAddress, 1);
    await().atMost(1, MINUTES).pollInterval(5, SECONDS).until(DaoUtils.countMatchingCallable(getDaoFactory().getDao(EventDaoHibernate.class), new CriteriaBuilder(OnmsEvent.class).eq("eventUei", "uei.opennms.org/vendor/cisco/syslog/SEC-6-IPACCESSLOGP/aclDeniedIPTraffic").ge("eventCreateTime", startOfTest).eq("node", onmsNode1).toCriteria()), is(1));
    sendMessage(ContainerAlias.MINION_OTHER_LOCATION, hostIpAddress, 1);
    await().atMost(1, MINUTES).pollInterval(5, SECONDS).until(DaoUtils.countMatchingCallable(getDaoFactory().getDao(EventDaoHibernate.class), new CriteriaBuilder(OnmsEvent.class).eq("eventUei", "uei.opennms.org/vendor/cisco/syslog/SEC-6-IPACCESSLOGP/aclDeniedIPTraffic").ge("eventCreateTime", startOfTest).eq("node", onmsNode2).toCriteria()), is(1));
}
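The two assertions at the end differ only in which node the events must belong to; the interesting part is the criteria, which combines an eq on the UEI, a ge on eventCreateTime, and an eq on the owning node. A sketch that pulls this construction into a helper (the helper class and method name are illustrative, not part of the test):

import java.util.Date;

import org.opennms.core.criteria.Criteria;
import org.opennms.core.criteria.CriteriaBuilder;
import org.opennms.netmgt.model.OnmsEvent;
import org.opennms.netmgt.model.OnmsNode;

public final class EventCriteriaSketch {

    private EventCriteriaSketch() {
    }

    /** Matches events with the given UEI, created at or after 'since', owned by 'node'. */
    public static Criteria eventsForNodeSince(String uei, Date since, OnmsNode node) {
        return new CriteriaBuilder(OnmsEvent.class)
                .eq("eventUei", uei)
                .ge("eventCreateTime", since)
                .eq("node", node)
                .toCriteria();
    }
}

The resulting Criteria can then be handed to DaoUtils.countMatchingCallable exactly as the test does, so each await condition reads as "one matching event for this node since the test started".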
Use of org.opennms.core.criteria.CriteriaBuilder in project opennms by OpenNMS.
The class TrapIT, method canReceiveTraps.
@Test
public void canReceiveTraps() throws Exception {
    Date startOfTest = new Date();
    final InetSocketAddress trapAddr = minionSystem.getServiceAddress(ContainerAlias.MINION, 1162, "udp");
    // Connect to the postgresql container
    InetSocketAddress pgsql = minionSystem.getServiceAddress(ContainerAlias.POSTGRES, 5432);
    HibernateDaoFactory daoFactory = new HibernateDaoFactory(pgsql);
    EventDao eventDao = daoFactory.getDao(EventDaoHibernate.class);
    // Build criteria matching SNMP warm-start trap events created after the start of the test
    Criteria criteria = new CriteriaBuilder(OnmsEvent.class).eq("eventUei", "uei.opennms.org/generic/traps/SNMP_Warm_Start").ge("eventTime", startOfTest).toCriteria();
    // Send traps to the Minion listener until one makes it through
    await().atMost(5, MINUTES).pollInterval(30, SECONDS).pollDelay(0, SECONDS).until(new Callable<Boolean>() {

        @Override
        public Boolean call() throws Exception {
            sendTrap(trapAddr);
            try {
                await().atMost(30, SECONDS).pollInterval(5, SECONDS).until(DaoUtils.countMatchingCallable(eventDao, criteria), greaterThanOrEqualTo(1));
            } catch (final Exception e) {
                return false;
            }
            return true;
        }
    });
}
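Because traps travel over UDP and can be dropped before the Minion listener is fully up, the test wraps send-and-check in a retry: each outer attempt sends another trap, and the inner await checks whether a matching event has been persisted yet. The sketch below shows the same idea as a plain polling loop without Awaitility; it assumes the EventDao exposes countMatching(Criteria) like the other generic OpenNMS DAOs, and the helper class, method name, and Runnable parameter are illustrative.

import java.util.Date;

import org.opennms.core.criteria.Criteria;
import org.opennms.core.criteria.CriteriaBuilder;
import org.opennms.netmgt.dao.api.EventDao;
import org.opennms.netmgt.model.OnmsEvent;

public final class TrapRetrySketch {

    private TrapRetrySketch() {
    }

    /**
     * Re-sends a trap and polls until at least one matching warm-start event
     * has been persisted, or the attempt budget is exhausted.
     */
    public static boolean sendUntilEventAppears(EventDao eventDao, Date startOfTest,
            Runnable sendTrap, int maxAttempts, long pollMillis) throws InterruptedException {
        Criteria criteria = new CriteriaBuilder(OnmsEvent.class)
                .eq("eventUei", "uei.opennms.org/generic/traps/SNMP_Warm_Start")
                .ge("eventTime", startOfTest)
                .toCriteria();
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
            sendTrap.run(); // traps can be dropped, so send another one on each attempt
            Thread.sleep(pollMillis);
            if (eventDao.countMatching(criteria) >= 1) {
                return true;
            }
        }
        return false;
    }
}

Trap sending is abstracted behind a Runnable so the sketch stays independent of the test harness; the criteria is the same UEI-plus-eventTime restriction the test builds.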
Use of org.opennms.core.criteria.CriteriaBuilder in project opennms by OpenNMS.
The class SyslogKafkaElasticsearch5OutageIT, method testMinionSyslogsOverKafkaToEsRest.
@Test
public void testMinionSyslogsOverKafkaToEsRest() throws Exception {
    Date startOfTest = new Date();
    int numMessages = 10000;
    int packetsPerSecond = 250;
    InetSocketAddress minionSshAddr = testEnvironment.getServiceAddress(ContainerAlias.MINION, 8201);
    InetSocketAddress opennmsSshAddr = testEnvironment.getServiceAddress(ContainerAlias.OPENNMS, 8101);
    InetSocketAddress kafkaAddress = testEnvironment.getServiceAddress(ContainerAlias.KAFKA, 9092);
    InetSocketAddress zookeeperAddress = testEnvironment.getServiceAddress(ContainerAlias.KAFKA, 2181);
    // Install the Kafka syslog and trap handlers on the Minion system
    installFeaturesOnMinion(minionSshAddr, kafkaAddress);
    // Install the Kafka and Elasticsearch features on the OpenNMS system
    installFeaturesOnOpenNMS(opennmsSshAddr, kafkaAddress, zookeeperAddress);
    final String sender = testEnvironment.getContainerInfo(ContainerAlias.SNMPD).networkSettings().ipAddress();
    // Wait for the Minion to show up
    await().atMost(90, SECONDS).pollInterval(5, SECONDS).until(DaoUtils.countMatchingCallable(getDaoFactory().getDao(MinionDaoHibernate.class), new CriteriaBuilder(OnmsMinion.class).gt("lastUpdated", startOfTest).eq("location", "MINION").toCriteria()), is(1));
    LOG.info("Warming up syslog routes by sending 100 packets");
    // Warm up the routes
    sendMessage(ContainerAlias.MINION, sender, 100);
    for (int i = 0; i < 10; i++) {
        LOG.info("Slept for " + i + " seconds");
        Thread.sleep(1000);
    }
    LOG.info("Resetting statistics");
    resetRouteStatistics(opennmsSshAddr, minionSshAddr);
    for (int i = 0; i < 20; i++) {
        LOG.info("Slept for " + i + " seconds");
        Thread.sleep(1000);
    }
    // Make sure that this evenly divides numMessages
    final int chunk = 250;
    // Make sure that this is an even multiple of chunk
    final int logEvery = 1000;
    int count = 0;
    long start = System.currentTimeMillis();
    AtomicInteger restartCounter = new AtomicInteger();
    // Start a timer that occasionally restarts Elasticsearch
    Timer restarter = new Timer("Elasticsearch-Restarter", true);
    restarter.scheduleAtFixedRate(new TimerTask() {

        @Override
        public void run() {
            final DockerClient docker = ((AbstractTestEnvironment) testEnvironment).getDockerClient();
            final String id = testEnvironment.getContainerInfo(ContainerAlias.ELASTICSEARCH_5).id();
            try {
                LOG.info("Restarting container: {}", id);
                docker.restartContainer(id);
                restartCounter.incrementAndGet();
                LOG.info("Container restarted: {}", id);
            } catch (DockerException | InterruptedException e) {
                LOG.warn("Unexpected exception while restarting container {}", id, e);
            }
        }
    }, 0L, TimeUnit.SECONDS.toMillis(29));
    // Send ${numMessages} syslog messages
    RateLimiter limiter = RateLimiter.create(packetsPerSecond);
    for (int i = 0; i < (numMessages / chunk); i++) {
        limiter.acquire(chunk);
        sendMessage(ContainerAlias.MINION, sender, chunk);
        count += chunk;
        if (count % logEvery == 0) {
            long mid = System.currentTimeMillis();
            LOG.info(String.format("Sent %d packets in %d milliseconds", logEvery, mid - start));
            start = System.currentTimeMillis();
        }
    }
    // Stop restarting Elasticsearch
    restarter.cancel();
    // 100 warm-up messages plus ${numMessages} messages
    pollForElasticsearchEventsUsingJest(this::getEs5Address, 100 + numMessages);
    assertTrue("Elasticsearch was never restarted", restartCounter.get() > 0);
}
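Before generating any load, the test blocks until the Minion has checked in by counting OnmsMinion rows at the expected location whose lastUpdated timestamp is newer than the start of the test. That criteria, isolated into a helper for clarity (the helper class and the OnmsMinion import path are assumptions; the gt/eq restrictions come straight from the snippet above):

import java.util.Date;

import org.opennms.core.criteria.Criteria;
import org.opennms.core.criteria.CriteriaBuilder;
import org.opennms.netmgt.model.minion.OnmsMinion;

public final class MinionPresenceCriteriaSketch {

    private MinionPresenceCriteriaSketch() {
    }

    /** Matches Minions at the given location that have reported in since 'since'. */
    public static Criteria minionsSeenSince(String location, Date since) {
        return new CriteriaBuilder(OnmsMinion.class)
                .gt("lastUpdated", since)
                .eq("location", location)
                .toCriteria();
    }
}

Counting matches for this criteria with DaoUtils.countMatchingCallable, as the await at the top of the test does, turns Minion presence into a simple poll condition.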