
Example 26 with NodeId

use of org.graylog2.plugin.system.NodeId in project graylog2-server by Graylog2.

the class IndexerClusterCheckerThread method doRun.

@Override
public void doRun() {
    if (!notificationService.isFirst(Notification.Type.ES_OPEN_FILES)) {
        return;
    }
    try {
        cluster.health().getStatus();
    } catch (Exception e) {
        LOG.info("Indexer not fully initialized yet. Skipping periodic cluster check.");
        return;
    }
    boolean allHigher = true;
    final Map<String, NodeInfo> nodesInfos = cluster.getDataNodes();
    final Map<String, NodeStats> nodesStats = cluster.getNodesStats(nodesInfos.keySet().toArray(new String[nodesInfos.size()]));
    for (Map.Entry<String, NodeStats> entry : nodesStats.entrySet()) {
        final String nodeId = entry.getKey();
        final NodeStats nodeStats = entry.getValue();
        final NodeInfo nodeInfo = nodesInfos.get(nodeId);
        final String nodeName = nodeInfo.getNode().getName();
        // Check the maximum number of open file descriptors.
        final ProcessStats processStats = nodeStats.getProcess();
        if (processStats == null) {
            LOG.debug("Couldn't read process stats of Elasticsearch node {}", nodeName);
            return;
        }
        final long maxFileDescriptors = processStats.getMaxFileDescriptors();
        final JvmInfo jvmInfo = nodeInfo.getJvm();
        if (jvmInfo == null) {
            LOG.debug("Couldn't read JVM info of Elasticsearch node {}", nodeName);
            return;
        }
        final String osName = jvmInfo.getSystemProperties().getOrDefault("os.name", "");
        if (osName.startsWith("Windows")) {
            LOG.debug("Skipping open file limit check for Indexer node <{}> on Windows", nodeName);
        } else if (maxFileDescriptors != -1 && maxFileDescriptors < MINIMUM_OPEN_FILES_LIMIT) {
            // Write notification.
            final Notification notification = notificationService.buildNow()
                    .addType(Notification.Type.ES_OPEN_FILES)
                    .addSeverity(Notification.Severity.URGENT)
                    .addDetail("hostname", nodeInfo.getHostname())
                    .addDetail("max_file_descriptors", maxFileDescriptors);
            if (notificationService.publishIfFirst(notification)) {
                LOG.warn("Indexer node <{}> open file limit is too low: [{}]. Set it to at least {}.", nodeName, maxFileDescriptors, MINIMUM_OPEN_FILES_LIMIT);
            }
            allHigher = false;
        }
    }
    if (allHigher) {
        Notification notification = notificationService.build().addType(Notification.Type.ES_OPEN_FILES);
        notificationService.fixed(notification);
    }
}
Also used: ProcessStats (org.elasticsearch.monitor.process.ProcessStats), JvmInfo (org.elasticsearch.monitor.jvm.JvmInfo), Notification (org.graylog2.notifications.Notification), NodeStats (org.elasticsearch.action.admin.cluster.node.stats.NodeStats), NodeInfo (org.elasticsearch.action.admin.cluster.node.info.NodeInfo), Map (java.util.Map)
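
The check relies on the notification service's publish-once/fix pattern: publishIfFirst only writes a notification if none of that type is currently active, and fixed clears it once every node passes. A minimal sketch of that pattern in isolation, using only the calls seen above; the threshold constant and the surrounding helper method are assumptions, not taken from the source:

// Hypothetical helper; the real threshold constant lives in IndexerClusterCheckerThread.
private static final long MINIMUM_OPEN_FILES_LIMIT = 64_000L;

void checkOpenFileLimit(String hostname, long maxFileDescriptors) {
    if (maxFileDescriptors != -1 && maxFileDescriptors < MINIMUM_OPEN_FILES_LIMIT) {
        // Raise an URGENT notification; publishIfFirst makes this a no-op if one is already active.
        final Notification notification = notificationService.buildNow()
                .addType(Notification.Type.ES_OPEN_FILES)
                .addSeverity(Notification.Severity.URGENT)
                .addDetail("hostname", hostname)
                .addDetail("max_file_descriptors", maxFileDescriptors);
        notificationService.publishIfFirst(notification);
    } else {
        // The limit is fine: clear any previously raised notification of this type.
        notificationService.fixed(notificationService.build().addType(Notification.Type.ES_OPEN_FILES));
    }
}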

Example 27 with NodeId

use of org.graylog2.plugin.system.NodeId in project graylog2-server by Graylog2.

the class KafkaTransport method doLaunch.

@Override
public void doLaunch(final MessageInput input) throws MisfireException {
    serverStatus.awaitRunning(new Runnable() {

        @Override
        public void run() {
            lifecycleStateChange(Lifecycle.RUNNING);
        }
    });
    // listen for lifecycle changes
    serverEventBus.register(this);
    final Properties props = new Properties();
    props.put("group.id", GROUP_ID);
    props.put("client.id", "gl2-" + nodeId + "-" + input.getId());
    props.put("fetch.min.bytes", String.valueOf(configuration.getInt(CK_FETCH_MIN_BYTES)));
    props.put("fetch.wait.max.ms", String.valueOf(configuration.getInt(CK_FETCH_WAIT_MAX)));
    props.put("zookeeper.connect", configuration.getString(CK_ZOOKEEPER));
    // Default auto commit interval is 60 seconds. Reduce to 1 second to minimize message duplication
    // if something breaks.
    props.put("auto.commit.interval.ms", "1000");
    // Set a consumer timeout to avoid blocking on the consumer iterator.
    props.put("consumer.timeout.ms", "1000");
    final int numThreads = configuration.getInt(CK_THREADS);
    final ConsumerConfig consumerConfig = new ConsumerConfig(props);
    cc = Consumer.createJavaConsumerConnector(consumerConfig);
    final TopicFilter filter = new Whitelist(configuration.getString(CK_TOPIC_FILTER));
    final List<KafkaStream<byte[], byte[]>> streams = cc.createMessageStreamsByFilter(filter, numThreads);
    final ExecutorService executor = executorService(numThreads);
    // This latch is used during shutdown to stop all submitted jobs first, then commit
    // the offsets back to ZooKeeper, and only then shut down the connection.
    // Otherwise the connection would be yanked away from the running consumer runnables.
    stopLatch = new CountDownLatch(streams.size());
    for (final KafkaStream<byte[], byte[]> stream : streams) {
        executor.submit(new Runnable() {

            @Override
            public void run() {
                final ConsumerIterator<byte[], byte[]> consumerIterator = stream.iterator();
                boolean retry;
                do {
                    retry = false;
                    try {
                        // noinspection WhileLoopReplaceableByForEach
                        while (consumerIterator.hasNext()) {
                            if (paused) {
                                // we try not to spin here, so we wait until the lifecycle goes back to running.
                                LOG.debug("Message processing is paused, blocking until message processing is turned back on.");
                                Uninterruptibles.awaitUninterruptibly(pausedLatch);
                            }
                            // check for being stopped before actually getting the message, otherwise we could end up losing that message
                            if (stopped) {
                                break;
                            }
                            if (isThrottled()) {
                                blockUntilUnthrottled();
                            }
                            // Consuming the next message immediately marks it as processed,
                            // which gets tricky if processing below throws an exception.
                            final MessageAndMetadata<byte[], byte[]> message = consumerIterator.next();
                            final byte[] bytes = message.message();
                            // it is possible that the message is null
                            if (bytes == null) {
                                continue;
                            }
                            totalBytesRead.addAndGet(bytes.length);
                            lastSecBytesReadTmp.addAndGet(bytes.length);
                            final RawMessage rawMessage = new RawMessage(bytes);
                            // TODO implement throttling
                            input.processRawMessage(rawMessage);
                        }
                    } catch (ConsumerTimeoutException e) {
                        // Happens when there is nothing to consume, retry to check again.
                        retry = true;
                    } catch (Exception e) {
                        LOG.error("Kafka consumer error, stopping consumer thread.", e);
                    }
                } while (retry && !stopped);
                // explicitly commit our offsets when stopping.
                // this might trigger a couple of times, but it won't hurt
                cc.commitOffsets();
                stopLatch.countDown();
            }
        });
    }
    scheduler.scheduleAtFixedRate(new Runnable() {

        @Override
        public void run() {
            lastSecBytesRead.set(lastSecBytesReadTmp.getAndSet(0));
        }
    }, 1, 1, TimeUnit.SECONDS);
}
Also used: TopicFilter (kafka.consumer.TopicFilter), MessageAndMetadata (kafka.message.MessageAndMetadata), KafkaStream (kafka.consumer.KafkaStream), Properties (java.util.Properties), CountDownLatch (java.util.concurrent.CountDownLatch), ConsumerTimeoutException (kafka.consumer.ConsumerTimeoutException), MisfireException (org.graylog2.plugin.inputs.MisfireException), ConsumerIterator (kafka.consumer.ConsumerIterator), InstrumentedExecutorService (com.codahale.metrics.InstrumentedExecutorService), ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService), ExecutorService (java.util.concurrent.ExecutorService), Whitelist (kafka.consumer.Whitelist), ConsumerConfig (kafka.consumer.ConsumerConfig), RawMessage (org.graylog2.plugin.journal.RawMessage)
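
The executorService(numThreads) helper is referenced but not shown in this snippet. Given the InstrumentedExecutorService import listed above, a plausible sketch, assuming an injected MetricRegistry field and an illustrative metric name (both assumptions, not taken from the source):

private ExecutorService executorService(int numThreads) {
    // Wrap a fixed-size pool so submitted consumer tasks show up in the metrics registry.
    final ExecutorService delegate = Executors.newFixedThreadPool(numThreads);
    return new InstrumentedExecutorService(delegate, metricRegistry,
            MetricRegistry.name(KafkaTransport.class, "executor-service"));
}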

Example 28 with NodeId

use of org.graylog2.plugin.system.NodeId in project graylog2-server by Graylog2.

the class InputServiceImpl method allOfThisNode.

@Override
public List<Input> allOfThisNode(final String nodeId) {
    // Match inputs explicitly assigned to this node as well as global inputs,
    // which are expected to run on every node.
    final List<BasicDBObject> query = ImmutableList.of(
            new BasicDBObject(MessageInput.FIELD_NODE_ID, nodeId),
            new BasicDBObject(MessageInput.FIELD_GLOBAL, true));
    final List<DBObject> ownInputs = query(InputImpl.class, new BasicDBObject("$or", query));
    final ImmutableList.Builder<Input> inputs = ImmutableList.builder();
    for (final DBObject o : ownInputs) {
        inputs.add(new InputImpl((ObjectId) o.get("_id"), o.toMap()));
    }
    return inputs.build();
}
Also used: BasicDBObject (com.mongodb.BasicDBObject), MessageInput (org.graylog2.plugin.inputs.MessageInput), ObjectId (org.bson.types.ObjectId), ImmutableList (com.google.common.collect.ImmutableList), DBObject (com.mongodb.DBObject)
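
On the calling side, the node id string typically comes from the injected NodeId that this example group is about. A minimal usage sketch, assuming injected inputService and nodeId fields; the log message is illustrative:

// All inputs this node must run: the ones pinned to it plus every global input.
final List<Input> inputs = inputService.allOfThisNode(nodeId.toString());
for (final Input input : inputs) {
    LOG.debug("Input <{}> should be running on node <{}>", input.getId(), nodeId);
}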

Example 29 with NodeId

use of org.graylog2.plugin.system.NodeId in project graylog2-server by Graylog2.

the class MessageInputFactory method create.

public MessageInput create(InputCreateRequest lr, String user, String nodeId) throws NoSuchInputTypeException {
    final MessageInput input = create(lr.type(), new Configuration(lr.configuration()));
    input.setTitle(lr.title());
    input.setGlobal(lr.global());
    input.setCreatorUserId(user);
    input.setCreatedAt(Tools.nowUTC());
    // Global inputs run on every node, so only node-local inputs get pinned to a node id.
    if (!lr.global()) {
        input.setNodeId(nodeId);
    }
    return input;
}
Also used: Configuration (org.graylog2.plugin.configuration.Configuration), MessageInput (org.graylog2.plugin.inputs.MessageInput)
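
A hedged usage sketch of this factory from a caller such as a REST resource; the wrapper method, the injected messageInputFactory and nodeId fields, and the error handling are assumptions, not taken from the source:

private MessageInput createForThisNode(InputCreateRequest request, String username) {
    try {
        // Global inputs stay unbound; node-local inputs get pinned to the current node's id.
        return messageInputFactory.create(request, username, nodeId.toString());
    } catch (NoSuchInputTypeException e) {
        throw new IllegalArgumentException("Unknown input type: " + request.type(), e);
    }
}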

Example 30 with NodeId

use of org.graylog2.plugin.system.NodeId in project graylog2-server by Graylog2.

the class V20170110150100_FixAlertConditionsMigrationTest method setUp.

@Before
public void setUp() throws Exception {
    this.clusterConfigService = spy(new ClusterConfigServiceImpl(
            objectMapperProvider,
            fongoRule.getConnection(),
            nodeId,
            new ChainingClassLoader(getClass().getClassLoader()),
            new ClusterEventBus()));
    // Spy on the connection, database, and "streams" collection so the test can
    // verify how the migration interacts with the stored stream documents.
    final MongoConnection mongoConnection = spy(fongoRule.getConnection());
    final MongoDatabase mongoDatabase = spy(fongoRule.getDatabase());
    when(mongoConnection.getMongoDatabase()).thenReturn(mongoDatabase);
    this.collection = spy(mongoDatabase.getCollection("streams"));
    when(mongoDatabase.getCollection("streams")).thenReturn(collection);
    this.migration = new V20170110150100_FixAlertConditionsMigration(mongoConnection, clusterConfigService);
}
Also used: ClusterConfigServiceImpl (org.graylog2.cluster.ClusterConfigServiceImpl), MongoConnection (org.graylog2.database.MongoConnection), ChainingClassLoader (org.graylog2.shared.plugins.ChainingClassLoader), ClusterEventBus (org.graylog2.events.ClusterEventBus), MongoDatabase (com.mongodb.client.MongoDatabase), Before (org.junit.Before)
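
With everything spied, a follow-up test can run the migration and check its side effects. A hypothetical sketch, assuming the migration exposes Graylog's usual upgrade() entry point and records a completion marker via the ClusterConfigService; the assertion is illustrative, not taken from the source:

@Test
public void upgradeWritesCompletionMarker() throws Exception {
    migration.upgrade();
    // The spied ClusterConfigService lets the test assert that the migration
    // persisted some completion marker to the cluster config.
    verify(clusterConfigService, atLeastOnce()).write(any());
}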

Aggregations

Test (org.junit.Test): 13
Timed (com.codahale.metrics.annotation.Timed): 7
ApiOperation (io.swagger.annotations.ApiOperation): 7
Node (org.graylog2.cluster.Node): 7
WebApplicationException (javax.ws.rs.WebApplicationException): 6
Before (org.junit.Before): 6
ChainingClassLoader (org.graylog2.shared.plugins.ChainingClassLoader): 5
GET (javax.ws.rs.GET): 4
Path (javax.ws.rs.Path): 4
Settings (org.elasticsearch.common.settings.Settings): 4
ElasticsearchConfiguration (org.graylog2.configuration.ElasticsearchConfiguration): 4
File (java.io.File): 3
RequiresPermissions (org.apache.shiro.authz.annotation.RequiresPermissions): 3
ClusterConfigServiceImpl (org.graylog2.cluster.ClusterConfigServiceImpl): 3
JadConfig (com.github.joschi.jadconfig.JadConfig): 2
InMemoryRepository (com.github.joschi.jadconfig.repositories.InMemoryRepository): 2
BasicDBObject (com.mongodb.BasicDBObject): 2
DBObject (com.mongodb.DBObject): 2
Map (java.util.Map): 2
PUT (javax.ws.rs.PUT): 2