
Example 1 with Journal

Use of org.graylog2.shared.journal.Journal in project graylog2-server by Graylog2.

From the class RawMessage, method encode():

public byte[] encode() {
    try {
        final JournalMessages.CodecInfo codec = msgBuilder.getCodec();
        final JournalMessages.CodecInfo.Builder builder = JournalMessages.CodecInfo.newBuilder(codec);
        final String codecConfigJson = codecConfig.serializeToJson();
        if (codecConfigJson != null) {
            builder.setConfig(codecConfigJson);
        }
        msgBuilder.setCodec(builder.build());
        final JournalMessage journalMessage = msgBuilder.build();
        return journalMessage.toByteArray();
    } catch (UninitializedMessageException e) {
        log.error("Unable to write RawMessage to journal because required fields are missing, " + "this message will be discarded. This is a bug.", e);
        return null;
    }
}
Also used: UninitializedMessageException (com.google.protobuf.UninitializedMessageException), JournalMessage (org.graylog2.plugin.journal.JournalMessages.JournalMessage), ByteString (com.google.protobuf.ByteString)
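
The null return matters to callers: a message that cannot be encoded is dropped rather than journaled. Below is a minimal caller-side sketch, assuming the Journal interface exposes createEntry(byte[], byte[]) and write(List<Entry>) the way KafkaJournal implements them, and that RawMessage provides getIdBytes(); treat these member names as assumptions rather than confirmed API.

import java.util.Collections;

import org.graylog2.plugin.journal.RawMessage;
import org.graylog2.shared.journal.Journal;

public class JournalWriteSketch {
    // Hedged sketch: append one encoded RawMessage to the journal, skipping it when
    // encode() returned null (required protobuf fields were missing and the message
    // was already logged and discarded). createEntry/write/getIdBytes are assumptions.
    public static void journalRawMessage(Journal journal, RawMessage rawMessage) {
        final byte[] encoded = rawMessage.encode();
        if (encoded == null) {
            return;
        }
        final Journal.Entry entry = journal.createEntry(rawMessage.getIdBytes(), encoded);
        journal.write(Collections.singletonList(entry));
    }
}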

Example 2 with Journal

Use of org.graylog2.shared.journal.Journal in project graylog2-server by Graylog2.

From the class MessageFilterChainProcessorTest, method testFiltersAreOrdered():

@Test
public void testFiltersAreOrdered() {
    final DummyFilter third = new DummyFilter(30);
    final DummyFilter first = new DummyFilter(10);
    final DummyFilter second = new DummyFilter(20);
    final Set<MessageFilter> filters = ImmutableSet.of(third, first, second);
    final MessageFilterChainProcessor processor = new MessageFilterChainProcessor(new MetricRegistry(), filters, journal, serverStatus);
    final List<MessageFilter> filterRegistry = processor.getFilterRegistry();
    Assert.assertEquals(filterRegistry.get(0), first);
    Assert.assertEquals(filterRegistry.get(1), second);
    Assert.assertEquals(filterRegistry.get(2), third);
}
Also used: MetricRegistry (com.codahale.metrics.MetricRegistry), MessageFilter (org.graylog2.plugin.filters.MessageFilter), Test (org.junit.Test)
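
The assertions pass because MessageFilterChainProcessor sorts the registered filters by priority, with a lower value running earlier (10 before 20 before 30). A hedged sketch of what a test helper like DummyFilter can look like, assuming MessageFilter declares filter(Message), getName() and getPriority(); the real DummyFilter test class may differ.

import org.graylog2.plugin.Message;
import org.graylog2.plugin.filters.MessageFilter;

// Hedged sketch: a no-op filter whose only job is to carry a priority so the
// processor's ordering can be asserted. Returning false from filter() leaves
// the message untouched.
class DummyFilter implements MessageFilter {
    private final int priority;

    DummyFilter(int priority) {
        this.priority = priority;
    }

    @Override
    public boolean filter(Message msg) {
        return false;
    }

    @Override
    public String getName() {
        return "Dummy filter " + priority;
    }

    @Override
    public int getPriority() {
        return priority;
    }
}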

Example 3 with Journal

Use of org.graylog2.shared.journal.Journal in project graylog2-server by Graylog2.

From the class IndexerSetupService, method startUp():

@Override
protected void startUp() throws Exception {
    Tools.silenceUncaughtExceptionsInThisThread();
    LOG.debug("Starting indexer");
    node.start();
    final Client client = node.client();
    try {
        /* Try to determine the cluster health. If this times out, we could not connect, and we try to determine
           whether anything is listening at all. If that happens, it usually has one of these reasons:
             1. cluster.name is different
             2. network.publish_host is not reachable
             3. wrong address configured
             4. multicast in use but broken in this environment
           We handle a red cluster state differently, because if we can get that result the cluster itself is
           reachable, which is a completely different problem from not being able to join at all. */
        final ClusterHealthRequest atLeastRed = client.admin().cluster().prepareHealth().setWaitForStatus(ClusterHealthStatus.RED).request();
        final ClusterHealthResponse health = client.admin().cluster().health(atLeastRed).actionGet(configuration.getClusterDiscoveryTimeout(), MILLISECONDS);
        // we don't get here if we couldn't join the cluster. just check for red cluster state
        if (ClusterHealthStatus.RED.equals(health.getStatus())) {
            final Notification notification = notificationService.buildNow().addSeverity(Notification.Severity.URGENT).addType(Notification.Type.ES_CLUSTER_RED);
            notificationService.publishIfFirst(notification);
            LOG.warn("The Elasticsearch cluster state is RED which means shards are unassigned.");
            LOG.info("This usually indicates a crashed and corrupt cluster and needs to be investigated. Graylog will write into the local disk journal.");
            LOG.info("See {} for details.", DocsHelper.PAGE_ES_CONFIGURATION);
        }
        if (ClusterHealthStatus.GREEN.equals(health.getStatus())) {
            notificationService.fixed(Notification.Type.ES_CLUSTER_RED);
        }
        notificationService.fixed(Notification.Type.ES_UNAVAILABLE);
    } catch (ElasticsearchTimeoutException e) {
        final String hosts = node.settings().get("discovery.zen.ping.unicast.hosts");
        if (!isNullOrEmpty(hosts)) {
            final Iterable<String> hostList = Splitter.on(',').omitEmptyStrings().trimResults().split(hosts);
            for (String host : hostList) {
                final URI esUri = URI.create("http://" + HostAndPort.fromString(host).getHostText() + ":9200/");
                LOG.info("Checking Elasticsearch HTTP API at {}", esUri);
                // Try the HTTP API endpoint
                final Request request = new Request.Builder().get().url(esUri.resolve("/_nodes").toString()).build();
                try (final Response response = httpClient.newCall(request).execute()) {
                    if (response.isSuccessful()) {
                        final JsonNode resultTree = objectMapper.readTree(response.body().byteStream());
                        final JsonNode nodesList = resultTree.get("nodes");
                        if (!configuration.isDisableVersionCheck()) {
                            final Iterator<String> nodes = nodesList.fieldNames();
                            while (nodes.hasNext()) {
                                final String id = nodes.next();
                                final Version clusterVersion = Version.fromString(nodesList.get(id).get("version").textValue());
                                checkClusterVersion(clusterVersion);
                            }
                        }
                        final String clusterName = resultTree.get("cluster_name").textValue();
                        checkClusterName(clusterName);
                    } else {
                        LOG.error("Could not connect to Elasticsearch at " + esUri + ". Is it running?");
                    }
                } catch (IOException ioException) {
                    LOG.error("Could not connect to Elasticsearch: {}", ioException.getMessage());
                }
            }
        }
        final Notification notification = notificationService.buildNow().addSeverity(Notification.Severity.URGENT).addType(Notification.Type.ES_UNAVAILABLE);
        notificationService.publishIfFirst(notification);
        LOG.warn("Could not connect to Elasticsearch");
        LOG.info("If you're using multicast, check that it is working in your network and that Elasticsearch is accessible. Also check that the cluster name setting is correct.");
        LOG.info("See {} for details.", DocsHelper.PAGE_ES_CONFIGURATION);
    }
}
Also used: ClusterHealthResponse (org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse), ClusterHealthRequest (org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest), ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder), Request (okhttp3.Request), JsonNode (com.fasterxml.jackson.databind.JsonNode), IOException (java.io.IOException), URI (java.net.URI), Notification (org.graylog2.notifications.Notification), Response (okhttp3.Response), ElasticsearchTimeoutException (org.elasticsearch.ElasticsearchTimeoutException), Version (org.elasticsearch.Version), Iterator (java.util.Iterator), Client (org.elasticsearch.client.Client), OkHttpClient (okhttp3.OkHttpClient)
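
checkClusterVersion and checkClusterName are private helpers of IndexerSetupService and are not shown above. A hedged sketch of the kind of checks they perform follows, with the expected values passed in explicitly to keep the sketch self-contained (the real methods take a single argument and read the expected values from the service's configuration); only Version.before(Version) is assumed from the Elasticsearch API.

import org.elasticsearch.Version;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class ClusterCheckSketch {
    private static final Logger LOG = LoggerFactory.getLogger(ClusterCheckSketch.class);

    // Hedged sketch: warn when a node reports a version older than the supported minimum.
    static void checkClusterVersion(Version clusterVersion, Version minimumSupported) {
        if (clusterVersion.before(minimumSupported)) {
            LOG.error("Elasticsearch node reports version {}, but at least {} is required.",
                    clusterVersion, minimumSupported);
        }
    }

    // Hedged sketch: warn when the reachable cluster is not the one Graylog is configured
    // to join, which is reason 1 in the comment above (cluster.name is different).
    static void checkClusterName(String reportedClusterName, String configuredClusterName) {
        if (!configuredClusterName.equals(reportedClusterName)) {
            LOG.warn("Elasticsearch cluster name mismatch: configured \"{}\" but the HTTP API reports \"{}\".",
                    configuredClusterName, reportedClusterName);
        }
    }
}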

Example 4 with Journal

Use of org.graylog2.shared.journal.Journal in project graylog2-server by Graylog2.

From the class ClusterJournalResource, method get():

@GET
@Timed
@ApiOperation(value = "Get message journal information of a given node")
@RequiresPermissions(RestPermissions.JOURNAL_READ)
public JournalSummaryResponse get(@ApiParam(name = "nodeId", value = "The id of the node to get message journal information.", required = true) @PathParam("nodeId") String nodeId) throws IOException, NodeNotFoundException {
    final Node targetNode = nodeService.byNodeId(nodeId);
    final RemoteJournalResource remoteJournalResource = remoteInterfaceProvider.get(targetNode, this.authenticationToken, RemoteJournalResource.class);
    final Response<JournalSummaryResponse> response = remoteJournalResource.get().execute();
    if (response.isSuccessful()) {
        return response.body();
    } else {
        LOG.warn("Unable to get message journal information on node {}: {}", nodeId, response.message());
        throw new WebApplicationException(response.message(), BAD_GATEWAY);
    }
}
Also used: RemoteJournalResource (org.graylog2.rest.resources.system.RemoteJournalResource), WebApplicationException (javax.ws.rs.WebApplicationException), Node (org.graylog2.cluster.Node), JournalSummaryResponse (org.graylog2.rest.resources.system.responses.JournalSummaryResponse), RequiresPermissions (org.apache.shiro.authz.annotation.RequiresPermissions), Timed (com.codahale.metrics.annotation.Timed), GET (javax.ws.rs.GET), ApiOperation (io.swagger.annotations.ApiOperation)
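
RemoteJournalResource is obtained through remoteInterfaceProvider, and the Response/execute()/isSuccessful() calls above follow the synchronous Retrofit 2 pattern. A hedged sketch of what such a remote interface typically looks like; the "/system/journal" path is an assumption, not taken from the snippet.

import org.graylog2.rest.resources.system.responses.JournalSummaryResponse;

import retrofit2.Call;
import retrofit2.http.GET;

// Hedged sketch: a Retrofit 2 interface whose Call is executed synchronously by the
// cluster resource above and proxied to the target node.
public interface RemoteJournalResource {
    @GET("/system/journal")
    Call<JournalSummaryResponse> get();
}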

Example 5 with Journal

Use of org.graylog2.shared.journal.Journal in project graylog2-server by Graylog2.

From the class ThrottleStateUpdaterThread, method doRun():

@Override
public void doRun() {
    throttleState = new ThrottleState(throttleState);
    final long committedOffset = journal.getCommittedOffset();
    // TODO there's a lot of duplication around this class. Probably should be refactored a bit.
    // also update metrics for each of the values, so clients can get to it cheaply
    long prevTs = currentTs;
    currentTs = System.nanoTime();
    long previousLogEndOffset = logEndOffset;
    long previousReadOffset = currentReadOffset;
    long logStartOffset = journal.getLogStartOffset();
    // -1 because getLogEndOffset is the next offset that gets assigned
    logEndOffset = journal.getLogEndOffset() - 1;
    // just to make it clear which field we read
    currentReadOffset = journal.getNextReadOffset() - 1;
    // for the first run, don't send an update, there's no previous data available to calc rates
    if (firstRun) {
        firstRun = false;
        return;
    }
    throttleState.appendEventsPerSec = (long) Math.floor((logEndOffset - previousLogEndOffset) / ((currentTs - prevTs) / 1.0E09));
    throttleState.readEventsPerSec = (long) Math.floor((currentReadOffset - previousReadOffset) / ((currentTs - prevTs) / 1.0E09));
    throttleState.journalSize = journal.size();
    throttleState.journalSizeLimit = retentionSize.toBytes();
    throttleState.processBufferCapacity = processBuffer.getRemainingCapacity();
    if (committedOffset == KafkaJournal.DEFAULT_COMMITTED_OFFSET) {
        // nothing committed at all, the entire log is uncommitted, or completely empty.
        throttleState.uncommittedJournalEntries = journal.size() == 0 ? 0 : logEndOffset - logStartOffset;
    } else {
        throttleState.uncommittedJournalEntries = logEndOffset - committedOffset;
    }
    log.debug("ThrottleState: {}", throttleState);
    // the journal needs this to provide information to rest clients
    journal.setThrottleState(throttleState);
    // Abusing the current thread to send notifications from KafkaJournal in the graylog2-shared module
    final double journalUtilizationPercentage = throttleState.journalSizeLimit > 0 ? (throttleState.journalSize * 100) / throttleState.journalSizeLimit : 0.0;
    if (journalUtilizationPercentage > KafkaJournal.NOTIFY_ON_UTILIZATION_PERCENTAGE) {
        Notification notification = notificationService.buildNow().addNode(serverStatus.getNodeId().toString()).addType(Notification.Type.JOURNAL_UTILIZATION_TOO_HIGH).addSeverity(Notification.Severity.URGENT).addDetail("journal_utilization_percentage", journalUtilizationPercentage);
        notificationService.publishIfFirst(notification);
    }
    if (journal.getPurgedSegmentsInLastRetention() > 0) {
        Notification notification = notificationService.buildNow().addNode(serverStatus.getNodeId().toString()).addType(Notification.Type.JOURNAL_UNCOMMITTED_MESSAGES_DELETED).addSeverity(Notification.Severity.URGENT);
        notificationService.publishIfFirst(notification);
    }
}
Also used: ThrottleState (org.graylog2.plugin.ThrottleState), Notification (org.graylog2.notifications.Notification)
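
The two rate fields are plain offset deltas divided by the elapsed wall-clock time in seconds, derived from System.nanoTime(). A small self-contained sketch of that arithmetic (the helper name and class are made up for illustration):

// Hedged sketch: mirrors the appendEventsPerSec/readEventsPerSec calculation in doRun(),
// flooring the result exactly like the original code.
class ThrottleMathSketch {
    static long eventsPerSecond(long currentOffset, long previousOffset,
                                long currentNanos, long previousNanos) {
        final double elapsedSeconds = (currentNanos - previousNanos) / 1.0E9;
        return (long) Math.floor((currentOffset - previousOffset) / elapsedSeconds);
    }
}

For example, 5,000 newly appended offsets over an elapsed 2 seconds yields an appendEventsPerSec of 2,500.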

Aggregations

MetricRegistry (com.codahale.metrics.MetricRegistry): 5
Test (org.junit.Test): 5
Message (org.graylog2.plugin.Message): 4
MessageFilter (org.graylog2.plugin.filters.MessageFilter): 4
DateTime (org.joda.time.DateTime): 3
Timer (com.codahale.metrics.Timer): 2
Timed (com.codahale.metrics.annotation.Timed): 2
ApiOperation (io.swagger.annotations.ApiOperation): 2
IOException (java.io.IOException): 2
ArrayList (java.util.ArrayList): 2
GET (javax.ws.rs.GET): 2
LogSegment (kafka.log.LogSegment): 2
RequiresPermissions (org.apache.shiro.authz.annotation.RequiresPermissions): 2
Notification (org.graylog2.notifications.Notification): 2
Messages (org.graylog2.plugin.Messages): 2
ThrottleState (org.graylog2.plugin.ThrottleState): 2
RawMessage (org.graylog2.plugin.journal.RawMessage): 2
HdrTimer (org.graylog2.shared.metrics.HdrTimer): 2
JsonNode (com.fasterxml.jackson.databind.JsonNode): 1
Size (com.github.joschi.jadconfig.util.Size): 1