Example use of org.graylog2.shared.journal.Journal in the project graylog2-server by Graylog2.
From the class MessageFilterChainProcessorTest, method testMessagesCanBeDropped:
@Test
public void testMessagesCanBeDropped() {
    // A chain that ends in a removing filter must produce no output messages.
    final MessageFilter passthroughFilter = new DummyFilter(10);
    final MessageFilter removingFilter = new RemovingMessageFilter();
    final MessageFilterChainProcessor processor = new MessageFilterChainProcessor(
            new MetricRegistry(),
            ImmutableSet.of(passthroughFilter, removingFilter),
            journal,
            serverStatus);

    final DateTime timestamp = new DateTime(2016, 1, 1, 0, 0, DateTimeZone.UTC);
    final Messages result = processor.process(new Message("message", "source", timestamp));

    assertThat(result).isEmpty();
}
Example use of org.graylog2.shared.journal.Journal in the project graylog2-server by Graylog2.
From the class JournalDecode, method runCommand:
@Override
protected void runCommand() {
    // Parse the user-supplied offset range ("N", "N..M", "..M", or "N..").
    final Range<Long> range;
    try {
        range = parseRange(rangeArg);
    } catch (Exception e) {
        System.err.println("Malformed offset range: " + rangeArg);
        return;
    }

    final Map<String, Codec.Factory<? extends Codec>> codecFactory = injector.getInstance(Key.get(new TypeLiteral<Map<String, Codec.Factory<? extends Codec>>>() {
    }));

    // Open-ended ranges have no endpoint on one side; Range#lowerEndpoint()/
    // #upperEndpoint() throw IllegalStateException there, so fall back to
    // defaults instead of crashing on "..M" and "N.." inputs.
    final long readOffset = range.hasLowerBound() ? range.lowerEndpoint() : 0L;
    final long count = range.hasUpperBound() ? range.upperEndpoint() - readOffset + 1 : Long.MAX_VALUE;

    final List<Journal.JournalReadEntry> entries = journal.read(readOffset, count);
    for (final Journal.JournalReadEntry entry : entries) {
        final RawMessage raw = RawMessage.decode(entry.getPayload(), entry.getOffset());
        if (raw == null) {
            // MessageFormatter returns a FormattingTuple; getMessage() yields the
            // rendered text (printing the tuple itself would show its identity).
            System.err.println(MessageFormatter.format("Journal entry at offset {} failed to decode", entry.getOffset()).getMessage());
            continue;
        }
        final Codec codec = codecFactory.get(raw.getCodecName()).create(raw.getCodecConfig());
        final Message message = codec.decode(raw);
        if (message == null) {
            System.err.println(MessageFormatter.arrayFormat("Could not use codec {} to decode raw message id {} at offset {}", new Object[] { raw.getCodecName(), raw.getId(), entry.getOffset() }).getMessage());
        } else {
            message.setJournalOffset(raw.getJournalOffset());
        }
        final ResolvableInetSocketAddress remoteAddress = raw.getRemoteAddress();
        final String remote = remoteAddress == null ? "unknown address" : remoteAddress.getInetSocketAddress().toString();
        // Single-threaded use: StringBuilder avoids StringBuffer's needless locking.
        final StringBuilder sb = new StringBuilder();
        sb.append("Message ").append(raw.getId()).append('\n').append(" at ").append(raw.getTimestamp()).append('\n').append(" in format ").append(raw.getCodecName()).append('\n').append(" at offset ").append(raw.getJournalOffset()).append('\n').append(" received from remote address ").append(remote).append('\n').append(" (source field: ").append(message == null ? "unparsed" : message.getSource()).append(')').append('\n');
        if (message != null) {
            sb.append(" contains ").append(message.getFieldNames().size()).append(" fields.");
        } else {
            sb.append("The message could not be parsed by the given codec.");
        }
        System.out.println(sb);
    }
}

/**
 * Parses an offset-range argument into a Guava {@link Range}.
 *
 * Accepted forms: {@code "N"} (single offset), {@code "N..M"} (closed),
 * {@code "..M"} (at most M), {@code "N.."} (at least N).
 *
 * @param arg the raw range argument, e.g. {@code "5..10"}
 * @return the parsed range
 * @throws RuntimeException if the argument has more than two parts
 * @throws NumberFormatException if an endpoint is not a valid long
 */
private static Range<Long> parseRange(final String arg) {
    final List<String> offsets = Splitter.on("..").limit(2).splitToList(arg);
    if (offsets.size() == 1) {
        return Range.singleton(Long.valueOf(offsets.get(0)));
    } else if (offsets.size() == 2) {
        final String first = offsets.get(0);
        final String second = offsets.get(1);
        if (first.isEmpty()) {
            return Range.atMost(Long.valueOf(second));
        } else if (second.isEmpty()) {
            return Range.atLeast(Long.valueOf(first));
        } else {
            return Range.closed(Long.valueOf(first), Long.valueOf(second));
        }
    }
    throw new RuntimeException();
}
Example use of org.graylog2.shared.journal.Journal in the project graylog2-server by Graylog2.
From the class JournalResource, method show:
@GET
@Timed
@ApiOperation(value = "Get current state of the journal on this node.")
@RequiresPermissions(RestPermissions.JOURNAL_READ)
public JournalSummaryResponse show() {
    // Journal switched off entirely: nothing to report.
    if (!journalEnabled) {
        return JournalSummaryResponse.createDisabled();
    }
    // Only the Kafka-backed journal exposes the statistics we need.
    if (!(journal instanceof KafkaJournal)) {
        log.warn("Unknown Journal implementation {} in use, cannot get information about it. Pretending journal is disabled.", journal.getClass());
        return JournalSummaryResponse.createDisabled();
    }
    final KafkaJournal kafka = (KafkaJournal) journal;
    final ThrottleState throttle = kafka.getThrottleState();
    // Find the creation timestamp of the oldest segment on disk.
    long oldestSegmentTs = Long.MAX_VALUE;
    for (final LogSegment seg : kafka.getSegments()) {
        if (seg.created() < oldestSegmentTs) {
            oldestSegmentTs = seg.created();
        }
    }
    return JournalSummaryResponse.createEnabled(
            throttle.appendEventsPerSec,
            throttle.readEventsPerSec,
            throttle.uncommittedJournalEntries,
            Size.bytes(throttle.journalSize),
            Size.bytes(throttle.journalSizeLimit),
            kafka.numberOfSegments(),
            new DateTime(oldestSegmentTs, DateTimeZone.UTC),
            kafkaJournalConfiguration);
}
Example use of org.graylog2.shared.journal.Journal in the project graylog2-server by Graylog2.
From the class KafkaJournalTest, method segmentAgeCleanup:
@Test
public void segmentAgeCleanup() throws Exception {
    // Pin joda-time's clock so segment ages are fully deterministic.
    final InstantMillisProvider clock = new InstantMillisProvider(DateTime.now(DateTimeZone.UTC));
    DateTimeUtils.setCurrentMillisProvider(clock);
    try {
        final Size segmentSize = Size.kilobytes(1L);
        // Retention age is 1 minute (4th argument is Duration.standardMinutes(1) below... see ctor).
        final KafkaJournal journal = new KafkaJournal(journalDirectory, scheduler, segmentSize, Duration.standardHours(1), Size.kilobytes(10L), Duration.standardMinutes(1), 1_000_000, Duration.standardMinutes(1), 100, new MetricRegistry(), serverStatus);
        final File messageJournalDir = new File(journalDirectory, "messagejournal-0");
        assertTrue(messageJournalDir.exists());
        // we need to fix up the last modified times of the actual files.
        long[] lastModifiedTs = new long[2];
        // create two chunks, 30 seconds apart
        createBulkChunks(journal, segmentSize, 1);
        journal.flushDirtyLogs();
        lastModifiedTs[0] = clock.getMillis();
        clock.tick(Period.seconds(30));
        createBulkChunks(journal, segmentSize, 1);
        journal.flushDirtyLogs();
        lastModifiedTs[1] = clock.getMillis();
        int i = 0;
        for (final LogSegment segment : journal.getSegments()) {
            assertTrue(i < 2);
            segment.lastModified_$eq(lastModifiedTs[i]);
            i++;
        }
        // JUnit's assertEquals signature is (message, expected, actual);
        // the expected value goes before the observed one so that failure
        // messages read correctly.
        int cleanedLogs = journal.cleanupLogs();
        assertEquals("no segments should've been cleaned", 0, cleanedLogs);
        assertEquals("two segments segment should remain", 2, countSegmentsInDir(messageJournalDir));
        // move clock beyond the retention period and clean again
        clock.tick(Period.seconds(120));
        cleanedLogs = journal.cleanupLogs();
        assertEquals("two segments should've been cleaned (only one will actually be removed...)", 2, cleanedLogs);
        assertEquals("one segment should remain", 1, countSegmentsInDir(messageJournalDir));
    } finally {
        // Always restore the real clock so other tests are unaffected.
        DateTimeUtils.setCurrentMillisSystem();
    }
}
Aggregations