Use of org.graylog2.shared.journal.KafkaJournal in project graylog2-server by Graylog2.
The class ThrottleStateUpdaterThread, method doRun.
@Override
public void doRun() {
    throttleState = new ThrottleState(throttleState);
    final long committedOffset = journal.getCommittedOffset();
    // TODO there's a lot of duplication around this class. Probably should be refactored a bit.
    // also update metrics for each of the values, so clients can get to it cheaply
    long prevTs = currentTs;
    currentTs = System.nanoTime();

    long previousLogEndOffset = logEndOffset;
    long previousReadOffset = currentReadOffset;
    long logStartOffset = journal.getLogStartOffset();
    // -1 because getLogEndOffset is the next offset that gets assigned
    logEndOffset = journal.getLogEndOffset() - 1;
    // just to make it clear which field we read
    currentReadOffset = journal.getNextReadOffset() - 1;

    // for the first run, don't send an update, there's no previous data available to calc rates
    if (firstRun) {
        firstRun = false;
        return;
    }

    throttleState.appendEventsPerSec = (long) Math.floor((logEndOffset - previousLogEndOffset) / ((currentTs - prevTs) / 1.0E09));
    throttleState.readEventsPerSec = (long) Math.floor((currentReadOffset - previousReadOffset) / ((currentTs - prevTs) / 1.0E09));
    throttleState.journalSize = journal.size();
    throttleState.journalSizeLimit = retentionSize.toBytes();
    throttleState.processBufferCapacity = processBuffer.getRemainingCapacity();

    if (committedOffset == KafkaJournal.DEFAULT_COMMITTED_OFFSET) {
        // nothing committed at all, the entire log is uncommitted, or completely empty.
        throttleState.uncommittedJournalEntries = journal.size() == 0 ? 0 : logEndOffset - logStartOffset;
    } else {
        throttleState.uncommittedJournalEntries = logEndOffset - committedOffset;
    }
    log.debug("ThrottleState: {}", throttleState);

    // the journal needs this to provide information to rest clients
    journal.setThrottleState(throttleState);

    // Abusing the current thread to send notifications from KafkaJournal in the graylog2-shared module
    final double journalUtilizationPercentage =
            throttleState.journalSizeLimit > 0 ? (throttleState.journalSize * 100) / throttleState.journalSizeLimit : 0.0;
    if (journalUtilizationPercentage > KafkaJournal.NOTIFY_ON_UTILIZATION_PERCENTAGE) {
        Notification notification = notificationService.buildNow()
                .addNode(serverStatus.getNodeId().toString())
                .addType(Notification.Type.JOURNAL_UTILIZATION_TOO_HIGH)
                .addSeverity(Notification.Severity.URGENT)
                .addDetail("journal_utilization_percentage", journalUtilizationPercentage);
        notificationService.publishIfFirst(notification);
    }

    if (journal.getPurgedSegmentsInLastRetention() > 0) {
        Notification notification = notificationService.buildNow()
                .addNode(serverStatus.getNodeId().toString())
                .addType(Notification.Type.JOURNAL_UNCOMMITTED_MESSAGES_DELETED)
                .addSeverity(Notification.Severity.URGENT);
        notificationService.publishIfFirst(notification);
    }
}
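The appendEventsPerSec and readEventsPerSec figures above are plain offset deltas divided by the elapsed wall-clock time between two ticks. A minimal, self-contained sketch of that calculation, using hypothetical offset and nanosecond values rather than real journal state:

public class RateCalculationSketch {
    public static void main(String[] args) {
        // Hypothetical sample values; in ThrottleStateUpdaterThread these come from
        // journal.getLogEndOffset() and System.nanoTime() on two consecutive runs.
        long previousLogEndOffset = 1_000L;
        long logEndOffset = 2_500L;
        long prevTs = 0L;                  // previous System.nanoTime() sample
        long currentTs = 3_000_000_000L;   // current sample, taken 3 seconds later

        // Same formula as above: events appended per second, rounded down.
        long appendEventsPerSec = (long) Math.floor(
                (logEndOffset - previousLogEndOffset) / ((currentTs - prevTs) / 1.0E09));

        System.out.println(appendEventsPerSec); // prints 500
    }
}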
Use of org.graylog2.shared.journal.KafkaJournal in project graylog2-server by Graylog2.
The class JournalResource, method show.
@GET
@Timed
@ApiOperation(value = "Get current state of the journal on this node.")
@RequiresPermissions(RestPermissions.JOURNAL_READ)
public JournalSummaryResponse show() {
    if (!journalEnabled) {
        return JournalSummaryResponse.createDisabled();
    }
    if (journal instanceof KafkaJournal) {
        final KafkaJournal kafkaJournal = (KafkaJournal) journal;
        final ThrottleState throttleState = kafkaJournal.getThrottleState();

        long oldestSegment = Long.MAX_VALUE;
        for (final LogSegment segment : kafkaJournal.getSegments()) {
            oldestSegment = Math.min(oldestSegment, segment.created());
        }

        return JournalSummaryResponse.createEnabled(throttleState.appendEventsPerSec,
                throttleState.readEventsPerSec,
                throttleState.uncommittedJournalEntries,
                Size.bytes(throttleState.journalSize),
                Size.bytes(throttleState.journalSizeLimit),
                kafkaJournal.numberOfSegments(),
                new DateTime(oldestSegment, DateTimeZone.UTC),
                kafkaJournalConfiguration);
    }
    log.warn("Unknown Journal implementation {} in use, cannot get information about it. Pretending journal is disabled.",
            journal.getClass());
    return JournalSummaryResponse.createDisabled();
}
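The journal size and size-limit values exposed here are the same ones ThrottleStateUpdaterThread feeds into its utilization notification. A minimal sketch of that percentage check, using hypothetical byte values and an assumed 95% threshold standing in for KafkaJournal.NOTIFY_ON_UTILIZATION_PERCENTAGE:

public class UtilizationCheckSketch {
    public static void main(String[] args) {
        // Hypothetical sample values; in Graylog these come from ThrottleState.
        long journalSize = 4_800_000_000L;      // bytes currently used by the journal
        long journalSizeLimit = 5_000_000_000L; // configured retention size in bytes
        double notifyThreshold = 95.0;          // assumed threshold for this sketch

        // Same expression as in ThrottleStateUpdaterThread (note the integer division).
        double utilizationPercentage = journalSizeLimit > 0
                ? (journalSize * 100) / journalSizeLimit
                : 0.0;

        if (utilizationPercentage > notifyThreshold) {
            System.out.println("journal utilization too high: " + utilizationPercentage + "%"); // 96.0%
        }
    }
}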
Use of org.graylog2.shared.journal.KafkaJournal in project graylog2-server by Graylog2.
The class KafkaJournalTest, method segmentAgeCleanup.
@Test
public void segmentAgeCleanup() throws Exception {
    final InstantMillisProvider clock = new InstantMillisProvider(DateTime.now(DateTimeZone.UTC));
    DateTimeUtils.setCurrentMillisProvider(clock);
    try {
        final Size segmentSize = Size.kilobytes(1L);
        final KafkaJournal journal = new KafkaJournal(journalDirectory,
                scheduler,
                segmentSize,
                Duration.standardHours(1),
                Size.kilobytes(10L),
                Duration.standardMinutes(1),
                1_000_000,
                Duration.standardMinutes(1),
                100,
                new MetricRegistry(),
                serverStatus);
        final File messageJournalDir = new File(journalDirectory, "messagejournal-0");
        assertTrue(messageJournalDir.exists());

        // we need to fix up the last modified times of the actual files.
        long[] lastModifiedTs = new long[2];

        // create two chunks, 30 seconds apart
        createBulkChunks(journal, segmentSize, 1);
        journal.flushDirtyLogs();
        lastModifiedTs[0] = clock.getMillis();

        clock.tick(Period.seconds(30));

        createBulkChunks(journal, segmentSize, 1);
        journal.flushDirtyLogs();
        lastModifiedTs[1] = clock.getMillis();

        int i = 0;
        for (final LogSegment segment : journal.getSegments()) {
            assertTrue(i < 2);
            segment.lastModified_$eq(lastModifiedTs[i]);
            i++;
        }

        int cleanedLogs = journal.cleanupLogs();
        assertEquals("no segments should've been cleaned", 0, cleanedLogs);
        assertEquals("two segments should remain", 2, countSegmentsInDir(messageJournalDir));

        // move clock beyond the retention period and clean again
        clock.tick(Period.seconds(120));

        cleanedLogs = journal.cleanupLogs();
        assertEquals("two segments should've been cleaned (only one will actually be removed...)", 2, cleanedLogs);
        assertEquals("one segment should remain", 1, countSegmentsInDir(messageJournalDir));
    } finally {
        DateTimeUtils.setCurrentMillisSystem();
    }
}
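The test pins Joda-Time to a controllable clock via DateTimeUtils.setCurrentMillisProvider and advances it with tick(...) so that segment age, not real time, drives the retention cleanup. A minimal sketch of that pattern; FixedClock is a hypothetical stand-in for Graylog's InstantMillisProvider test helper:

import org.joda.time.DateTime;
import org.joda.time.DateTimeUtils;
import org.joda.time.DateTimeZone;
import org.joda.time.Period;

public class FakeClockSketch {
    // Hypothetical fake clock: returns a controllable instant to Joda-Time.
    static class FixedClock implements DateTimeUtils.MillisProvider {
        private DateTime now;

        FixedClock(DateTime start) {
            this.now = start;
        }

        // Advance the fake clock, e.g. clock.tick(Period.seconds(30)).
        void tick(Period period) {
            now = now.plus(period);
        }

        @Override
        public long getMillis() {
            return now.getMillis();
        }
    }

    public static void main(String[] args) {
        final FixedClock clock = new FixedClock(DateTime.now(DateTimeZone.UTC));
        DateTimeUtils.setCurrentMillisProvider(clock);
        try {
            long before = DateTimeUtils.currentTimeMillis();
            clock.tick(Period.seconds(120));
            long after = DateTimeUtils.currentTimeMillis();
            System.out.println("clock advanced by " + (after - before) + " ms"); // 120000 ms
        } finally {
            // Always restore the system clock, as the test does in its finally block.
            DateTimeUtils.setCurrentMillisSystem();
        }
    }
}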