Use of org.graylog.shaded.kafka09.log.LogSegment in the project graylog2-server by Graylog2.
From the class JournalShow, the method appendSegmentDetails:
/**
 * Appends a human-readable, tab-indented description of every log segment in
 * the given journal to the supplied buffer: a numbered header per segment
 * followed by its base offset, size in bytes, and creation/modification
 * timestamps rendered in UTC.
 */
private void appendSegmentDetails(LocalKafkaJournal journal, StringBuilder sb) {
    int segmentNumber = 1;
    for (final LogSegment segment : journal.getSegments()) {
        sb.append("\t\tSegment ").append(segmentNumber).append("\n")
          .append("\t\t\tBase offset: ").append(segment.baseOffset()).append("\n")
          .append("\t\t\tSize in bytes: ").append(segment.size()).append("\n")
          .append("\t\t\tCreated at: ").append(new DateTime(segment.created(), DateTimeZone.UTC)).append("\n")
          .append("\t\t\tLast modified: ").append(new DateTime(segment.lastModified(), DateTimeZone.UTC)).append("\n");
        segmentNumber++;
    }
}
Use of org.graylog.shaded.kafka09.log.LogSegment in the project graylog2-server by Graylog2.
From the class LocalKafkaJournal, the method getLogStartOffset:
/**
 * Returns the first valid offset in the entire journal, i.e. the base offset
 * of the oldest segment, or 0 when the journal currently has no segments.
 *
 * @return first offset
 */
public long getLogStartOffset() {
    final Iterable<LogSegment> logSegments = JavaConversions.asJavaIterable(kafkaLog.logSegments());
    final LogSegment firstSegment = Iterables.getFirst(logSegments, null);
    // An empty journal has no segments; by convention its start offset is 0.
    return firstSegment == null ? 0 : firstSegment.baseOffset();
}
Use of org.graylog.shaded.kafka09.log.LogSegment in the project graylog2-server by Graylog2.
From the class LocalKafkaJournalTest, the method segmentAgeCleanup:
@Test
public void segmentAgeCleanup() throws Exception {
    // Pin Joda-Time's "now" to a controllable fake clock so segment ages are deterministic.
    final InstantMillisProvider clock = new InstantMillisProvider(DateTime.now(DateTimeZone.UTC));
    DateTimeUtils.setCurrentMillisProvider(clock);
    try {
        final Size segmentSize = Size.kilobytes(1L);
        // Journal configured with a 1-minute segment age retention (6th argument)
        // so segments older than that are eligible for cleanup below.
        final LocalKafkaJournal journal = new LocalKafkaJournal(journalDirectory.toPath(), scheduler, segmentSize, Duration.standardHours(1), Size.kilobytes(10L), Duration.standardMinutes(1), 1_000_000, Duration.standardMinutes(1), 100, new MetricRegistry(), serverStatus);
        final File messageJournalDir = new File(journalDirectory, "messagejournal-0");
        assertTrue(messageJournalDir.exists());
        // we need to fix up the last modified times of the actual files.
        long[] lastModifiedTs = new long[2];
        // create two chunks, 30 seconds apart
        createBulkChunks(journal, segmentSize, 1);
        journal.flushDirtyLogs();
        lastModifiedTs[0] = clock.getMillis();
        clock.tick(Period.seconds(30));
        createBulkChunks(journal, segmentSize, 1);
        journal.flushDirtyLogs();
        lastModifiedTs[1] = clock.getMillis();
        // Overwrite each segment's lastModified with the fake-clock timestamps,
        // because the real file mtimes come from the system clock, not our provider.
        int i = 0;
        for (final LogSegment segment : journal.getSegments()) {
            // Sanity check: exactly the two segments we created should exist.
            assertTrue(i < 2);
            segment.lastModified_$eq(lastModifiedTs[i]);
            i++;
        }
        // Both segments are younger than the 1-minute retention, so nothing is cleaned yet.
        int cleanedLogs = journal.cleanupLogs();
        assertEquals("no segments should've been cleaned", 0, cleanedLogs);
        assertEquals("two segments segment should remain", 2, countSegmentsInDir(messageJournalDir));
        // move clock beyond the retention period and clean again
        clock.tick(Period.seconds(120));
        cleanedLogs = journal.cleanupLogs();
        // NOTE(review): cleanupLogs reports 2 cleaned but Kafka keeps the active
        // segment on disk, so only one file is actually deleted — confirm against
        // LocalKafkaJournal.cleanupLogs semantics.
        assertEquals("two segments should've been cleaned (only one will actually be removed...)", 2, cleanedLogs);
        assertEquals("one segment should remain", 1, countSegmentsInDir(messageJournalDir));
    } finally {
        // Always restore the real system clock so other tests are unaffected.
        DateTimeUtils.setCurrentMillisSystem();
    }
}
Use of org.graylog.shaded.kafka09.log.LogSegment in the project graylog2-server by Graylog2.
From the class JournalResource, the method show:
@GET
@Timed
@ApiOperation(value = "Get current state of the journal on this node.")
@RequiresPermissions(RestPermissions.JOURNAL_READ)
public JournalSummaryResponse show() {
    // Journal disabled by configuration: report the disabled summary immediately.
    if (!journalEnabled) {
        return JournalSummaryResponse.createDisabled();
    }
    // Only the local Kafka-backed journal exposes throttle state and segments;
    // for any other implementation we can't report details, so treat it as disabled.
    if (!(journal instanceof LocalKafkaJournal)) {
        log.warn("Unknown Journal implementation {} in use, cannot get information about it. Pretending journal is disabled.", journal.getClass());
        return JournalSummaryResponse.createDisabled();
    }
    final LocalKafkaJournal localJournal = (LocalKafkaJournal) journal;
    final ThrottleState throttleState = localJournal.getThrottleState();
    // Determine the creation timestamp of the oldest segment on disk.
    long oldestSegmentTime = Long.MAX_VALUE;
    for (final LogSegment segment : localJournal.getSegments()) {
        if (segment.created() < oldestSegmentTime) {
            oldestSegmentTime = segment.created();
        }
    }
    return JournalSummaryResponse.createEnabled(throttleState.appendEventsPerSec, throttleState.readEventsPerSec, throttleState.uncommittedJournalEntries, Size.bytes(throttleState.journalSize), Size.bytes(throttleState.journalSizeLimit), localJournal.numberOfSegments(), new DateTime(oldestSegmentTime, DateTimeZone.UTC), KafkaJournalConfigurationSummary.of(kafkaJournalConfiguration));
}
Aggregations