Example use of com.github.joschi.jadconfig.util.Size in the graylog2-server project by Graylog2, taken from the method serverStatusThrottledIfJournalUtilizationIsHigherThanThreshold of the class LocalKafkaJournalTest.
@Test
public void serverStatusThrottledIfJournalUtilizationIsHigherThanThreshold() throws Exception {
    // Start from the RUNNING lifecycle so the throttle transition is observable.
    serverStatus.running();

    // 1 KB segments with a 4 KB retention cap and a 90% utilization threshold
    // make it trivial to push the journal past the throttle limit.
    final Size oneKilobyte = Size.kilobytes(1L);
    final LocalKafkaJournal journal = new LocalKafkaJournal(
            journalDirectory.toPath(),
            scheduler,
            oneKilobyte,
            Duration.standardSeconds(1L),
            Size.kilobytes(4L),
            Duration.standardHours(1L),
            1_000_000,
            Duration.standardSeconds(1L),
            90,
            new MetricRegistry(),
            serverStatus);

    // Fill four segments, flush them to disk, and trigger retention, which
    // evaluates journal utilization against the threshold.
    createBulkChunks(journal, oneKilobyte, 4);
    journal.flushDirtyLogs();
    journal.cleanupLogs();

    assertThat(serverStatus.getLifecycle()).isEqualTo(Lifecycle.THROTTLED);
}
Example use of com.github.joschi.jadconfig.util.Size in the graylog2-server project by Graylog2, taken from the method serverStatusUnthrottledIfJournalUtilizationIsLowerThanThreshold of the class LocalKafkaJournalTest.
@Test
public void serverStatusUnthrottledIfJournalUtilizationIsLowerThanThreshold() throws Exception {
    // Start from the THROTTLED lifecycle so the un-throttle transition is observable.
    serverStatus.throttle();

    // Nothing is written to the journal, so utilization stays well below the
    // 90% threshold and cleanup should lift the throttle.
    final Size oneKilobyte = Size.kilobytes(1L);
    final LocalKafkaJournal journal = new LocalKafkaJournal(
            journalDirectory.toPath(),
            scheduler,
            oneKilobyte,
            Duration.standardSeconds(1L),
            Size.kilobytes(4L),
            Duration.standardHours(1L),
            1_000_000,
            Duration.standardSeconds(1L),
            90,
            new MetricRegistry(),
            serverStatus);

    journal.flushDirtyLogs();
    journal.cleanupLogs();

    assertThat(serverStatus.getLifecycle()).isEqualTo(Lifecycle.RUNNING);
}
Example use of com.github.joschi.jadconfig.util.Size in the graylog2-server project by Graylog2, taken from the method truncatedSegment of the class LocalKafkaJournalTest.
@Test
public void truncatedSegment() throws Exception {
    final Size oneKilobyte = Size.kilobytes(1L);
    final LocalKafkaJournal journal = new LocalKafkaJournal(
            journalDirectory.toPath(),
            scheduler,
            oneKilobyte,
            Duration.standardHours(1),
            Size.kilobytes(10L),
            Duration.standardDays(1),
            1_000_000,
            Duration.standardMinutes(1),
            100,
            new MetricRegistry(),
            serverStatus);

    // Writing two bulk chunks produces two segments of 25 messages each.
    createBulkChunks(journal, oneKilobyte, 2);

    final Path firstSegmentPath =
            Paths.get(journalDirectory.getAbsolutePath(), "messagejournal-0", "00000000000000000000.log");
    assertThat(firstSegmentPath).isRegularFile();

    // Chop one byte off the end of the first segment so its last message
    // becomes unreadable. Closing the stream also closes its channel.
    final File firstSegment = firstSegmentPath.toFile();
    try (FileOutputStream appendStream = new FileOutputStream(firstSegment, true)) {
        appendStream.getChannel().truncate(firstSegment.length() - 1);
    }

    // The damaged segment yields only 24 of its 25 messages; the intact
    // second segment still yields all 25.
    final List<Journal.JournalReadEntry> entriesFromFirstSegment = journal.read(25);
    assertThat(entriesFromFirstSegment).hasSize(24);
    final List<Journal.JournalReadEntry> entriesFromSecondSegment = journal.read(25);
    assertThat(entriesFromSecondSegment).hasSize(25);
}
Example use of com.github.joschi.jadconfig.util.Size in the graylog2-server project by Graylog2, taken from the method segmentAgeCleanup of the class KafkaJournalTest.
@Test
public void segmentAgeCleanup() throws Exception {
    // Pin joda-time's clock so segment ages are fully deterministic; the
    // finally block restores the system clock for other tests.
    final InstantMillisProvider clock = new InstantMillisProvider(DateTime.now(DateTimeZone.UTC));
    DateTimeUtils.setCurrentMillisProvider(clock);
    try {
        // 1-minute maximum segment age drives the age-based retention below.
        final Size segmentSize = Size.kilobytes(1L);
        final KafkaJournal journal = new KafkaJournal(journalDirectory, scheduler, segmentSize, Duration.standardHours(1), Size.kilobytes(10L), Duration.standardMinutes(1), 1_000_000, Duration.standardMinutes(1), 100, new MetricRegistry(), serverStatus);
        final File messageJournalDir = new File(journalDirectory, "messagejournal-0");
        assertTrue(messageJournalDir.exists());
        // we need to fix up the last modified times of the actual files.
        long[] lastModifiedTs = new long[2];
        // create two chunks, 30 seconds apart
        createBulkChunks(journal, segmentSize, 1);
        journal.flushDirtyLogs();
        lastModifiedTs[0] = clock.getMillis();
        clock.tick(Period.seconds(30));
        createBulkChunks(journal, segmentSize, 1);
        journal.flushDirtyLogs();
        lastModifiedTs[1] = clock.getMillis();
        // Stamp each segment with the fake-clock timestamps so age-based
        // cleanup sees the 30-second spacing.
        int i = 0;
        for (final LogSegment segment : journal.getSegments()) {
            assertTrue(i < 2);
            segment.lastModified_$eq(lastModifiedTs[i]);
            i++;
        }
        // Both segments are younger than the 1-minute retention age, so the
        // first cleanup pass must remove nothing.
        // NOTE: assertEquals(message, expected, actual) — the expected value
        // comes second; the original had expected/actual swapped, which
        // produced misleading failure messages.
        int cleanedLogs = journal.cleanupLogs();
        assertEquals("no segments should've been cleaned", 0, cleanedLogs);
        assertEquals("two segments segment should remain", 2, countSegmentsInDir(messageJournalDir));
        // move clock beyond the retention period and clean again
        clock.tick(Period.seconds(120));
        cleanedLogs = journal.cleanupLogs();
        assertEquals("two segments should've been cleaned (only one will actually be removed...)", 2, cleanedLogs);
        assertEquals("one segment should remain", 1, countSegmentsInDir(messageJournalDir));
    } finally {
        DateTimeUtils.setCurrentMillisSystem();
    }
}
Example use of com.github.joschi.jadconfig.util.Size in the graylog2-server project by Graylog2, taken from the method serverStatusUnthrottledIfJournalUtilizationIsLowerThanThreshold of the class KafkaJournalTest.
@Test
public void serverStatusUnthrottledIfJournalUtilizationIsLowerThanThreshold() throws Exception {
    // Start from the THROTTLED lifecycle so the un-throttle transition is observable.
    serverStatus.throttle();

    // The journal stays empty, so utilization remains below the 90%
    // threshold and cleanup should return the server to RUNNING.
    final Size oneKilobyte = Size.kilobytes(1L);
    final KafkaJournal journal = new KafkaJournal(
            journalDirectory,
            scheduler,
            oneKilobyte,
            Duration.standardSeconds(1L),
            Size.kilobytes(4L),
            Duration.standardHours(1L),
            1_000_000,
            Duration.standardSeconds(1L),
            90,
            new MetricRegistry(),
            serverStatus);

    journal.flushDirtyLogs();
    journal.cleanupLogs();

    assertThat(serverStatus.getLifecycle()).isEqualTo(Lifecycle.RUNNING);
}
Aggregations