use of com.github.joschi.jadconfig.util.Size in project graylog2-server by Graylog2.
In class KafkaJournalTest, method segmentRotation:
@Test
public void segmentRotation() throws Exception {
final Size segmentSize = Size.kilobytes(1L);
final KafkaJournal journal = new KafkaJournal(journalDirectory, scheduler, segmentSize, Duration.standardHours(1), Size.kilobytes(10L), Duration.standardDays(1), 1_000_000, Duration.standardMinutes(1), 100, new MetricRegistry(), serverStatus);
createBulkChunks(journal, segmentSize, 3);
final File[] files = journalDirectory.listFiles();
assertNotNull(files);
assertTrue("there should be files in the journal directory", files.length > 0);
final File[] messageJournalDir = journalDirectory.listFiles((FileFilter) and(directoryFileFilter(), nameFileFilter("messagejournal-0")));
assertEquals(1, messageJournalDir.length);
final File[] logFiles = messageJournalDir[0].listFiles((FileFilter) and(fileFileFilter(), suffixFileFilter(".log")));
assertEquals("should have three journal segments", 3, logFiles.length);
}
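The tests above and below call a createBulkChunks helper that is not part of this excerpt. A minimal sketch of what such a helper could look like, reusing only calls that appear in the tests (createEntry, write, Size.toBytes); the body, sizing, and signature are assumptions, not the actual Graylog helper:

private int createBulkChunks(KafkaJournal journal, Size segmentSize, int bulkCount) {
    final byte[] idBytes = "the-id".getBytes(UTF_8);
    final byte[] messageBytes = "the-message".getBytes(UTF_8);
    // Messages per chunk so that the payload alone nearly fills one segment;
    // Kafka's per-record overhead pushes each chunk past the segment size.
    final int bulkSize = (int) (segmentSize.toBytes() / (idBytes.length + messageBytes.length));
    for (int chunk = 0; chunk < bulkCount; chunk++) {
        final List<Journal.Entry> entries = Lists.newArrayList();
        for (int i = 0; i < bulkSize; i++) {
            entries.add(journal.createEntry(idBytes, messageBytes));
        }
        journal.write(entries);
    }
    // Callers use the per-chunk message count to reason about committed offsets.
    return bulkSize;
}

The exact sizing does not matter much; what the tests rely on is that each chunk is large enough to force a segment roll and that the per-chunk message count is returned for the offset arithmetic in the cleanup tests.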
use of com.github.joschi.jadconfig.util.Size in project graylog2-server by Graylog2.
In class KafkaJournalTest, method serverStatusThrottledIfJournalUtilizationIsHigherThanThreshold:
@Test
public void serverStatusThrottledIfJournalUtilizationIsHigherThanThreshold() throws Exception {
serverStatus.running();
final Size segmentSize = Size.kilobytes(1L);
final KafkaJournal journal = new KafkaJournal(journalDirectory, scheduler, segmentSize, Duration.standardSeconds(1L), Size.kilobytes(4L), Duration.standardHours(1L), 1_000_000, Duration.standardSeconds(1L), 90, new MetricRegistry(), serverStatus);
createBulkChunks(journal, segmentSize, 4);
journal.flushDirtyLogs();
journal.cleanupLogs();
assertThat(serverStatus.getLifecycle()).isEqualTo(Lifecycle.THROTTLED);
}
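Why this configuration throttles: the journal is allowed 4 KB of retained data and the throttle threshold is 90 percent, so four chunks of roughly 1 KB each push utilization past the limit. A back-of-the-envelope check (illustrative arithmetic only, not Graylog's internal calculation):

final long retentionBytes = Size.kilobytes(4L).toBytes();      // 4096 bytes allowed on disk
final long writtenBytes = 4 * Size.kilobytes(1L).toBytes();    // ~4096 bytes of payload, plus per-record overhead
final double utilizationPercent = 100.0 * writtenBytes / retentionBytes; // around 100%, well above the 90% threshold

This is why the test asserts the THROTTLED lifecycle right after cleanupLogs() has run.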
use of com.github.joschi.jadconfig.util.Size in project graylog2-server by Graylog2.
In class KafkaJournalTest, method maxSegmentSize:
@Test
public void maxSegmentSize() throws Exception {
final Size segmentSize = Size.kilobytes(1L);
final KafkaJournal journal = new KafkaJournal(journalDirectory, scheduler, segmentSize, Duration.standardHours(1), Size.kilobytes(10L), Duration.standardDays(1), 1_000_000, Duration.standardMinutes(1), 100, new MetricRegistry(), serverStatus);
long size = 0L;
long maxSize = segmentSize.toBytes();
final List<Journal.Entry> list = Lists.newArrayList();
while (size <= maxSize) {
final byte[] idBytes = ("the1-id").getBytes(UTF_8);
final byte[] messageBytes = ("the1-message").getBytes(UTF_8);
size += idBytes.length + messageBytes.length;
list.add(journal.createEntry(idBytes, messageBytes));
}
// Make sure all messages have been written: write() returns the offset of the last written message, and offsets are zero-based, so it should equal list.size() - 1
assertThat(journal.write(list)).isEqualTo(list.size() - 1);
}
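A tiny worked example of this return-value convention on a fresh, empty journal (ids and payloads are arbitrary):

// Three entries are assigned offsets 0, 1, 2, so the returned last offset is three.size() - 1.
final List<Journal.Entry> three = Lists.newArrayList(
    journal.createEntry("id-0".getBytes(UTF_8), "msg-0".getBytes(UTF_8)),
    journal.createEntry("id-1".getBytes(UTF_8), "msg-1".getBytes(UTF_8)),
    journal.createEntry("id-2".getBytes(UTF_8), "msg-2".getBytes(UTF_8)));
assertThat(journal.write(three)).isEqualTo(three.size() - 1);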
use of com.github.joschi.jadconfig.util.Size in project graylog2-server by Graylog2.
In class KafkaJournalTest, method segmentCommittedCleanup:
@Test
public void segmentCommittedCleanup() throws Exception {
final Size segmentSize = Size.kilobytes(1L);
final KafkaJournal journal = new KafkaJournal(journalDirectory, scheduler, segmentSize, Duration.standardHours(1), // never clean by size in this test
Size.petabytes(1L), Duration.standardDays(1), 1_000_000, Duration.standardMinutes(1), 100, new MetricRegistry(), serverStatus);
final File messageJournalDir = new File(journalDirectory, "messagejournal-0");
assertTrue(messageJournalDir.exists());
final int bulkSize = createBulkChunks(journal, segmentSize, 3);
// make sure everything is on disk
journal.flushDirtyLogs();
assertEquals(3, countSegmentsInDir(messageJournalDir));
// we haven't committed any offsets, this should not touch anything.
final int cleanedLogs = journal.cleanupLogs();
assertEquals(0, cleanedLogs);
final int numberOfSegments = countSegmentsInDir(messageJournalDir);
assertEquals(3, numberOfSegments);
// mark first half of first segment committed, should not clean anything
journal.markJournalOffsetCommitted(bulkSize / 2);
assertEquals("should not touch segments", 0, journal.cleanupLogs());
assertEquals(3, countSegmentsInDir(messageJournalDir));
journal.markJournalOffsetCommitted(bulkSize + 1);
assertEquals("first segment should've been purged", 1, journal.cleanupLogs());
assertEquals(2, countSegmentsInDir(messageJournalDir));
journal.markJournalOffsetCommitted(bulkSize * 4);
assertEquals("only purge one segment, not the active one", 1, journal.cleanupLogs());
assertEquals(1, countSegmentsInDir(messageJournalDir));
}
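Both cleanup tests also rely on a countSegmentsInDir helper that the excerpt does not show. A plausible sketch, reusing the same commons-io FileFilterUtils constructs as the segmentRotation test (an assumed implementation; the real helper may differ):

private int countSegmentsInDir(File directory) {
    // Kafka names each segment file after its base offset with a .log suffix.
    final File[] segments = directory.listFiles((FileFilter) and(fileFileFilter(), suffixFileFilter(".log")));
    return segments == null ? 0 : segments.length;
}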
use of com.github.joschi.jadconfig.util.Size in project graylog2-server by Graylog2.
In class LocalKafkaJournalTest, method segmentCommittedCleanup:
@Test
public void segmentCommittedCleanup() throws Exception {
final Size segmentSize = Size.kilobytes(1L);
final LocalKafkaJournal journal = new LocalKafkaJournal(journalDirectory.toPath(), scheduler, segmentSize, Duration.standardHours(1), // never clean by size in this test
Size.petabytes(1L), Duration.standardDays(1), 1_000_000, Duration.standardMinutes(1), 100, new MetricRegistry(), serverStatus);
final File messageJournalDir = new File(journalDirectory, "messagejournal-0");
assertTrue(messageJournalDir.exists());
final int bulkSize = createBulkChunks(journal, segmentSize, 3);
// make sure everything is on disk
journal.flushDirtyLogs();
assertEquals(3, countSegmentsInDir(messageJournalDir));
// we haven't committed any offsets, this should not touch anything.
final int cleanedLogs = journal.cleanupLogs();
assertEquals(0, cleanedLogs);
final int numberOfSegments = countSegmentsInDir(messageJournalDir);
assertEquals(3, numberOfSegments);
// mark first half of first segment committed, should not clean anything
journal.markJournalOffsetCommitted(bulkSize / 2);
assertEquals("should not touch segments", 0, journal.cleanupLogs());
assertEquals(3, countSegmentsInDir(messageJournalDir));
journal.markJournalOffsetCommitted(bulkSize + 1);
assertEquals("first segment should've been purged", 1, journal.cleanupLogs());
assertEquals(2, countSegmentsInDir(messageJournalDir));
journal.markJournalOffsetCommitted(bulkSize * 4);
assertEquals("only purge one segment, not the active one", 1, journal.cleanupLogs());
assertEquals(1, countSegmentsInDir(messageJournalDir));
}