
Example 16 with OffsetAndEpoch

Use of org.apache.kafka.raft.OffsetAndEpoch in project kafka by apache, in the class FileRawSnapshotTest, method testBatchWriteReadSnapshot.

@Test
public void testBatchWriteReadSnapshot() throws IOException {
    OffsetAndEpoch offsetAndEpoch = new OffsetAndEpoch(10L, 3);
    int bufferSize = 256;
    int batchSize = 3;
    int numberOfBatches = 10;
    // Write numberOfBatches batches of batchSize records each, then freeze the snapshot
    try (FileRawSnapshotWriter snapshot = createSnapshotWriter(tempDir, offsetAndEpoch)) {
        for (int i = 0; i < numberOfBatches; i++) {
            ByteBuffer[] buffers = IntStream.range(0, batchSize).mapToObj(ignore -> ByteBuffer.wrap(randomBytes(bufferSize))).toArray(ByteBuffer[]::new);
            snapshot.append(buildRecords(buffers));
        }
        snapshot.freeze();
    }
    // Reopen the frozen snapshot and verify the batch and record counts
    try (FileRawSnapshotReader snapshot = FileRawSnapshotReader.open(tempDir, offsetAndEpoch)) {
        int countBatches = 0;
        int countRecords = 0;
        Iterator<RecordBatch> batches = Utils.covariantCast(snapshot.records().batchIterator());
        while (batches.hasNext()) {
            RecordBatch batch = batches.next();
            countBatches += 1;
            Iterator<Record> records = batch.streamingIterator(new GrowableBufferSupplier());
            while (records.hasNext()) {
                Record record = records.next();
                countRecords += 1;
                assertFalse(record.hasKey());
                assertTrue(record.hasValue());
                assertEquals(bufferSize, record.value().remaining());
            }
        }
        assertEquals(numberOfBatches, countBatches);
        assertEquals(numberOfBatches * batchSize, countRecords);
    }
}
Also used : OffsetAndEpoch(org.apache.kafka.raft.OffsetAndEpoch) IntStream(java.util.stream.IntStream) Assertions.assertThrows(org.junit.jupiter.api.Assertions.assertThrows) BeforeEach(org.junit.jupiter.api.BeforeEach) UnalignedFileRecords(org.apache.kafka.common.record.UnalignedFileRecords) Arrays(java.util.Arrays) ByteBuffer(java.nio.ByteBuffer) Record(org.apache.kafka.common.record.Record) Assertions.assertFalse(org.junit.jupiter.api.Assertions.assertFalse) RecordBatch(org.apache.kafka.common.record.RecordBatch) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) Path(java.nio.file.Path) Utils(org.apache.kafka.common.utils.Utils) GrowableBufferSupplier(org.apache.kafka.common.utils.BufferSupplier.GrowableBufferSupplier) CompressionType(org.apache.kafka.common.record.CompressionType) Iterator(java.util.Iterator) TestUtils(org.apache.kafka.test.TestUtils) Files(java.nio.file.Files) IOException(java.io.IOException) Test(org.junit.jupiter.api.Test) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) AfterEach(org.junit.jupiter.api.AfterEach) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) UnalignedMemoryRecords(org.apache.kafka.common.record.UnalignedMemoryRecords) Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue) Optional(java.util.Optional)
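
These examples rely on a few helpers from FileRawSnapshotTest that the excerpt does not show: tempDir, randomBytes, buildRecords and createSnapshotWriter. The following is a minimal sketch of what they might look like, assuming tempDir is a java.nio.file.Path, that randomBytes delegates to TestUtils.randomBytes, and that createSnapshotWriter wraps FileRawSnapshotWriter.create; the exact bodies and signatures in the real test class may differ.

private Path tempDir;

private static byte[] randomBytes(int size) {
    // Random payload of the requested size (assumed to come from TestUtils)
    return TestUtils.randomBytes(size);
}

private static UnalignedMemoryRecords buildRecords(ByteBuffer... buffers) {
    // Wrap each buffer in a SimpleRecord and serialize them as one uncompressed batch,
    // then re-expose the raw bytes as UnalignedMemoryRecords for the snapshot writer
    MemoryRecords records = MemoryRecords.withRecords(
        CompressionType.NONE,
        Arrays.stream(buffers).map(SimpleRecord::new).toArray(SimpleRecord[]::new));
    return new UnalignedMemoryRecords(records.buffer());
}

private static FileRawSnapshotWriter createSnapshotWriter(Path dir, OffsetAndEpoch snapshotId) {
    // Assumed to delegate to FileRawSnapshotWriter.create; the factory's parameter list
    // has changed between Kafka versions, so treat this call as illustrative
    return FileRawSnapshotWriter.create(dir, snapshotId, Optional.empty());
}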

Example 17 with OffsetAndEpoch

Use of org.apache.kafka.raft.OffsetAndEpoch in project kafka by apache, in the class FileRawSnapshotTest, method testCreateSnapshotWithSameId.

@Test
public void testCreateSnapshotWithSameId() {
    OffsetAndEpoch offsetAndEpoch = new OffsetAndEpoch(20L, 2);
    int bufferSize = 256;
    int numberOfBatches = 1;
    try (FileRawSnapshotWriter snapshot = createSnapshotWriter(tempDir, offsetAndEpoch)) {
        UnalignedMemoryRecords records = buildRecords(ByteBuffer.wrap(randomBytes(bufferSize)));
        for (int i = 0; i < numberOfBatches; i++) {
            snapshot.append(records);
        }
        snapshot.freeze();
    }
    // Create another snapshot with the same id
    try (FileRawSnapshotWriter snapshot = createSnapshotWriter(tempDir, offsetAndEpoch)) {
        UnalignedMemoryRecords records = buildRecords(ByteBuffer.wrap(randomBytes(bufferSize)));
        for (int i = 0; i < numberOfBatches; i++) {
            snapshot.append(records);
        }
        snapshot.freeze();
    }
}
Also used : OffsetAndEpoch(org.apache.kafka.raft.OffsetAndEpoch) UnalignedMemoryRecords(org.apache.kafka.common.record.UnalignedMemoryRecords) Test(org.junit.jupiter.api.Test)

Example 18 with OffsetAndEpoch

Use of org.apache.kafka.raft.OffsetAndEpoch in project kafka by apache, in the class FileRawSnapshotTest, method testBufferWriteReadSnapshot.

@Test
public void testBufferWriteReadSnapshot() throws IOException {
    OffsetAndEpoch offsetAndEpoch = new OffsetAndEpoch(10L, 3);
    int bufferSize = 256;
    int batchSize = 3;
    int numberOfBatches = 10;
    int expectedSize = 0;
    try (FileRawSnapshotWriter snapshot = createSnapshotWriter(tempDir, offsetAndEpoch)) {
        for (int i = 0; i < numberOfBatches; i++) {
            ByteBuffer[] buffers = IntStream.range(0, batchSize).mapToObj(ignore -> ByteBuffer.wrap(randomBytes(bufferSize))).toArray(ByteBuffer[]::new);
            UnalignedMemoryRecords records = buildRecords(buffers);
            snapshot.append(records);
            expectedSize += records.sizeInBytes();
        }
        assertEquals(expectedSize, snapshot.sizeInBytes());
        snapshot.freeze();
    }
    // File should exist and the size should be the sum of all the buffers
    assertTrue(Files.exists(Snapshots.snapshotPath(tempDir, offsetAndEpoch)));
    assertEquals(expectedSize, Files.size(Snapshots.snapshotPath(tempDir, offsetAndEpoch)));
    try (FileRawSnapshotReader snapshot = FileRawSnapshotReader.open(tempDir, offsetAndEpoch)) {
        int countBatches = 0;
        int countRecords = 0;
        Iterator<RecordBatch> batches = Utils.covariantCast(snapshot.records().batchIterator());
        while (batches.hasNext()) {
            RecordBatch batch = batches.next();
            countBatches += 1;
            Iterator<Record> records = batch.streamingIterator(new GrowableBufferSupplier());
            while (records.hasNext()) {
                Record record = records.next();
                countRecords += 1;
                assertFalse(record.hasKey());
                assertTrue(record.hasValue());
                assertEquals(bufferSize, record.value().remaining());
            }
        }
        assertEquals(numberOfBatches, countBatches);
        assertEquals(numberOfBatches * batchSize, countRecords);
    }
}
Also used : OffsetAndEpoch(org.apache.kafka.raft.OffsetAndEpoch) IntStream(java.util.stream.IntStream) Assertions.assertThrows(org.junit.jupiter.api.Assertions.assertThrows) BeforeEach(org.junit.jupiter.api.BeforeEach) UnalignedFileRecords(org.apache.kafka.common.record.UnalignedFileRecords) Arrays(java.util.Arrays) ByteBuffer(java.nio.ByteBuffer) Record(org.apache.kafka.common.record.Record) Assertions.assertFalse(org.junit.jupiter.api.Assertions.assertFalse) RecordBatch(org.apache.kafka.common.record.RecordBatch) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) Path(java.nio.file.Path) Utils(org.apache.kafka.common.utils.Utils) GrowableBufferSupplier(org.apache.kafka.common.utils.BufferSupplier.GrowableBufferSupplier) CompressionType(org.apache.kafka.common.record.CompressionType) Iterator(java.util.Iterator) TestUtils(org.apache.kafka.test.TestUtils) Files(java.nio.file.Files) IOException(java.io.IOException) Test(org.junit.jupiter.api.Test) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) AfterEach(org.junit.jupiter.api.AfterEach) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) UnalignedMemoryRecords(org.apache.kafka.common.record.UnalignedMemoryRecords) Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue) Optional(java.util.Optional)
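
The import list above also pulls in BeforeEach, AfterEach, TestUtils and Utils, which suggests the test class creates a scratch directory before each test and deletes it afterwards. A hedged sketch of that fixture, assuming TestUtils.tempDirectory() and Utils.delete(); the actual setup in FileRawSnapshotTest may differ.

private Path tempDir;

@BeforeEach
public void setUp() {
    // Fresh scratch directory per test; tempDirectory() returns a java.io.File
    tempDir = TestUtils.tempDirectory().toPath();
}

@AfterEach
public void tearDown() throws IOException {
    // Recursively remove the scratch directory and any snapshot files written into it
    Utils.delete(tempDir.toFile());
}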

Example 19 with OffsetAndEpoch

Use of org.apache.kafka.raft.OffsetAndEpoch in project kafka by apache, in the class FileRawSnapshotTest, method testWriteReadSnapshot.

@Test
public void testWriteReadSnapshot() throws IOException {
    OffsetAndEpoch offsetAndEpoch = new OffsetAndEpoch(10L, 3);
    int bufferSize = 256;
    int numberOfBatches = 10;
    ByteBuffer expectedBuffer = ByteBuffer.wrap(randomBytes(bufferSize));
    try (FileRawSnapshotWriter snapshot = createSnapshotWriter(tempDir, offsetAndEpoch)) {
        UnalignedMemoryRecords records = buildRecords(expectedBuffer);
        for (int i = 0; i < numberOfBatches; i++) {
            snapshot.append(records);
        }
        snapshot.freeze();
    }
    try (FileRawSnapshotReader snapshot = FileRawSnapshotReader.open(tempDir, offsetAndEpoch)) {
        int countBatches = 0;
        int countRecords = 0;
        Iterator<RecordBatch> batches = Utils.covariantCast(snapshot.records().batchIterator());
        while (batches.hasNext()) {
            RecordBatch batch = batches.next();
            countBatches += 1;
            Iterator<Record> records = batch.streamingIterator(new GrowableBufferSupplier());
            while (records.hasNext()) {
                Record record = records.next();
                countRecords += 1;
                assertFalse(record.hasKey());
                assertTrue(record.hasValue());
                assertEquals(bufferSize, record.value().remaining());
                assertEquals(expectedBuffer, record.value());
            }
        }
        assertEquals(numberOfBatches, countBatches);
        assertEquals(numberOfBatches, countRecords);
    }
}
Also used : OffsetAndEpoch(org.apache.kafka.raft.OffsetAndEpoch) GrowableBufferSupplier(org.apache.kafka.common.utils.BufferSupplier.GrowableBufferSupplier) RecordBatch(org.apache.kafka.common.record.RecordBatch) UnalignedMemoryRecords(org.apache.kafka.common.record.UnalignedMemoryRecords) Record(org.apache.kafka.common.record.Record) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.jupiter.api.Test)

Example 20 with OffsetAndEpoch

Use of org.apache.kafka.raft.OffsetAndEpoch in project kafka by apache, in the class FileRawSnapshotTest, method testAbortedSnapshot.

@Test
public void testAbortedSnapshot() throws IOException {
    OffsetAndEpoch offsetAndEpoch = new OffsetAndEpoch(20L, 2);
    int bufferSize = 256;
    int numberOfBatches = 10;
    try (FileRawSnapshotWriter snapshot = createSnapshotWriter(tempDir, offsetAndEpoch)) {
        UnalignedMemoryRecords records = buildRecords(ByteBuffer.wrap(randomBytes(bufferSize)));
        for (int i = 0; i < numberOfBatches; i++) {
            snapshot.append(records);
        }
    }
    // File should not exist since freeze was not called before
    assertFalse(Files.exists(Snapshots.snapshotPath(tempDir, offsetAndEpoch)));
    assertEquals(0, Files.list(Snapshots.snapshotDir(tempDir)).count());
}
Also used : OffsetAndEpoch(org.apache.kafka.raft.OffsetAndEpoch) UnalignedMemoryRecords(org.apache.kafka.common.record.UnalignedMemoryRecords) Test(org.junit.jupiter.api.Test)
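
This last example documents the abort path: closing a writer that was never frozen leaves no snapshot file on disk. A short usage sketch of how calling code can lean on that behavior, reusing the hypothetical helpers sketched earlier; only a successful freeze() makes the snapshot visible.

OffsetAndEpoch snapshotId = new OffsetAndEpoch(30L, 4);
try (FileRawSnapshotWriter snapshot = createSnapshotWriter(tempDir, snapshotId)) {
    // If an append throws, try-with-resources closes the writer and the
    // partially written snapshot is discarded instead of being left behind
    snapshot.append(buildRecords(ByteBuffer.wrap(randomBytes(256))));
    // Reached only when every append succeeded; freezing publishes the snapshot file
    snapshot.freeze();
}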

Aggregations

OffsetAndEpoch (org.apache.kafka.raft.OffsetAndEpoch): 25
Test (org.junit.jupiter.api.Test): 22
Path (java.nio.file.Path): 8
UnalignedMemoryRecords (org.apache.kafka.common.record.UnalignedMemoryRecords): 8
QuorumState (org.apache.kafka.raft.QuorumState): 7
ByteBuffer (java.nio.ByteBuffer): 4
RaftClientTestContext (org.apache.kafka.raft.RaftClientTestContext): 4
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 4
ArrayList (java.util.ArrayList): 3
List (java.util.List): 3
Record (org.apache.kafka.common.record.Record): 3
RecordBatch (org.apache.kafka.common.record.RecordBatch): 3
SimpleRecord (org.apache.kafka.common.record.SimpleRecord): 3
UnalignedFileRecords (org.apache.kafka.common.record.UnalignedFileRecords): 3
GrowableBufferSupplier (org.apache.kafka.common.utils.BufferSupplier.GrowableBufferSupplier): 3
IOException (java.io.IOException): 2
Files (java.nio.file.Files): 2
Arrays (java.util.Arrays): 2
Iterator (java.util.Iterator): 2
Optional (java.util.Optional): 2