Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache: class RecordsIteratorTest, method testFileRecords.
@Property
public void testFileRecords(@ForAll CompressionType compressionType, @ForAll long seed) throws IOException {
    List<TestBatch<String>> batches = createBatches(seed);
    MemoryRecords memRecords = buildRecords(compressionType, batches);
    FileRecords fileRecords = FileRecords.open(TestUtils.tempFile());
    fileRecords.append(memRecords);
    testIterator(batches, fileRecords);
}
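Note that createBatches, buildRecords, and testIterator are helpers in RecordsIteratorTest and are not shown here. For orientation, a minimal self-contained round trip through FileRecords using only public record APIs looks roughly like this (the file name and class name are chosen for illustration):

import java.io.File;
import java.nio.file.Files;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.FileRecords;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.SimpleRecord;
import org.apache.kafka.common.utils.Utils;

public class FileRecordsRoundTrip {
    public static void main(String[] args) throws Exception {
        // Build an in-memory batch and append it to a temporary segment file
        MemoryRecords memRecords = MemoryRecords.withRecords(
            CompressionType.NONE,
            new SimpleRecord("a".getBytes()),
            new SimpleRecord("b".getBytes()));
        File file = Files.createTempFile("records", ".log").toFile();
        file.deleteOnExit();
        try (FileRecords fileRecords = FileRecords.open(file)) {
            fileRecords.append(memRecords);
            fileRecords.flush();
            // Read the batches back from disk and print the record values
            for (RecordBatch batch : fileRecords.batches()) {
                for (Record record : batch) {
                    System.out.println(Utils.utf8(record.value()));
                }
            }
        }
    }
}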
Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache: class KafkaRaftClientTest, method testHandleCommitCallbackFiresAfterFollowerHighWatermarkAdvances.
@Test
public void testHandleCommitCallbackFiresAfterFollowerHighWatermarkAdvances() throws Exception {
    int localId = 0;
    int otherNodeId = 1;
    int epoch = 5;
    Set<Integer> voters = Utils.mkSet(localId, otherNodeId);
    RaftClientTestContext context = new RaftClientTestContext.Builder(localId, voters)
        .withElectedLeader(epoch, otherNodeId)
        .build();
    assertEquals(OptionalLong.empty(), context.client.highWatermark());

    // Poll for our first fetch request
    context.pollUntilRequest();
    RaftRequest.Outbound fetchRequest = context.assertSentFetchRequest();
    assertTrue(voters.contains(fetchRequest.destinationId()));
    context.assertFetchRequestData(fetchRequest, epoch, 0L, 0);

    // The response does not advance the high watermark
    List<String> records1 = Arrays.asList("a", "b", "c");
    MemoryRecords batch1 = context.buildBatch(0L, 3, records1);
    context.deliverResponse(fetchRequest.correlationId, fetchRequest.destinationId(),
        context.fetchResponse(epoch, otherNodeId, batch1, 0L, Errors.NONE));
    context.client.poll();

    // The listener should not have seen any data
    assertEquals(OptionalLong.of(0L), context.client.highWatermark());
    assertEquals(0, context.listener.numCommittedBatches());
    assertEquals(OptionalInt.empty(), context.listener.currentClaimedEpoch());

    // Now look for the next fetch request
    context.pollUntilRequest();
    fetchRequest = context.assertSentFetchRequest();
    assertTrue(voters.contains(fetchRequest.destinationId()));
    context.assertFetchRequestData(fetchRequest, epoch, 3L, 3);

    // The high watermark advances to include the first batch we fetched
    List<String> records2 = Arrays.asList("d", "e", "f");
    MemoryRecords batch2 = context.buildBatch(3L, 3, records2);
    context.deliverResponse(fetchRequest.correlationId, fetchRequest.destinationId(),
        context.fetchResponse(epoch, otherNodeId, batch2, 3L, Errors.NONE));
    context.client.poll();

    // The listener should have seen only the data from the first batch
    assertEquals(OptionalLong.of(3L), context.client.highWatermark());
    assertEquals(1, context.listener.numCommittedBatches());
    assertEquals(OptionalLong.of(2L), context.listener.lastCommitOffset());
    assertEquals(records1, context.listener.lastCommit().records());
    assertEquals(OptionalInt.empty(), context.listener.currentClaimedEpoch());
}
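Here context.buildBatch(baseOffset, epoch, records) is a RaftClientTestContext helper. A hedged sketch of the underlying idea, assuming it packs string values into a single batch starting at the given base offset via the public MemoryRecordsBuilder API (epoch handling is omitted, and the class and method names below are illustrative):

import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.SimpleRecord;
import org.apache.kafka.common.record.TimestampType;

public class BatchBuilderSketch {
    // Hypothetical stand-in for context.buildBatch(baseOffset, epoch, records):
    // pack the string values into one batch whose first record sits at baseOffset.
    static MemoryRecords buildBatch(long baseOffset, List<String> records) {
        ByteBuffer buffer = ByteBuffer.allocate(512);
        MemoryRecordsBuilder builder = MemoryRecords.builder(
            buffer, CompressionType.NONE, TimestampType.CREATE_TIME, baseOffset);
        for (String value : records) {
            builder.append(new SimpleRecord(value.getBytes()));
        }
        return builder.build();
    }

    public static void main(String[] args) {
        MemoryRecords batch = buildBatch(3L, Arrays.asList("d", "e", "f"));
        // The batch reports the offset range it covers
        batch.batches().forEach(b ->
            System.out.println(b.baseOffset() + ".." + b.lastOffset()));
    }
}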
Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache: class SendBuilderTest, method testZeroCopyRecords.
@Test
public void testZeroCopyRecords() {
    ByteBuffer buffer = ByteBuffer.allocate(128);
    MemoryRecords records = createRecords(buffer, "foo");

    SendBuilder builder = new SendBuilder(8);
    builder.writeInt(5);
    builder.writeRecords(records);
    builder.writeInt(15);
    Send send = builder.build();

    // Overwrite the original buffer in order to prove the data was not copied
    buffer.rewind();
    MemoryRecords overwrittenRecords = createRecords(buffer, "bar");

    ByteBuffer readBuffer = TestUtils.toBuffer(send);
    assertEquals(5, readBuffer.getInt());
    assertEquals(overwrittenRecords, getRecords(readBuffer, records.sizeInBytes()));
    assertEquals(15, readBuffer.getInt());
}
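createRecords and getRecords are private helpers in SendBuilderTest. A plausible sketch of their shape, assuming createRecords builds a single uncompressed record batch into the supplied buffer and getRecords views the next size bytes of the read buffer as records without copying (the helper class name is illustrative):

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.SimpleRecord;
import org.apache.kafka.common.record.TimestampType;

// Assumed shape of the test helpers, not the verbatim Kafka source.
final class SendBuilderHelpers {
    static MemoryRecords createRecords(ByteBuffer buffer, String value) {
        MemoryRecordsBuilder builder = MemoryRecords.builder(
            buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
        builder.append(new SimpleRecord(value.getBytes()));
        return builder.build();
    }

    static MemoryRecords getRecords(ByteBuffer buffer, int size) {
        // View the next `size` bytes as records without copying them,
        // then advance the buffer past that region.
        int limit = buffer.limit();
        buffer.limit(buffer.position() + size);
        MemoryRecords records = MemoryRecords.readableRecords(buffer.slice());
        buffer.position(buffer.limit());
        buffer.limit(limit);
        return records;
    }
}

The final comparison against overwrittenRecords passes precisely because SendBuilder kept a reference to the live backing buffer: the bytes that go out on the wire are the mutated "bar" bytes, not a copy of "foo".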
Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache: class SendBuilderTest, method testZeroCopyUnalignedRecords.
@Test
public void testZeroCopyUnalignedRecords() {
    ByteBuffer buffer = ByteBuffer.allocate(128);
    MemoryRecords records = createRecords(buffer, "foo");

    ByteBuffer buffer1 = records.buffer().duplicate();
    buffer1.limit(buffer1.limit() / 2);
    ByteBuffer buffer2 = records.buffer().duplicate();
    buffer2.position(buffer2.limit() / 2);

    UnalignedMemoryRecords records1 = new UnalignedMemoryRecords(buffer1);
    UnalignedMemoryRecords records2 = new UnalignedMemoryRecords(buffer2);

    SendBuilder builder = new SendBuilder(8);
    builder.writeInt(5);
    builder.writeRecords(records1);
    builder.writeRecords(records2);
    builder.writeInt(15);
    Send send = builder.build();

    // Overwrite the original buffer in order to prove the data was not copied
    buffer.rewind();
    MemoryRecords overwrittenRecords = createRecords(buffer, "bar");

    ByteBuffer readBuffer = TestUtils.toBuffer(send);
    assertEquals(5, readBuffer.getInt());
    assertEquals(overwrittenRecords, getRecords(readBuffer, records.sizeInBytes()));
    assertEquals(15, readBuffer.getInt());
}
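UnalignedMemoryRecords wraps an arbitrary slice of record bytes that need not start or end on a batch boundary, which is why splitting one valid batch in half still produces a correct Send: SendBuilder simply concatenates the underlying buffers. A small sketch of that invariant, reusing the createRecords shape from the previous sketch (names are illustrative):

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.UnalignedMemoryRecords;

public class UnalignedSplitSketch {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.allocate(128);
        MemoryRecords records = SendBuilderHelpers.createRecords(buffer, "foo");

        // Split the serialized batch at an arbitrary midpoint, as in the test above
        ByteBuffer first = records.buffer().duplicate();
        first.limit(first.limit() / 2);
        ByteBuffer second = records.buffer().duplicate();
        second.position(second.limit() / 2);

        UnalignedMemoryRecords head = new UnalignedMemoryRecords(first);
        UnalignedMemoryRecords tail = new UnalignedMemoryRecords(second);

        // The two halves cover exactly the bytes of the original batch
        int total = head.sizeInBytes() + tail.sizeInBytes();
        System.out.println(total == records.sizeInBytes()); // prints true
    }
}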
Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache: class TypeTest, method testRecordsSerde.
@Test
public void testRecordsSerde() {
    MemoryRecords records = MemoryRecords.withRecords(CompressionType.NONE,
        new SimpleRecord("foo".getBytes()), new SimpleRecord("bar".getBytes()));
    ByteBuffer buffer = ByteBuffer.allocate(Type.RECORDS.sizeOf(records));
    Type.RECORDS.write(buffer, records);
    buffer.flip();
    assertEquals(records, Type.RECORDS.read(buffer));
}
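Type.RECORDS is the protocol schema type used for request and response fields that carry a record set; its write/read pair round-trips the batch bytes behind a size prefix. The same round trip outside the test harness looks roughly like this (class name illustrative); note that Type.read returns Object, so callers cast:

import java.nio.ByteBuffer;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.SimpleRecord;

public class RecordsSerdeSketch {
    public static void main(String[] args) {
        MemoryRecords records = MemoryRecords.withRecords(
            CompressionType.NONE, new SimpleRecord("foo".getBytes()));

        // sizeOf reports the serialized size, so the buffer fits exactly
        ByteBuffer buffer = ByteBuffer.allocate(Type.RECORDS.sizeOf(records));
        Type.RECORDS.write(buffer, records);
        buffer.flip();

        // read returns Object; the test above relies on it equaling the input
        MemoryRecords roundTripped = (MemoryRecords) Type.RECORDS.read(buffer);
        System.out.println(records.equals(roundTripped)); // prints true
    }
}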