Use of org.apache.kafka.common.record.UnalignedMemoryRecords in project kafka by apache.
The class FileRawSnapshotTest, method testAppendToFrozenSnapshot.
@Test
public void testAppendToFrozenSnapshot() throws IOException {
    OffsetAndEpoch offsetAndEpoch = new OffsetAndEpoch(10L, 3);
    int bufferSize = 256;
    int numberOfBatches = 10;

    try (FileRawSnapshotWriter snapshot = createSnapshotWriter(tempDir, offsetAndEpoch)) {
        UnalignedMemoryRecords records = buildRecords(ByteBuffer.wrap(randomBytes(bufferSize)));
        for (int i = 0; i < numberOfBatches; i++) {
            snapshot.append(records);
        }
        snapshot.freeze();

        assertThrows(RuntimeException.class, () -> snapshot.append(records));
    }

    // File should exist and the size should be greater than the sum of all the buffers
    assertTrue(Files.exists(Snapshots.snapshotPath(tempDir, offsetAndEpoch)));
    assertTrue(Files.size(Snapshots.snapshotPath(tempDir, offsetAndEpoch)) > bufferSize * numberOfBatches);
}
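The buildRecords helper is part of the test class and not shown in these snippets. A minimal sketch of what it could look like, assuming it packs the payload into an ordinary record batch via MemoryRecords.withRecords (the exact overload varies across Kafka versions) and then wraps the serialized bytes; the batch header it adds is also why the frozen file ends up larger than bufferSize * numberOfBatches.

// Hypothetical sketch of the buildRecords helper assumed by these tests.
private static UnalignedMemoryRecords buildRecords(ByteBuffer payload) {
    // Pack the payload into a regular record batch; the batch header is
    // what makes the on-disk snapshot larger than the raw payload bytes.
    MemoryRecords records = MemoryRecords.withRecords(
        CompressionType.NONE,
        new SimpleRecord(payload)
    );
    // UnalignedMemoryRecords is just a view over the serialized bytes,
    // with no alignment to record-batch boundaries.
    return new UnalignedMemoryRecords(records.buffer());
}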
Use of org.apache.kafka.common.record.UnalignedMemoryRecords in project kafka by apache.
The class FileRawSnapshotTest, method testPartialWriteReadSnapshot.
@Test
public void testPartialWriteReadSnapshot() throws IOException {
    Path tempDir = TestUtils.tempDirectory().toPath();
    OffsetAndEpoch offsetAndEpoch = new OffsetAndEpoch(10L, 3);

    ByteBuffer records = buildRecords(ByteBuffer.wrap(Utils.utf8("foo"))).buffer();
    ByteBuffer expectedBuffer = ByteBuffer.wrap(records.array());

    ByteBuffer buffer1 = expectedBuffer.duplicate();
    buffer1.position(0);
    buffer1.limit(expectedBuffer.limit() / 2);

    ByteBuffer buffer2 = expectedBuffer.duplicate();
    buffer2.position(expectedBuffer.limit() / 2);
    buffer2.limit(expectedBuffer.limit());

    try (FileRawSnapshotWriter snapshot = createSnapshotWriter(tempDir, offsetAndEpoch)) {
        snapshot.append(new UnalignedMemoryRecords(buffer1));
        snapshot.append(new UnalignedMemoryRecords(buffer2));
        snapshot.freeze();
    }

    try (FileRawSnapshotReader snapshot = FileRawSnapshotReader.open(tempDir, offsetAndEpoch)) {
        int totalSize = Math.toIntExact(snapshot.sizeInBytes());
        assertEquals(expectedBuffer.remaining(), totalSize);

        UnalignedFileRecords record1 = (UnalignedFileRecords) snapshot.slice(0, totalSize / 2);
        UnalignedFileRecords record2 = (UnalignedFileRecords) snapshot.slice(totalSize / 2, totalSize - totalSize / 2);

        assertEquals(buffer1, TestUtils.toBuffer(record1));
        assertEquals(buffer2, TestUtils.toBuffer(record2));

        ByteBuffer readBuffer = ByteBuffer.allocate(record1.sizeInBytes() + record2.sizeInBytes());
        readBuffer.put(TestUtils.toBuffer(record1));
        readBuffer.put(TestUtils.toBuffer(record2));
        readBuffer.flip();

        assertEquals(expectedBuffer, readBuffer);
    }
}
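The key point of this test is that UnalignedMemoryRecords and UnalignedFileRecords are plain byte views with no alignment to record-batch boundaries, so a snapshot can be written and read back in arbitrary byte ranges. A JDK-only illustration of the duplicate/position/limit split-and-reassemble pattern the test relies on (nothing here is Kafka-specific):

// Illustrative, JDK-only sketch: any byte range of the serialized snapshot
// is a valid "unaligned" chunk, and the chunks concatenate back to the whole.
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class SplitBufferExample {
    public static void main(String[] args) {
        ByteBuffer whole = ByteBuffer.wrap("serialized snapshot bytes".getBytes(StandardCharsets.UTF_8));

        // First half: an independent view sharing the same backing array.
        ByteBuffer firstHalf = whole.duplicate();
        firstHalf.position(0);
        firstHalf.limit(whole.limit() / 2);

        // Second half: from the midpoint to the end.
        ByteBuffer secondHalf = whole.duplicate();
        secondHalf.position(whole.limit() / 2);
        secondHalf.limit(whole.limit());

        // Reassemble and verify the round trip.
        ByteBuffer reassembled = ByteBuffer.allocate(whole.remaining());
        reassembled.put(firstHalf).put(secondHalf).flip();
        System.out.println(whole.equals(reassembled)); // true
    }
}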
Use of org.apache.kafka.common.record.UnalignedMemoryRecords in project kafka by apache.
The class FileRawSnapshotTest, method testWritingSnapshot.
@Test
public void testWritingSnapshot() throws IOException {
    OffsetAndEpoch offsetAndEpoch = new OffsetAndEpoch(10L, 3);
    int bufferSize = 256;
    int numberOfBatches = 10;
    int expectedSize = 0;

    try (FileRawSnapshotWriter snapshot = createSnapshotWriter(tempDir, offsetAndEpoch)) {
        assertEquals(0, snapshot.sizeInBytes());

        UnalignedMemoryRecords records = buildRecords(ByteBuffer.wrap(randomBytes(bufferSize)));
        for (int i = 0; i < numberOfBatches; i++) {
            snapshot.append(records);
            expectedSize += records.sizeInBytes();
        }

        assertEquals(expectedSize, snapshot.sizeInBytes());
        snapshot.freeze();
    }

    // File should exist and the size should be the sum of all the buffers
    assertTrue(Files.exists(Snapshots.snapshotPath(tempDir, offsetAndEpoch)));
    assertEquals(expectedSize, Files.size(Snapshots.snapshotPath(tempDir, offsetAndEpoch)));
}
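The randomBytes and createSnapshotWriter helpers are likewise test-class utilities that these snippets assume. A plausible, JDK-only stand-in for randomBytes; only the length of the payload matters to the size accounting above:

// Hypothetical stand-in for the randomBytes helper referenced above.
private static byte[] randomBytes(int size) {
    byte[] bytes = new byte[size];
    // Fill the payload with random data; the content is irrelevant to the test,
    // only its length matters.
    new java.util.Random().nextBytes(bytes);
    return bytes;
}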
Use of org.apache.kafka.common.record.UnalignedMemoryRecords in project kafka by apache.
The class MockRawSnapshotReader, method slice.
@Override
public UnalignedRecords slice(long position, int size) {
    ByteBuffer buffer = data.buffer();
    buffer.position(Math.toIntExact(position));
    buffer.limit(Math.min(buffer.limit(), Math.toIntExact(position + size)));
    return new UnalignedMemoryRecords(buffer.slice());
}
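Two details are worth noting: the requested window is clamped to the end of the in-memory data, and ByteBuffer.slice() re-bases it so the returned UnalignedMemoryRecords starts at relative position 0. A JDK-only illustration of that clamping, assuming nothing beyond standard ByteBuffer behaviour:

// Illustrative, JDK-only: slicing a window [position, position + size) out of
// an in-memory buffer, clamped to the end of the data, as slice() above does.
import java.nio.ByteBuffer;

public class SliceWindowExample {
    public static void main(String[] args) {
        ByteBuffer data = ByteBuffer.wrap(new byte[100]);
        long position = 90;
        int size = 25; // asks for more bytes than remain

        ByteBuffer window = data.duplicate(); // independent view over the same bytes
        window.position(Math.toIntExact(position));
        window.limit(Math.min(window.limit(), Math.toIntExact(position + size)));

        // slice() re-bases the window so the caller sees it starting at offset 0.
        ByteBuffer view = window.slice();
        System.out.println(view.remaining()); // 10: clamped to the end of the data
    }
}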
Use of org.apache.kafka.common.record.UnalignedMemoryRecords in project kafka by apache.
The class KafkaRaftClientSnapshotTest, method testPartialFetchSnapshotRequestAsLeader.
@Test
public void testPartialFetchSnapshotRequestAsLeader() throws Exception {
    int localId = 0;
    Set<Integer> voters = Utils.mkSet(localId, localId + 1);
    OffsetAndEpoch snapshotId = new OffsetAndEpoch(2, 1);
    List<String> records = Arrays.asList("foo", "bar");

    RaftClientTestContext context = new RaftClientTestContext.Builder(localId, voters)
        .appendToLog(snapshotId.epoch, records)
        .build();

    context.becomeLeader();
    int epoch = context.currentEpoch();

    context.advanceLocalLeaderHighWatermarkToLogEndOffset();

    // Create and freeze a snapshot at snapshotId while acting as leader
    try (SnapshotWriter<String> snapshot = context.client.createSnapshot(snapshotId.offset - 1, snapshotId.epoch, 0).get()) {
        assertEquals(snapshotId, snapshot.snapshotId());
        snapshot.append(records);
        snapshot.freeze();
    }

    RawSnapshotReader snapshot = context.log.readSnapshot(snapshotId).get();

    // Fetch half of the snapshot
    context.deliverRequest(fetchSnapshotRequest(context.metadataPartition, epoch, snapshotId, Math.toIntExact(snapshot.sizeInBytes() / 2), 0));
    context.client.poll();

    FetchSnapshotResponseData.PartitionSnapshot response = context.assertSentFetchSnapshotResponse(context.metadataPartition).get();
    assertEquals(Errors.NONE, Errors.forCode(response.errorCode()));
    assertEquals(snapshot.sizeInBytes(), response.size());
    assertEquals(0, response.position());
    assertEquals(snapshot.sizeInBytes() / 2, response.unalignedRecords().sizeInBytes());

    UnalignedMemoryRecords memoryRecords = (UnalignedMemoryRecords) snapshot.slice(0, Math.toIntExact(snapshot.sizeInBytes()));
    ByteBuffer snapshotBuffer = memoryRecords.buffer();

    ByteBuffer responseBuffer = ByteBuffer.allocate(Math.toIntExact(snapshot.sizeInBytes()));
    responseBuffer.put(((UnalignedMemoryRecords) response.unalignedRecords()).buffer());

    ByteBuffer expectedBytes = snapshotBuffer.duplicate();
    expectedBytes.limit(Math.toIntExact(snapshot.sizeInBytes() / 2));
    assertEquals(expectedBytes, responseBuffer.duplicate().flip());

    // Fetch the remainder of the snapshot, starting where the first response left off
    context.deliverRequest(fetchSnapshotRequest(context.metadataPartition, epoch, snapshotId, Integer.MAX_VALUE, responseBuffer.position()));
    context.client.poll();

    response = context.assertSentFetchSnapshotResponse(context.metadataPartition).get();
    assertEquals(Errors.NONE, Errors.forCode(response.errorCode()));
    assertEquals(snapshot.sizeInBytes(), response.size());
    assertEquals(responseBuffer.position(), response.position());
    assertEquals(snapshot.sizeInBytes() - (snapshot.sizeInBytes() / 2), response.unalignedRecords().sizeInBytes());

    responseBuffer.put(((UnalignedMemoryRecords) response.unalignedRecords()).buffer());
    assertEquals(snapshotBuffer, responseBuffer.flip());
}
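The test exercises the position-based pagination of FetchSnapshot: each response reports the total snapshot size and the position of the returned chunk, and the follower keeps requesting from the position it has accumulated so far. A rough, JDK-only sketch of that client-side loop; fetchChunk is a hypothetical stand-in for issuing a FetchSnapshot request and is not part of the Kafka API:

// Illustrative sketch of the position-based pagination exercised above.
import java.nio.ByteBuffer;
import java.util.function.BiFunction;

public class SnapshotChunkAssembler {
    // fetchChunk.apply(position, maxBytes) stands in for sending a FetchSnapshot
    // request and returning the unaligned bytes starting at that position.
    public static ByteBuffer fetchWholeSnapshot(int totalSize,
                                                BiFunction<Integer, Integer, ByteBuffer> fetchChunk,
                                                int maxBytesPerRequest) {
        ByteBuffer assembled = ByteBuffer.allocate(totalSize);
        // Keep asking for bytes at the current position until the snapshot is complete;
        // the leader may return fewer bytes than requested, as in the test above.
        while (assembled.position() < totalSize) {
            ByteBuffer chunk = fetchChunk.apply(assembled.position(), maxBytesPerRequest);
            assembled.put(chunk);
        }
        assembled.flip();
        return assembled;
    }
}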