Use of org.apache.kafka.common.record.MemoryRecords in project apache-kafka-on-k8s by banzaicloud.
From the class FetcherTest, method testUpdatePositionWithLastRecordMissingFromBatch.
@Test
public void testUpdatePositionWithLastRecordMissingFromBatch() {
    MemoryRecords records = MemoryRecords.withRecords(CompressionType.NONE,
        new SimpleRecord("0".getBytes(), "v".getBytes()),
        new SimpleRecord("1".getBytes(), "v".getBytes()),
        new SimpleRecord("2".getBytes(), "v".getBytes()),
        new SimpleRecord(null, "value".getBytes()));
    // Remove the last record to simulate compaction
    MemoryRecords.FilterResult result = records.filterTo(tp0, new MemoryRecords.RecordFilter() {
        @Override
        protected BatchRetention checkBatchRetention(RecordBatch batch) {
            return BatchRetention.DELETE_EMPTY;
        }

        @Override
        protected boolean shouldRetainRecord(RecordBatch recordBatch, Record record) {
            return record.key() != null;
        }
    }, ByteBuffer.allocate(1024), Integer.MAX_VALUE, BufferSupplier.NO_CACHING);
    result.output.flip();
    MemoryRecords compactedRecords = MemoryRecords.readableRecords(result.output);
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(fullFetchResponse(tp0, compactedRecords, Errors.NONE, 100L, 0));
    consumerClient.poll(0);
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> allFetchedRecords = fetcher.fetchedRecords();
    assertTrue(allFetchedRecords.containsKey(tp0));
    List<ConsumerRecord<byte[], byte[]>> fetchedRecords = allFetchedRecords.get(tp0);
    assertEquals(3, fetchedRecords.size());
    for (int i = 0; i < 3; i++) {
        assertEquals(Integer.toString(i), new String(fetchedRecords.get(i).key()));
    }
    // The next offset should point to the next batch
    assertEquals(4L, subscriptions.position(tp0).longValue());
}
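The same filterTo pattern works outside the fetcher test harness. The following standalone sketch is not from either project; it assumes the same 1.1-era clients API shown above (RecordFilter as an abstract class, FilterResult.output as a public ByteBuffer), a hypothetical topic name "demo", and Utils from org.apache.kafka.common.utils:

MemoryRecords original = MemoryRecords.withRecords(CompressionType.NONE,
    new SimpleRecord("k1".getBytes(), "v1".getBytes()),
    new SimpleRecord(null, "unkeyed".getBytes()));
MemoryRecords.FilterResult filtered = original.filterTo(
    new TopicPartition("demo", 0),
    new MemoryRecords.RecordFilter() {
        @Override
        protected BatchRetention checkBatchRetention(RecordBatch batch) {
            // Drop any batch that the record filter leaves empty
            return BatchRetention.DELETE_EMPTY;
        }

        @Override
        protected boolean shouldRetainRecord(RecordBatch batch, Record record) {
            // Keep only keyed records, as log compaction would
            return record.key() != null;
        }
    }, ByteBuffer.allocate(1024), Integer.MAX_VALUE, BufferSupplier.NO_CACHING);
filtered.output.flip();
for (Record record : MemoryRecords.readableRecords(filtered.output).records())
    System.out.println(Utils.utf8(record.key())); // prints: k1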
Use of org.apache.kafka.common.record.MemoryRecords in project apache-kafka-on-k8s by banzaicloud.
From the class FetcherTest, method testFetchResponseMetricsPartialResponse.
@Test
public void testFetchResponseMetricsPartialResponse() {
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 1);
    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg));
    KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg));
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE,
        TimestampType.CREATE_TIME, 0L);
    for (int v = 0; v < 3; v++)
        builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
    MemoryRecords records = builder.build();
    // Only records at or after the fetch position (offset 1) count toward the metrics
    int expectedBytes = 0;
    for (Record record : records.records()) {
        if (record.offset() >= 1)
            expectedBytes += record.sizeInBytes();
    }
    fetchRecords(tp0, records, Errors.NONE, 100L, 0);
    assertEquals(expectedBytes, fetchSizeAverage.value(), EPSILON);
    assertEquals(2, recordsCountAverage.value(), EPSILON);
}
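A side note on the arithmetic: each Record reports its own sizeInBytes(), while the MemoryRecords that contains it also carries batch-header overhead. A minimal sketch, assuming only the public MemoryRecords and MemoryRecordsBuilder API:

MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024),
    CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
for (int v = 0; v < 3; v++)
    builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
MemoryRecords records = builder.build();
int recordBytes = 0;
for (Record record : records.records())
    recordBytes += record.sizeInBytes();
// records.sizeInBytes() covers the full wire representation, including the
// batch header, so it exceeds the sum of the individual record sizes.
System.out.println(recordBytes + " < " + records.sizeInBytes());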
Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.
From the class RecordsSerdeTest, method testSerdeRecords.
@Test
public void testSerdeRecords() throws Exception {
    MemoryRecords records = MemoryRecords.withRecords(CompressionType.NONE,
        new SimpleRecord("foo".getBytes()), new SimpleRecord("bar".getBytes()));
    SimpleRecordsMessageData message = new SimpleRecordsMessageData().setTopic("foo").setRecordSet(records);
    testAllRoundTrips(message);
}
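SimpleRecordsMessageData and testAllRoundTrips are fixtures from Kafka's generated-message test framework. The MemoryRecords half of the round trip can be exercised on its own; a minimal sketch, assuming only the public clients API:

MemoryRecords records = MemoryRecords.withRecords(CompressionType.NONE,
    new SimpleRecord("foo".getBytes()), new SimpleRecord("bar".getBytes()));
// MemoryRecords is a thin wrapper over its wire-format bytes, so "serializing"
// just exposes the buffer, and "deserializing" wraps those bytes again.
ByteBuffer wire = records.buffer().duplicate();
for (Record record : MemoryRecords.readableRecords(wire).records())
    System.out.println(Utils.utf8(record.value())); // prints foo, then bar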
Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.
From the class CompressedRecordBatchValidationBenchmark, method measureValidateMessagesAndAssignOffsetsCompressed.
@Benchmark
public void measureValidateMessagesAndAssignOffsetsCompressed(Blackhole bh) {
    MemoryRecords records = MemoryRecords.readableRecords(singleBatchBuffer.duplicate());
    LogValidator.validateMessagesAndAssignOffsetsCompressed(records,
        new TopicPartition("a", 0),
        new LongRef(startingOffset),
        Time.SYSTEM,
        System.currentTimeMillis(),
        CompressionCodec.getCompressionCodec(compressionType.id),
        CompressionCodec.getCompressionCodec(compressionType.id),
        false,
        messageVersion,
        TimestampType.CREATE_TIME,
        Long.MAX_VALUE,
        0,
        new AppendOrigin.Client$(),
        ApiVersion.latestVersion(),
        brokerTopicStats,
        requestLocal);
}
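Note the singleBatchBuffer.duplicate() call: MemoryRecords.readableRecords wraps the supplied buffer without copying, so duplicating hands each benchmark invocation an independent position and limit over the same backing bytes. A minimal sketch of the pattern, assuming only the public MemoryRecords API:

ByteBuffer singleBatch = MemoryRecords.withRecords(CompressionType.GZIP,
    new SimpleRecord("k".getBytes(), "v".getBytes())).buffer();
// Each duplicate shares the backing bytes but has its own position and limit,
// so consuming one view cannot perturb the other.
MemoryRecords first = MemoryRecords.readableRecords(singleBatch.duplicate());
MemoryRecords second = MemoryRecords.readableRecords(singleBatch.duplicate());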
Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.
From the class PartitionMakeFollowerBenchmark, method setup.
@Setup(Level.Trial)
public void setup() throws IOException {
    if (!logDir.mkdir())
        throw new IOException("error creating test directory");
    scheduler.startup();
    LogConfig logConfig = createLogConfig();
    BrokerTopicStats brokerTopicStats = new BrokerTopicStats();
    LogDirFailureChannel logDirFailureChannel = Mockito.mock(LogDirFailureChannel.class);
    logManager = new LogManagerBuilder()
        .setLogDirs(Collections.singletonList(logDir))
        .setInitialOfflineDirs(Collections.emptyList())
        .setConfigRepository(new MockConfigRepository())
        .setInitialDefaultConfig(logConfig)
        .setCleanerConfig(new CleanerConfig(0, 0, 0, 0, 0, 0.0, 0, false, "MD5"))
        .setRecoveryThreadsPerDataDir(1)
        .setFlushCheckMs(1000L)
        .setFlushRecoveryOffsetCheckpointMs(10000L)
        .setFlushStartOffsetCheckpointMs(10000L)
        .setRetentionCheckMs(1000L)
        .setMaxPidExpirationMs(60000)
        .setInterBrokerProtocolVersion(ApiVersion.latestVersion())
        .setScheduler(scheduler)
        .setBrokerTopicStats(brokerTopicStats)
        .setLogDirFailureChannel(logDirFailureChannel)
        .setTime(Time.SYSTEM)
        .setKeepPartitionMetadataFile(true)
        .build();
    TopicPartition tp = new TopicPartition("topic", 0);
    topicId = OptionConverters.toScala(Optional.of(Uuid.randomUuid()));
    Mockito.when(offsetCheckpoints.fetch(logDir.getAbsolutePath(), tp)).thenReturn(Option.apply(0L));
    IsrChangeListener isrChangeListener = Mockito.mock(IsrChangeListener.class);
    AlterIsrManager alterIsrManager = Mockito.mock(AlterIsrManager.class);
    partition = new Partition(tp, 100, ApiVersion$.MODULE$.latestVersion(), 0, Time.SYSTEM,
        isrChangeListener, delayedOperations, Mockito.mock(MetadataCache.class), logManager, alterIsrManager);
    partition.createLogIfNotExists(true, false, offsetCheckpoints, topicId);
    executorService.submit((Runnable) () -> {
        SimpleRecord[] simpleRecords = new SimpleRecord[] {
            new SimpleRecord(1L, "foo".getBytes(StandardCharsets.UTF_8), "1".getBytes(StandardCharsets.UTF_8)),
            new SimpleRecord(2L, "bar".getBytes(StandardCharsets.UTF_8), "2".getBytes(StandardCharsets.UTF_8))
        };
        int initialOffSet = 0;
        while (true) {
            MemoryRecords memoryRecords = MemoryRecords.withRecords(initialOffSet, CompressionType.NONE, 0, simpleRecords);
            partition.appendRecordsToFollowerOrFutureReplica(memoryRecords, false);
            initialOffSet = initialOffSet + 2;
        }
    });
}
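The producer thread above builds each follower batch at an explicit base offset. A minimal sketch of that withRecords overload (the third argument is the partition leader epoch), assuming only the public MemoryRecords API:

SimpleRecord[] batch = new SimpleRecord[] {
    new SimpleRecord(1L, "foo".getBytes(StandardCharsets.UTF_8), "1".getBytes(StandardCharsets.UTF_8)),
    new SimpleRecord(2L, "bar".getBytes(StandardCharsets.UTF_8), "2".getBytes(StandardCharsets.UTF_8))
};
// first covers offsets [0, 1] and second covers [2, 3]; the append loop above
// advances the base offset by the number of records in each batch.
MemoryRecords first = MemoryRecords.withRecords(0L, CompressionType.NONE, 0, batch);
MemoryRecords second = MemoryRecords.withRecords(2L, CompressionType.NONE, 0, batch);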