Search in sources:

Example 71 with MemoryRecords

use of org.apache.kafka.common.record.MemoryRecords in project apache-kafka-on-k8s by banzaicloud.

In the class FetcherTest, the method testUpdatePositionWithLastRecordMissingFromBatch:

@Test
public void testUpdatePositionWithLastRecordMissingFromBatch() {
    // Build a four-record batch; the final record has a null key so the
    // compaction filter below will drop it.
    MemoryRecords records = MemoryRecords.withRecords(CompressionType.NONE,
        new SimpleRecord("0".getBytes(), "v".getBytes()),
        new SimpleRecord("1".getBytes(), "v".getBytes()),
        new SimpleRecord("2".getBytes(), "v".getBytes()),
        new SimpleRecord(null, "value".getBytes()));
    // Simulate compaction: retain only records that carry a key.
    MemoryRecords.RecordFilter keepKeyedRecords = new MemoryRecords.RecordFilter() {

        @Override
        protected BatchRetention checkBatchRetention(RecordBatch batch) {
            return BatchRetention.DELETE_EMPTY;
        }

        @Override
        protected boolean shouldRetainRecord(RecordBatch recordBatch, Record record) {
            return record.key() != null;
        }
    };
    MemoryRecords.FilterResult result = records.filterTo(tp0, keepKeyedRecords,
        ByteBuffer.allocate(1024), Integer.MAX_VALUE, BufferSupplier.NO_CACHING);
    result.output.flip();
    MemoryRecords compactedRecords = MemoryRecords.readableRecords(result.output);

    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(fullFetchResponse(tp0, compactedRecords, Errors.NONE, 100L, 0));
    consumerClient.poll(0);
    assertTrue(fetcher.hasCompletedFetches());

    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetched = fetcher.fetchedRecords();
    assertTrue(fetched.containsKey(tp0));
    List<ConsumerRecord<byte[], byte[]>> partitionRecords = fetched.get(tp0);
    assertEquals(3, partitionRecords.size());
    for (int i = 0; i < 3; i++) {
        assertEquals(Integer.toString(i), new String(partitionRecords.get(i).key()));
    }
    // The position must advance past the whole batch, not stop at the last
    // surviving record's offset.
    assertEquals(4L, subscriptions.position(tp0).longValue());
}
Also used : DefaultRecordBatch(org.apache.kafka.common.record.DefaultRecordBatch) RecordBatch(org.apache.kafka.common.record.RecordBatch) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) TopicPartition(org.apache.kafka.common.TopicPartition) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) Record(org.apache.kafka.common.record.Record) LegacyRecord(org.apache.kafka.common.record.LegacyRecord) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) List(java.util.List) ArrayList(java.util.ArrayList) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Test(org.junit.Test)

Example 72 with MemoryRecords

use of org.apache.kafka.common.record.MemoryRecords in project apache-kafka-on-k8s by banzaicloud.

In the class FetcherTest, the method testFetchResponseMetricsPartialResponse:

@Test
public void testFetchResponseMetricsPartialResponse() {
    // Seek to offset 1 so the first record of the fetched batch is below the
    // fetch position and should be excluded from the size/count metrics.
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 1);

    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric fetchSizeAvg = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg));
    KafkaMetric recordsPerRequestAvg = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg));

    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024),
        CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
    for (int offset = 0; offset < 3; offset++) {
        builder.appendWithOffset(offset, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + offset).getBytes());
    }
    MemoryRecords records = builder.build();

    // Only records at or beyond the fetch position count toward the metric.
    int expectedBytes = 0;
    for (Record record : records.records()) {
        if (record.offset() >= 1) {
            expectedBytes += record.sizeInBytes();
        }
    }

    fetchRecords(tp0, records, Errors.NONE, 100L, 0);
    assertEquals(expectedBytes, fetchSizeAvg.value(), EPSILON);
    assertEquals(2, recordsPerRequestAvg.value(), EPSILON);
}
Also used : MetricName(org.apache.kafka.common.MetricName) MemoryRecordsBuilder(org.apache.kafka.common.record.MemoryRecordsBuilder) Record(org.apache.kafka.common.record.Record) LegacyRecord(org.apache.kafka.common.record.LegacyRecord) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) KafkaMetric(org.apache.kafka.common.metrics.KafkaMetric) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Test(org.junit.Test)

Example 73 with MemoryRecords

use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.

In the class RecordsSerdeTest, the method testSerdeRecords:

@Test
public void testSerdeRecords() throws Exception {
    // Round-trip a message whose record-set field carries real MemoryRecords.
    SimpleRecord[] payload = {
        new SimpleRecord("foo".getBytes()),
        new SimpleRecord("bar".getBytes())
    };
    MemoryRecords recordSet = MemoryRecords.withRecords(CompressionType.NONE, payload);
    SimpleRecordsMessageData message = new SimpleRecordsMessageData()
        .setTopic("foo")
        .setRecordSet(recordSet);
    testAllRoundTrips(message);
}
Also used : SimpleRecord(org.apache.kafka.common.record.SimpleRecord) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Test(org.junit.jupiter.api.Test)

Example 74 with MemoryRecords

use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.

In the class CompressedRecordBatchValidationBenchmark, the method measureValidateMessagesAndAssignOffsetsCompressed:

@Benchmark
public void measureValidateMessagesAndAssignOffsetsCompressed(Blackhole bh) {
    // Fresh read-only view over the pre-built batch so every invocation
    // validates the same bytes without mutating the shared buffer's position.
    MemoryRecords records = MemoryRecords.readableRecords(singleBatchBuffer.duplicate());
    CompressionCodec sourceCodec = CompressionCodec.getCompressionCodec(compressionType.id);
    CompressionCodec targetCodec = CompressionCodec.getCompressionCodec(compressionType.id);
    LogValidator.validateMessagesAndAssignOffsetsCompressed(
        records,
        new TopicPartition("a", 0),
        new LongRef(startingOffset),
        Time.SYSTEM,
        System.currentTimeMillis(),
        sourceCodec,
        targetCodec,
        false,
        messageVersion,
        TimestampType.CREATE_TIME,
        Long.MAX_VALUE,
        0,
        new AppendOrigin.Client$(),
        ApiVersion.latestVersion(),
        brokerTopicStats,
        requestLocal);
}
Also used : AppendOrigin(kafka.log.AppendOrigin) TopicPartition(org.apache.kafka.common.TopicPartition) LongRef(kafka.common.LongRef) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Benchmark(org.openjdk.jmh.annotations.Benchmark)

Example 75 with MemoryRecords

use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.

In the class PartitionMakeFollowerBenchmark, the method setup:

@Setup(Level.Trial)
public void setup() throws IOException {
    // Create a fresh, isolated log directory for this trial.
    if (!logDir.mkdir())
        throw new IOException("error creating test directory");
    scheduler.startup();

    LogConfig logConfig = createLogConfig();
    BrokerTopicStats brokerTopicStats = new BrokerTopicStats();
    LogDirFailureChannel logDirFailureChannel = Mockito.mock(LogDirFailureChannel.class);
    logManager = new LogManagerBuilder()
        .setLogDirs(Collections.singletonList(logDir))
        .setInitialOfflineDirs(Collections.emptyList())
        .setConfigRepository(new MockConfigRepository())
        .setInitialDefaultConfig(logConfig)
        .setCleanerConfig(new CleanerConfig(0, 0, 0, 0, 0, 0.0, 0, false, "MD5"))
        .setRecoveryThreadsPerDataDir(1)
        .setFlushCheckMs(1000L)
        .setFlushRecoveryOffsetCheckpointMs(10000L)
        .setFlushStartOffsetCheckpointMs(10000L)
        .setRetentionCheckMs(1000L)
        .setMaxPidExpirationMs(60000)
        .setInterBrokerProtocolVersion(ApiVersion.latestVersion())
        .setScheduler(scheduler)
        .setBrokerTopicStats(brokerTopicStats)
        .setLogDirFailureChannel(logDirFailureChannel)
        .setTime(Time.SYSTEM)
        .setKeepPartitionMetadataFile(true)
        .build();

    TopicPartition tp = new TopicPartition("topic", 0);
    topicId = OptionConverters.toScala(Optional.of(Uuid.randomUuid()));
    Mockito.when(offsetCheckpoints.fetch(logDir.getAbsolutePath(), tp)).thenReturn(Option.apply(0L));

    IsrChangeListener isrChangeListener = Mockito.mock(IsrChangeListener.class);
    AlterIsrManager alterIsrManager = Mockito.mock(AlterIsrManager.class);
    partition = new Partition(tp, 100, ApiVersion$.MODULE$.latestVersion(), 0, Time.SYSTEM,
        isrChangeListener, delayedOperations, Mockito.mock(MetadataCache.class), logManager, alterIsrManager);
    partition.createLogIfNotExists(true, false, offsetCheckpoints, topicId);

    // Background writer: keeps appending two-record batches at ever-increasing
    // offsets so the benchmarked follower transition races with live appends.
    executorService.submit((Runnable) () -> {
        SimpleRecord[] batch = new SimpleRecord[] {
            new SimpleRecord(1L, "foo".getBytes(StandardCharsets.UTF_8), "1".getBytes(StandardCharsets.UTF_8)),
            new SimpleRecord(2L, "bar".getBytes(StandardCharsets.UTF_8), "2".getBytes(StandardCharsets.UTF_8))
        };
        int nextOffset = 0;
        while (true) {
            MemoryRecords memoryRecords = MemoryRecords.withRecords(nextOffset, CompressionType.NONE, 0, batch);
            partition.appendRecordsToFollowerOrFutureReplica(memoryRecords, false);
            nextOffset = nextOffset + 2;
        }
    });
}
Also used : Partition(kafka.cluster.Partition) TopicPartition(org.apache.kafka.common.TopicPartition) IsrChangeListener(kafka.cluster.IsrChangeListener) LogManagerBuilder(kafka.server.builders.LogManagerBuilder) IOException(java.io.IOException) LogDirFailureChannel(kafka.server.LogDirFailureChannel) CleanerConfig(kafka.log.CleanerConfig) AlterIsrManager(kafka.server.AlterIsrManager) BrokerTopicStats(kafka.server.BrokerTopicStats) MockConfigRepository(kafka.server.metadata.MockConfigRepository) TopicPartition(org.apache.kafka.common.TopicPartition) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) LogConfig(kafka.log.LogConfig) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Setup(org.openjdk.jmh.annotations.Setup)

Aggregations

MemoryRecords (org.apache.kafka.common.record.MemoryRecords)108 TopicPartition (org.apache.kafka.common.TopicPartition)59 Test (org.junit.jupiter.api.Test)43 SimpleRecord (org.apache.kafka.common.record.SimpleRecord)40 ByteBuffer (java.nio.ByteBuffer)34 ArrayList (java.util.ArrayList)28 List (java.util.List)27 Test (org.junit.Test)27 HashMap (java.util.HashMap)26 LinkedHashMap (java.util.LinkedHashMap)23 MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder)23 ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord)18 FetchResponseData (org.apache.kafka.common.message.FetchResponseData)16 Collections.singletonList (java.util.Collections.singletonList)15 Record (org.apache.kafka.common.record.Record)15 Arrays.asList (java.util.Arrays.asList)14 Collections.emptyList (java.util.Collections.emptyList)14 ByteArrayDeserializer (org.apache.kafka.common.serialization.ByteArrayDeserializer)14 Metrics (org.apache.kafka.common.metrics.Metrics)12 MutableRecordBatch (org.apache.kafka.common.record.MutableRecordBatch)11