Example 16 with MemoryRecords

Use of org.apache.kafka.common.record.MemoryRecords in the project apache-kafka-on-k8s by banzaicloud.

From the class FetcherTest, method testReturnAbortedTransactionsinUncommittedMode.

@Test
public void testReturnAbortedTransactionsinUncommittedMode() {
    Fetcher<byte[], byte[]> fetcher = createFetcher(subscriptions, new Metrics(), new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    int currentOffset = 0;
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset, new SimpleRecord(time.milliseconds(), "key".getBytes(), "value".getBytes()), new SimpleRecord(time.milliseconds(), "key".getBytes(), "value".getBytes()));
    abortTransaction(buffer, 1L, currentOffset);
    buffer.flip();
    List<FetchResponse.AbortedTransaction> abortedTransactions = new ArrayList<>();
    abortedTransactions.add(new FetchResponse.AbortedTransaction(1, 0));
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(fullFetchResponseWithAbortedTransactions(records, abortedTransactions, Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(0);
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetcher.fetchedRecords();
    assertTrue(fetchedRecords.containsKey(tp0));
}
Also used : ArrayList(java.util.ArrayList) FetchResponse(org.apache.kafka.common.requests.FetchResponse) ByteBuffer(java.nio.ByteBuffer) Metrics(org.apache.kafka.common.metrics.Metrics) TopicPartition(org.apache.kafka.common.TopicPartition) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) List(java.util.List) ArrayList(java.util.ArrayList) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Test(org.junit.Test)
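
The test above builds its fetch payload with two private FetcherTest helpers that are not shown on this page. Below is a plausible sketch of them, not the verbatim test source: the producer epoch, base sequence, and partition leader epoch values are assumptions, and `time` is the test's mock clock. The public APIs used (MemoryRecords.builder, MemoryRecords.writeEndTransactionalMarker, EndTransactionMarker, ControlRecordType) are real.

private int appendTransactionalRecords(ByteBuffer buffer, long producerId, long baseOffset, SimpleRecord... records) {
    // Write one transactional batch for the given producer id starting at baseOffset.
    // Producer epoch 0 and a base sequence derived from the offset are assumptions.
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE,
            TimestampType.CREATE_TIME, baseOffset, time.milliseconds(), producerId, (short) 0, (int) baseOffset,
            true, RecordBatch.NO_PARTITION_LEADER_EPOCH);
    for (SimpleRecord record : records)
        builder.append(record);
    builder.build();
    return records.length;
}

private int abortTransaction(ByteBuffer buffer, long producerId, long baseOffset) {
    // Append an ABORT control marker for the producer; the marker occupies one offset.
    MemoryRecords.writeEndTransactionalMarker(buffer, baseOffset, time.milliseconds(), 0, producerId, (short) 0,
            new EndTransactionMarker(ControlRecordType.ABORT, 0));
    return 1;
}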

Example 17 with MemoryRecords

Use of org.apache.kafka.common.record.MemoryRecords in the project apache-kafka-on-k8s by banzaicloud.

From the class FetcherTest, method testFetchResponseMetricsWithOnePartitionError.

@Test
public void testFetchResponseMetricsWithOnePartitionError() {
    subscriptions.assignFromUser(Utils.mkSet(tp0, tp1));
    subscriptions.seek(tp0, 0);
    subscriptions.seek(tp1, 0);
    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg));
    KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg));
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
    for (int v = 0; v < 3; v++) builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
    MemoryRecords records = builder.build();
    Map<TopicPartition, FetchResponse.PartitionData> partitions = new HashMap<>();
    partitions.put(tp0, new FetchResponse.PartitionData(Errors.NONE, 100, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, records));
    partitions.put(tp1, new FetchResponse.PartitionData(Errors.OFFSET_OUT_OF_RANGE, 100, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, MemoryRecords.EMPTY));
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(new FetchResponse(Errors.NONE, new LinkedHashMap<>(partitions), 0, INVALID_SESSION_ID));
    consumerClient.poll(0);
    fetcher.fetchedRecords();
    int expectedBytes = 0;
    for (Record record : records.records()) expectedBytes += record.sizeInBytes();
    assertEquals(expectedBytes, fetchSizeAverage.value(), EPSILON);
    assertEquals(3, recordsCountAverage.value(), EPSILON);
}
Also used : HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) KafkaMetric(org.apache.kafka.common.metrics.KafkaMetric) FetchResponse(org.apache.kafka.common.requests.FetchResponse) LinkedHashMap(java.util.LinkedHashMap) MetricName(org.apache.kafka.common.MetricName) PartitionData(org.apache.kafka.common.requests.FetchRequest.PartitionData) TopicPartition(org.apache.kafka.common.TopicPartition) MemoryRecordsBuilder(org.apache.kafka.common.record.MemoryRecordsBuilder) Record(org.apache.kafka.common.record.Record) LegacyRecord(org.apache.kafka.common.record.LegacyRecord) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Test(org.junit.Test)
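
One point worth making explicit: the expected value above is built by summing Record.sizeInBytes() over the returned records, not by calling MemoryRecords.sizeInBytes(). A small follow-up sketch (not part of the test) reusing the records variable built above contrasts the two measures:

int wholeBufferBytes = records.sizeInBytes();   // full backing buffer, batch header included
int perRecordBytes = 0;
for (Record record : records.records())
    perRecordBytes += record.sizeInBytes();     // serialized record only, the basis of the metric expectation
// perRecordBytes is smaller than wholeBufferBytes because batch overhead is excluded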

Example 18 with MemoryRecords

Use of org.apache.kafka.common.record.MemoryRecords in the project apache-kafka-on-k8s by banzaicloud.

From the class FetcherTest, method testConsumerPositionUpdatedWhenSkippingAbortedTransactions.

@Test
public void testConsumerPositionUpdatedWhenSkippingAbortedTransactions() {
    Fetcher<byte[], byte[]> fetcher = createFetcher(subscriptions, new Metrics(), new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    long currentOffset = 0;
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset, new SimpleRecord(time.milliseconds(), "abort1-1".getBytes(), "value".getBytes()), new SimpleRecord(time.milliseconds(), "abort1-2".getBytes(), "value".getBytes()));
    currentOffset += abortTransaction(buffer, 1L, currentOffset);
    buffer.flip();
    List<FetchResponse.AbortedTransaction> abortedTransactions = new ArrayList<>();
    abortedTransactions.add(new FetchResponse.AbortedTransaction(1, 0));
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(fullFetchResponseWithAbortedTransactions(records, abortedTransactions, Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(0);
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetcher.fetchedRecords();
    // Ensure that we don't return any of the aborted records, but still advance the consumer position.
    assertFalse(fetchedRecords.containsKey(tp0));
    assertEquals(currentOffset, (long) subscriptions.position(tp0));
}
Also used : ArrayList(java.util.ArrayList) FetchResponse(org.apache.kafka.common.requests.FetchResponse) ByteBuffer(java.nio.ByteBuffer) Metrics(org.apache.kafka.common.metrics.Metrics) TopicPartition(org.apache.kafka.common.TopicPartition) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) List(java.util.List) ArrayList(java.util.ArrayList) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Test(org.junit.Test)
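
The fullFetchResponseWithAbortedTransactions helper used by this and the other aborted-transaction tests is another private FetcherTest method not shown here. A plausible sketch, assuming a single tp0 partition and the same INVALID_SESSION_ID constant that appears in Example 17:

private FetchResponse fullFetchResponseWithAbortedTransactions(MemoryRecords records,
                                                               List<FetchResponse.AbortedTransaction> abortedTransactions,
                                                               Errors error, long lastStableOffset, long hw, int throttleTime) {
    // Single-partition response for tp0 carrying the aborted-transaction index and the last stable offset,
    // which a READ_COMMITTED fetcher needs in order to filter aborted data on the client side.
    Map<TopicPartition, FetchResponse.PartitionData> partitions = Collections.singletonMap(tp0,
            new FetchResponse.PartitionData(error, hw, lastStableOffset, 0L, abortedTransactions, records));
    return new FetchResponse(Errors.NONE, new LinkedHashMap<>(partitions), throttleTime, INVALID_SESSION_ID);
}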

Example 19 with MemoryRecords

Use of org.apache.kafka.common.record.MemoryRecords in the project apache-kafka-on-k8s by banzaicloud.

From the class FetcherTest, method testUpdatePositionOnEmptyBatch.

@Test
public void testUpdatePositionOnEmptyBatch() {
    long producerId = 1;
    short producerEpoch = 0;
    int sequence = 1;
    long baseOffset = 37;
    long lastOffset = 54;
    int partitionLeaderEpoch = 7;
    ByteBuffer buffer = ByteBuffer.allocate(DefaultRecordBatch.RECORD_BATCH_OVERHEAD);
    DefaultRecordBatch.writeEmptyHeader(buffer, RecordBatch.CURRENT_MAGIC_VALUE, producerId, producerEpoch, sequence, baseOffset, lastOffset, partitionLeaderEpoch, TimestampType.CREATE_TIME, System.currentTimeMillis(), false, false);
    buffer.flip();
    MemoryRecords recordsWithEmptyBatch = MemoryRecords.readableRecords(buffer);
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(fullFetchResponse(tp0, recordsWithEmptyBatch, Errors.NONE, 100L, 0));
    consumerClient.poll(0);
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> allFetchedRecords = fetcher.fetchedRecords();
    assertTrue(allFetchedRecords.isEmpty());
    // The next offset should point to the next batch
    assertEquals(lastOffset + 1, subscriptions.position(tp0).longValue());
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) List(java.util.List) ArrayList(java.util.ArrayList) ByteBuffer(java.nio.ByteBuffer) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Test(org.junit.Test)
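
Why the position lands exactly on lastOffset + 1: even a batch that contains no records still carries offset metadata in its header, and the fetcher advances past it. A minimal sketch (not part of the test, assuming MutableRecordBatch is imported) that inspects that metadata on the recordsWithEmptyBatch built above:

for (MutableRecordBatch batch : recordsWithEmptyBatch.batches()) {
    // The empty batch still reports its base and last offsets; nextOffset() is lastOffset + 1,
    // which is where the consumer position ends up after the fetch above.
    System.out.println("baseOffset=" + batch.baseOffset()
            + " lastOffset=" + batch.lastOffset()
            + " nextOffset=" + batch.nextOffset());
}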

Example 20 with MemoryRecords

Use of org.apache.kafka.common.record.MemoryRecords in the project apache-kafka-on-k8s by banzaicloud.

From the class FetcherTest, method testReadCommittedWithCommittedAndAbortedTransactions.

@Test
public void testReadCommittedWithCommittedAndAbortedTransactions() {
    Fetcher<byte[], byte[]> fetcher = createFetcher(subscriptions, new Metrics(), new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    List<FetchResponse.AbortedTransaction> abortedTransactions = new ArrayList<>();
    long pid1 = 1L;
    long pid2 = 2L;
    // Appends for producer 1 (eventually committed)
    appendTransactionalRecords(buffer, pid1, 0L, new SimpleRecord("commit1-1".getBytes(), "value".getBytes()), new SimpleRecord("commit1-2".getBytes(), "value".getBytes()));
    // Appends for producer 2 (eventually aborted)
    appendTransactionalRecords(buffer, pid2, 2L, new SimpleRecord("abort2-1".getBytes(), "value".getBytes()));
    // commit producer 1
    commitTransaction(buffer, pid1, 3L);
    // append more for producer 2 (eventually aborted)
    appendTransactionalRecords(buffer, pid2, 4L, new SimpleRecord("abort2-2".getBytes(), "value".getBytes()));
    // abort producer 2
    abortTransaction(buffer, pid2, 5L);
    abortedTransactions.add(new FetchResponse.AbortedTransaction(pid2, 2L));
    // New transaction for producer 1 (eventually aborted)
    appendTransactionalRecords(buffer, pid1, 6L, new SimpleRecord("abort1-1".getBytes(), "value".getBytes()));
    // New transaction for producer 2 (eventually committed)
    appendTransactionalRecords(buffer, pid2, 7L, new SimpleRecord("commit2-1".getBytes(), "value".getBytes()));
    // Add messages for producer 1 (eventually aborted)
    appendTransactionalRecords(buffer, pid1, 8L, new SimpleRecord("abort1-2".getBytes(), "value".getBytes()));
    // abort producer 1
    abortTransaction(buffer, pid1, 9L);
    abortedTransactions.add(new FetchResponse.AbortedTransaction(1, 6));
    // commit producer 2
    commitTransaction(buffer, pid2, 10L);
    buffer.flip();
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(fullFetchResponseWithAbortedTransactions(records, abortedTransactions, Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(0);
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetcher.fetchedRecords();
    assertTrue(fetchedRecords.containsKey(tp0));
    // There are only 3 committed records
    List<ConsumerRecord<byte[], byte[]>> fetchedConsumerRecords = fetchedRecords.get(tp0);
    Set<String> fetchedKeys = new HashSet<>();
    for (ConsumerRecord<byte[], byte[]> consumerRecord : fetchedConsumerRecords) {
        fetchedKeys.add(new String(consumerRecord.key(), StandardCharsets.UTF_8));
    }
    assertEquals(Utils.mkSet("commit1-1", "commit1-2", "commit2-1"), fetchedKeys);
}
Also used : ArrayList(java.util.ArrayList) FetchResponse(org.apache.kafka.common.requests.FetchResponse) ByteBuffer(java.nio.ByteBuffer) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Metrics(org.apache.kafka.common.metrics.Metrics) TopicPartition(org.apache.kafka.common.TopicPartition) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) List(java.util.List) ArrayList(java.util.ArrayList) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) HashSet(java.util.HashSet) Test(org.junit.Test)
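
commitTransaction, the last of the FetcherTest helpers used in these examples, is presumably the COMMIT-marker twin of the abortTransaction sketch under Example 16. A hedged sketch with the same assumed epochs:

private int commitTransaction(ByteBuffer buffer, long producerId, long baseOffset) {
    // Append a COMMIT control marker for the producer; like the abort marker it occupies one offset.
    MemoryRecords.writeEndTransactionalMarker(buffer, baseOffset, time.milliseconds(), 0, producerId, (short) 0,
            new EndTransactionMarker(ControlRecordType.COMMIT, 0));
    return 1;
}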

Aggregations

MemoryRecords (org.apache.kafka.common.record.MemoryRecords) 108
TopicPartition (org.apache.kafka.common.TopicPartition) 59
Test (org.junit.jupiter.api.Test) 43
SimpleRecord (org.apache.kafka.common.record.SimpleRecord) 40
ByteBuffer (java.nio.ByteBuffer) 34
ArrayList (java.util.ArrayList) 28
List (java.util.List) 27
Test (org.junit.Test) 27
HashMap (java.util.HashMap) 26
LinkedHashMap (java.util.LinkedHashMap) 23
MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder) 23
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord) 18
FetchResponseData (org.apache.kafka.common.message.FetchResponseData) 16
Collections.singletonList (java.util.Collections.singletonList) 15
Record (org.apache.kafka.common.record.Record) 15
Arrays.asList (java.util.Arrays.asList) 14
Collections.emptyList (java.util.Collections.emptyList) 14
ByteArrayDeserializer (org.apache.kafka.common.serialization.ByteArrayDeserializer) 14
Metrics (org.apache.kafka.common.metrics.Metrics) 12
MutableRecordBatch (org.apache.kafka.common.record.MutableRecordBatch) 11