Example 11 with SimpleRecord

Use of org.apache.kafka.common.record.SimpleRecord in project apache-kafka-on-k8s by banzaicloud.

From the class FetcherTest, method testReturnCommittedTransactions.

@Test
public void testReturnCommittedTransactions() {
    Fetcher<byte[], byte[]> fetcher = createFetcher(subscriptions, new Metrics(), new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    int currentOffset = 0;
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset, new SimpleRecord(time.milliseconds(), "key".getBytes(), "value".getBytes()), new SimpleRecord(time.milliseconds(), "key".getBytes(), "value".getBytes()));
    currentOffset += commitTransaction(buffer, 1L, currentOffset);
    buffer.flip();
    List<FetchResponse.AbortedTransaction> abortedTransactions = new ArrayList<>();
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(new MockClient.RequestMatcher() {

        @Override
        public boolean matches(AbstractRequest body) {
            FetchRequest request = (FetchRequest) body;
            assertEquals(IsolationLevel.READ_COMMITTED, request.isolationLevel());
            return true;
        }
    }, fullFetchResponseWithAbortedTransactions(records, abortedTransactions, Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(0);
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetcher.fetchedRecords();
    assertTrue(fetchedRecords.containsKey(tp0));
    assertEquals(2, fetchedRecords.get(tp0).size());
}
Also used : AbstractRequest(org.apache.kafka.common.requests.AbstractRequest) ArrayList(java.util.ArrayList) ByteBuffer(java.nio.ByteBuffer) Metrics(org.apache.kafka.common.metrics.Metrics) TopicPartition(org.apache.kafka.common.TopicPartition) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) FetchRequest(org.apache.kafka.common.requests.FetchRequest) List(java.util.List) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) MockClient(org.apache.kafka.clients.MockClient) Test(org.junit.Test)
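
The test drives the private helpers appendTransactionalRecords and commitTransaction (the first is shown in Example 14) to lay out a transactional batch followed by a commit marker. Outside the test harness, the same payload shape can be built with the public MemoryRecords factories. A minimal sketch, assuming the withTransactionalRecords and withEndTransactionMarker overloads available in this Kafka version (the class name is hypothetical):

import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.ControlRecordType;
import org.apache.kafka.common.record.EndTransactionMarker;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.SimpleRecord;

public class TransactionalBatchSketch {
    public static void main(String[] args) {
        long producerId = 1L;
        short producerEpoch = 0;
        // Two transactional data records for producer 1, starting at sequence 0.
        MemoryRecords data = MemoryRecords.withTransactionalRecords(CompressionType.NONE, producerId, producerEpoch, 0, new SimpleRecord("key".getBytes(), "value".getBytes()), new SimpleRecord("key".getBytes(), "value".getBytes()));
        // A COMMIT control batch for the same producer (coordinator epoch 0).
        MemoryRecords marker = MemoryRecords.withEndTransactionMarker(producerId, producerEpoch, new EndTransactionMarker(ControlRecordType.COMMIT, 0));
        for (RecordBatch batch : data.batches()) System.out.println("data batch transactional: " + batch.isTransactional());
        for (RecordBatch batch : marker.batches()) System.out.println("marker batch is control: " + batch.isControlBatch());
    }
}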

Example 12 with SimpleRecord

Use of org.apache.kafka.common.record.SimpleRecord in project apache-kafka-on-k8s by banzaicloud.

From the class FetcherTest, method testMultipleAbortMarkers.

@Test
public void testMultipleAbortMarkers() {
    Fetcher<byte[], byte[]> fetcher = createFetcher(subscriptions, new Metrics(), new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    int currentOffset = 0;
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset, new SimpleRecord(time.milliseconds(), "abort1-1".getBytes(), "value".getBytes()), new SimpleRecord(time.milliseconds(), "abort1-2".getBytes(), "value".getBytes()));
    currentOffset += abortTransaction(buffer, 1L, currentOffset);
    // Duplicate abort -- should be ignored.
    currentOffset += abortTransaction(buffer, 1L, currentOffset);
    // Now commit a transaction.
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset, new SimpleRecord(time.milliseconds(), "commit1-1".getBytes(), "value".getBytes()), new SimpleRecord(time.milliseconds(), "commit1-2".getBytes(), "value".getBytes()));
    commitTransaction(buffer, 1L, currentOffset);
    buffer.flip();
    List<FetchResponse.AbortedTransaction> abortedTransactions = new ArrayList<>();
    abortedTransactions.add(new FetchResponse.AbortedTransaction(1, 0));
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(fullFetchResponseWithAbortedTransactions(records, abortedTransactions, Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(0);
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetcher.fetchedRecords();
    assertTrue(fetchedRecords.containsKey(tp0));
    assertEquals(2, fetchedRecords.get(tp0).size());
    List<ConsumerRecord<byte[], byte[]>> fetchedConsumerRecords = fetchedRecords.get(tp0);
    Set<String> committedKeys = new HashSet<>(Arrays.asList("commit1-1", "commit1-2"));
    Set<String> actuallyCommittedKeys = new HashSet<>();
    for (ConsumerRecord<byte[], byte[]> consumerRecord : fetchedConsumerRecords) {
        actuallyCommittedKeys.add(new String(consumerRecord.key(), StandardCharsets.UTF_8));
    }
    assertEquals(committedKeys, actuallyCommittedKeys);
}
Also used : ArrayList(java.util.ArrayList) FetchResponse(org.apache.kafka.common.requests.FetchResponse) ByteBuffer(java.nio.ByteBuffer) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Metrics(org.apache.kafka.common.metrics.Metrics) TopicPartition(org.apache.kafka.common.TopicPartition) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) List(java.util.List) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) HashSet(java.util.HashSet) Test(org.junit.Test)
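
For context, READ_COMMITTED filtering is driven by the aborted-transactions list in the fetch response: a transactional data batch is dropped when some aborted transaction from the same producer starts at or before the batch's base offset. A simplified sketch of that check (the real Fetcher is more elaborate, tracking aborted producers until their abort markers are consumed); the class and method names are hypothetical:

import java.util.List;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.requests.FetchResponse;

public class AbortedBatchCheckSketch {

    // True if READ_COMMITTED should drop this data batch: its transaction was
    // aborted starting at or before the batch's base offset.
    static boolean isAborted(RecordBatch batch, List<FetchResponse.AbortedTransaction> aborted) {
        if (!batch.isTransactional()) return false;
        for (FetchResponse.AbortedTransaction txn : aborted) {
            if (txn.producerId == batch.producerId() && txn.firstOffset <= batch.baseOffset()) return true;
        }
        return false;
    }
}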

Example 13 with SimpleRecord

Use of org.apache.kafka.common.record.SimpleRecord in project apache-kafka-on-k8s by banzaicloud.

From the class FetcherTest, method testFetchResponseMetricsWithOnePartitionAtTheWrongOffset.

@Test
public void testFetchResponseMetricsWithOnePartitionAtTheWrongOffset() {
    subscriptions.assignFromUser(Utils.mkSet(tp0, tp1));
    subscriptions.seek(tp0, 0);
    subscriptions.seek(tp1, 0);
    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg));
    KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg));
    // send the fetch and then seek to a new offset
    assertEquals(1, fetcher.sendFetches());
    subscriptions.seek(tp1, 5);
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
    for (int v = 0; v < 3; v++) builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
    MemoryRecords records = builder.build();
    Map<TopicPartition, FetchResponse.PartitionData> partitions = new HashMap<>();
    partitions.put(tp0, new FetchResponse.PartitionData(Errors.NONE, 100, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, records));
    partitions.put(tp1, new FetchResponse.PartitionData(Errors.NONE, 100, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("val".getBytes()))));
    client.prepareResponse(new FetchResponse(Errors.NONE, new LinkedHashMap<>(partitions), 0, INVALID_SESSION_ID));
    consumerClient.poll(0);
    fetcher.fetchedRecords();
    // we should have ignored the record at the wrong offset
    int expectedBytes = 0;
    for (Record record : records.records()) expectedBytes += record.sizeInBytes();
    assertEquals(expectedBytes, fetchSizeAverage.value(), EPSILON);
    assertEquals(3, recordsCountAverage.value(), EPSILON);
}
Also used : HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) KafkaMetric(org.apache.kafka.common.metrics.KafkaMetric) FetchResponse(org.apache.kafka.common.requests.FetchResponse) MetricName(org.apache.kafka.common.MetricName) PartitionData(org.apache.kafka.common.requests.FetchResponse.PartitionData) TopicPartition(org.apache.kafka.common.TopicPartition) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) MemoryRecordsBuilder(org.apache.kafka.common.record.MemoryRecordsBuilder) Record(org.apache.kafka.common.record.Record) LegacyRecord(org.apache.kafka.common.record.LegacyRecord) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Test(org.junit.Test)
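
The expected fetch size is summed record by record rather than taken from records.sizeInBytes(): the per-record sum excludes the record-batch header overhead that sizeInBytes() includes. A standalone sketch of the difference, using the same builder calls as the test (the class name is hypothetical):

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.TimestampType;

public class RecordSizeSketch {
    public static void main(String[] args) {
        MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
        for (int v = 0; v < 3; v++) builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
        MemoryRecords records = builder.build();
        int recordBytes = 0;
        for (Record record : records.records()) recordBytes += record.sizeInBytes();
        // records.sizeInBytes() also counts the batch header, so it exceeds the per-record sum.
        System.out.println("record bytes: " + recordBytes + ", total buffer bytes: " + records.sizeInBytes());
    }
}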

Example 14 with SimpleRecord

Use of org.apache.kafka.common.record.SimpleRecord in project apache-kafka-on-k8s by banzaicloud.

From the class FetcherTest, method appendTransactionalRecords.

// Appends a transactional batch for producer `pid` starting at baseOffset and returns
// the number of records appended, so callers can advance their running offset.
private int appendTransactionalRecords(ByteBuffer buffer, long pid, long baseOffset, int baseSequence, SimpleRecord... records) {
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE, TimestampType.CREATE_TIME, baseOffset, time.milliseconds(), pid, (short) 0, baseSequence, true, RecordBatch.NO_PARTITION_LEADER_EPOCH);
    for (SimpleRecord record : records) {
        builder.append(record);
    }
    builder.build();
    return records.length;
}
Also used : SimpleRecord(org.apache.kafka.common.record.SimpleRecord) MemoryRecordsBuilder(org.apache.kafka.common.record.MemoryRecordsBuilder)
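
The return value is what lets callers advance a running offset, as in currentOffset += appendTransactionalRecords(...) in Examples 11 and 12. Those call sites pass no explicit baseSequence, which implies a companion overload; a plausible sketch, assuming the sequence simply tracks the base offset:

private int appendTransactionalRecords(ByteBuffer buffer, long pid, long baseOffset, SimpleRecord... records) {
    // Assumed delegation: reuse the base offset as the base sequence.
    return appendTransactionalRecords(buffer, pid, baseOffset, (int) baseOffset, records);
}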

Example 15 with SimpleRecord

Use of org.apache.kafka.common.record.SimpleRecord in project apache-kafka-on-k8s by banzaicloud.

From the class FetcherTest, method testUpdatePositionWithLastRecordMissingFromBatch.

@Test
public void testUpdatePositionWithLastRecordMissingFromBatch() {
    MemoryRecords records = MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("0".getBytes(), "v".getBytes()), new SimpleRecord("1".getBytes(), "v".getBytes()), new SimpleRecord("2".getBytes(), "v".getBytes()), new SimpleRecord(null, "value".getBytes()));
    // Remove the last record to simulate compaction
    MemoryRecords.FilterResult result = records.filterTo(tp0, new MemoryRecords.RecordFilter() {

        @Override
        protected BatchRetention checkBatchRetention(RecordBatch batch) {
            return BatchRetention.DELETE_EMPTY;
        }

        @Override
        protected boolean shouldRetainRecord(RecordBatch recordBatch, Record record) {
            return record.key() != null;
        }
    }, ByteBuffer.allocate(1024), Integer.MAX_VALUE, BufferSupplier.NO_CACHING);
    result.output.flip();
    MemoryRecords compactedRecords = MemoryRecords.readableRecords(result.output);
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(fullFetchResponse(tp0, compactedRecords, Errors.NONE, 100L, 0));
    consumerClient.poll(0);
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> allFetchedRecords = fetcher.fetchedRecords();
    assertTrue(allFetchedRecords.containsKey(tp0));
    List<ConsumerRecord<byte[], byte[]>> fetchedRecords = allFetchedRecords.get(tp0);
    assertEquals(3, fetchedRecords.size());
    for (int i = 0; i < 3; i++) {
        assertEquals(Integer.toString(i), new String(fetchedRecords.get(i).key(), StandardCharsets.UTF_8));
    }
    // The next offset should point to the next batch
    assertEquals(4L, subscriptions.position(tp0).longValue());
}
Also used : DefaultRecordBatch(org.apache.kafka.common.record.DefaultRecordBatch) RecordBatch(org.apache.kafka.common.record.RecordBatch) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) TopicPartition(org.apache.kafka.common.TopicPartition) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) Record(org.apache.kafka.common.record.Record) LegacyRecord(org.apache.kafka.common.record.LegacyRecord) List(java.util.List) ArrayList(java.util.ArrayList) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Test(org.junit.Test)
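
The final assertion passes because filterTo retains the batch's original lastOffset for current-magic batches even when trailing records are removed, so a consumer that exhausts the batch advances its position past that offset instead of re-fetching the compacted tail. A standalone sketch of the same effect, reusing the test's filter shape (the class name is hypothetical):

import java.nio.ByteBuffer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.SimpleRecord;
import org.apache.kafka.common.utils.BufferSupplier;

public class CompactedOffsetSketch {
    public static void main(String[] args) {
        MemoryRecords records = MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("0".getBytes(), "v".getBytes()), new SimpleRecord("1".getBytes(), "v".getBytes()), new SimpleRecord(null, "v".getBytes()));
        MemoryRecords.FilterResult result = records.filterTo(new TopicPartition("topic", 0), new MemoryRecords.RecordFilter() {

            @Override
            protected BatchRetention checkBatchRetention(RecordBatch batch) {
                return BatchRetention.DELETE_EMPTY;
            }

            @Override
            protected boolean shouldRetainRecord(RecordBatch recordBatch, Record record) {
                // Drop the null-key record, mimicking log compaction.
                return record.key() != null;
            }
        }, ByteBuffer.allocate(1024), Integer.MAX_VALUE, BufferSupplier.NO_CACHING);
        result.output.flip();
        // The surviving batch still reports lastOffset 2, so a consumer that finishes it moves to position 3.
        for (RecordBatch batch : MemoryRecords.readableRecords(result.output).batches()) System.out.println(batch.baseOffset() + ".." + batch.lastOffset());
    }
}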

Aggregations

SimpleRecord (org.apache.kafka.common.record.SimpleRecord): 15
TopicPartition (org.apache.kafka.common.TopicPartition): 13
MemoryRecords (org.apache.kafka.common.record.MemoryRecords): 12
Test (org.junit.Test): 11
ByteBuffer (java.nio.ByteBuffer): 9
ArrayList (java.util.ArrayList): 9
List (java.util.List): 9
Metrics (org.apache.kafka.common.metrics.Metrics): 8
FetchResponse (org.apache.kafka.common.requests.FetchResponse): 8
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 6
ByteArrayDeserializer (org.apache.kafka.common.serialization.ByteArrayDeserializer): 6
LinkedHashMap (java.util.LinkedHashMap): 3
HashSet (java.util.HashSet): 2
LegacyRecord (org.apache.kafka.common.record.LegacyRecord): 2
MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder): 2
Record (org.apache.kafka.common.record.Record): 2
StringDeserializer (org.apache.kafka.common.serialization.StringDeserializer): 2
HashMap (java.util.HashMap): 1
MockClient (org.apache.kafka.clients.MockClient): 1
KafkaException (org.apache.kafka.common.KafkaException): 1