Example 41 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in project apache-kafka-on-k8s by banzaicloud.

From class StandbyTaskTest, method testUpdateNonPersistentStore.

@SuppressWarnings("unchecked")
@Test(expected = ProcessorStateException.class)
public void testUpdateNonPersistentStore() throws IOException {
    StreamsConfig config = createConfig(baseDir);
    StandbyTask task = new StandbyTask(taskId, topicPartitions, topology, consumer, changelogReader, config, null, stateDirectory);
    restoreStateConsumer.assign(new ArrayList<>(task.checkpointedOffsets().keySet()));
    // Updating a non-persistent store from the changelog is illegal and must
    // throw the ProcessorStateException declared in the @Test annotation.
    task.update(partition1, records(new ConsumerRecord<>(partition1.topic(), partition1.partition(), 10, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, recordKey, recordValue)));
}
Also used: ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), StreamsConfig (org.apache.kafka.streams.StreamsConfig), InternalStreamsBuilderTest (org.apache.kafka.streams.kstream.internals.InternalStreamsBuilderTest), Test (org.junit.Test)
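
The records(...) helper is not shown in this snippet. A minimal sketch of what it plausibly does, assuming it simply wraps the varargs into the list that StandbyTask.update consumes (the helper below is a hypothetical reconstruction, not the project's code):

import java.util.Arrays;
import java.util.List;
import org.apache.kafka.clients.consumer.ConsumerRecord;

class RecordsHelper {
    // Hypothetical stand-in for the test's records(...) helper: it only
    // turns the varargs into a List so StandbyTask.update can iterate them.
    @SafeVarargs
    static <K, V> List<ConsumerRecord<K, V>> records(final ConsumerRecord<K, V>... recs) {
        return Arrays.asList(recs);
    }
}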

Example 42 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in project apache-kafka-on-k8s by banzaicloud.

From class FetcherTest, method testConsumingViaIncrementalFetchRequests.

@Test
public void testConsumingViaIncrementalFetchRequests() {
    Fetcher<byte[], byte[]> fetcher = createFetcher(subscriptions, new Metrics(time), 2);
    List<ConsumerRecord<byte[], byte[]>> records;
    subscriptions.assignFromUser(new HashSet<>(Arrays.asList(tp0, tp1)));
    subscriptions.seek(tp0, 0);
    subscriptions.seek(tp1, 1);
    // Fetch some records and establish an incremental fetch session.
    LinkedHashMap<TopicPartition, FetchResponse.PartitionData> partitions1 = new LinkedHashMap<>();
    partitions1.put(tp0, new FetchResponse.PartitionData(Errors.NONE, 2L, 2, 0L, null, this.records));
    partitions1.put(tp1, new FetchResponse.PartitionData(Errors.NONE, 100L, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, emptyRecords));
    FetchResponse resp1 = new FetchResponse(Errors.NONE, partitions1, 0, 123);
    client.prepareResponse(resp1);
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    consumerClient.poll(0);
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetcher.fetchedRecords();
    assertFalse(fetchedRecords.containsKey(tp1));
    records = fetchedRecords.get(tp0);
    assertEquals(2, records.size());
    assertEquals(3L, subscriptions.position(tp0).longValue());
    assertEquals(1L, subscriptions.position(tp1).longValue());
    assertEquals(1, records.get(0).offset());
    assertEquals(2, records.get(1).offset());
    // There is still a buffered record.
    assertEquals(0, fetcher.sendFetches());
    fetchedRecords = fetcher.fetchedRecords();
    assertFalse(fetchedRecords.containsKey(tp1));
    records = fetchedRecords.get(tp0);
    assertEquals(1, records.size());
    assertEquals(3, records.get(0).offset());
    assertEquals(4L, subscriptions.position(tp0).longValue());
    // The second response contains no new records.
    LinkedHashMap<TopicPartition, FetchResponse.PartitionData> partitions2 = new LinkedHashMap<>();
    FetchResponse resp2 = new FetchResponse(Errors.NONE, partitions2, 0, 123);
    client.prepareResponse(resp2);
    assertEquals(1, fetcher.sendFetches());
    consumerClient.poll(0);
    fetchedRecords = fetcher.fetchedRecords();
    assertTrue(fetchedRecords.isEmpty());
    assertEquals(4L, subscriptions.position(tp0).longValue());
    assertEquals(1L, subscriptions.position(tp1).longValue());
    // The third response contains some new records for tp0.
    LinkedHashMap<TopicPartition, FetchResponse.PartitionData> partitions3 = new LinkedHashMap<>();
    partitions3.put(tp0, new FetchResponse.PartitionData(Errors.NONE, 100L, 4, 0L, null, this.nextRecords));
    // Note: this FetchResponse is constructed but never used.
    new FetchResponse(Errors.NONE, new LinkedHashMap<>(partitions1), 0, INVALID_SESSION_ID);
    FetchResponse resp3 = new FetchResponse(Errors.NONE, partitions3, 0, 123);
    client.prepareResponse(resp3);
    assertEquals(1, fetcher.sendFetches());
    consumerClient.poll(0);
    fetchedRecords = fetcher.fetchedRecords();
    assertFalse(fetchedRecords.containsKey(tp1));
    records = fetchedRecords.get(tp0);
    assertEquals(2, records.size());
    assertEquals(6L, subscriptions.position(tp0).longValue());
    assertEquals(1L, subscriptions.position(tp1).longValue());
    assertEquals(4, records.get(0).offset());
    assertEquals(5, records.get(1).offset());
}
Also used: FetchResponse (org.apache.kafka.common.requests.FetchResponse), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), LinkedHashMap (java.util.LinkedHashMap), Metrics (org.apache.kafka.common.metrics.Metrics), PartitionData (org.apache.kafka.common.requests.FetchRequest.PartitionData), TopicPartition (org.apache.kafka.common.TopicPartition), List (java.util.List), ArrayList (java.util.ArrayList), Test (org.junit.Test)
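
Incremental fetch sessions are negotiated by the client transparently; an application observes only the resulting per-partition batches. A minimal sketch of the public-API view of the same behavior (poll(Duration) assumes a 2.0+ client; older clients use the millisecond overload):

import java.time.Duration;
import java.util.List;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.TopicPartition;

class PollByPartition {
    // Drain one poll and report the batch each partition contributed,
    // mirroring the fetchedRecords() map inspected in the test above.
    static void drainOnce(final Consumer<byte[], byte[]> consumer) {
        final ConsumerRecords<byte[], byte[]> batch = consumer.poll(Duration.ofMillis(100));
        for (final TopicPartition tp : batch.partitions()) {
            final List<ConsumerRecord<byte[], byte[]>> records = batch.records(tp);
            System.out.printf("%s: %d records, last offset %d%n",
                    tp, records.size(), records.get(records.size() - 1).offset());
        }
    }
}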

Example 43 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in project apache-kafka-on-k8s by banzaicloud.

From class FetcherTest, method testReadCommittedAbortMarkerWithNoData.

@Test
public void testReadCommittedAbortMarkerWithNoData() {
    Fetcher<String, String> fetcher = createFetcher(subscriptions, new Metrics(), new StringDeserializer(), new StringDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    long producerId = 1L;
    abortTransaction(buffer, producerId, 5L);
    appendTransactionalRecords(buffer, producerId, 6L, new SimpleRecord("6".getBytes(), null), new SimpleRecord("7".getBytes(), null), new SimpleRecord("8".getBytes(), null));
    commitTransaction(buffer, producerId, 9L);
    buffer.flip();
    // send the fetch
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    assertEquals(1, fetcher.sendFetches());
    // prepare the response. the aborted transactions begin at offsets which are no longer in the log
    List<FetchResponse.AbortedTransaction> abortedTransactions = new ArrayList<>();
    abortedTransactions.add(new FetchResponse.AbortedTransaction(producerId, 0L));
    client.prepareResponse(fullFetchResponseWithAbortedTransactions(MemoryRecords.readableRecords(buffer), abortedTransactions, Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(0);
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<String, String>>> allFetchedRecords = fetcher.fetchedRecords();
    assertTrue(allFetchedRecords.containsKey(tp0));
    List<ConsumerRecord<String, String>> fetchedRecords = allFetchedRecords.get(tp0);
    // Only the committed transaction's records (offsets 6-8) survive the
    // READ_COMMITTED filter; the aborted range starting at offset 0 held no data.
    assertEquals(3, fetchedRecords.size());
    assertEquals(Arrays.asList(6L, 7L, 8L), collectRecordOffsets(fetchedRecords));
}
Also used: StringDeserializer (org.apache.kafka.common.serialization.StringDeserializer), ArrayList (java.util.ArrayList), FetchResponse (org.apache.kafka.common.requests.FetchResponse), ByteBuffer (java.nio.ByteBuffer), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), Metrics (org.apache.kafka.common.metrics.Metrics), TopicPartition (org.apache.kafka.common.TopicPartition), SimpleRecord (org.apache.kafka.common.record.SimpleRecord), List (java.util.List), Test (org.junit.Test)
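
The isolation level wired into the fetcher here maps to the public isolation.level consumer setting. A minimal sketch of the equivalent application-side configuration (the broker address and group id are placeholders):

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

class ReadCommittedConsumers {
    static KafkaConsumer<String, String> create() {
        final Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // placeholder
        // Return only records from committed transactions; aborted batches,
        // like the one before offset 6 in the test, are filtered client-side.
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
        return new KafkaConsumer<>(props, new StringDeserializer(), new StringDeserializer());
    }
}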

Example 44 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in project apache-kafka-on-k8s by banzaicloud.

From class FetcherTest, method testFetchMaxPollRecords.

@Test
public void testFetchMaxPollRecords() {
    Fetcher<byte[], byte[]> fetcher = createFetcher(subscriptions, new Metrics(time), 2);
    List<ConsumerRecord<byte[], byte[]>> records;
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 1);
    // Queue two responses: the first carries offsets 1-3, the second offsets 4-5.
    client.prepareResponse(matchesOffset(tp0, 1), fullFetchResponse(tp0, this.records, Errors.NONE, 100L, 0));
    client.prepareResponse(matchesOffset(tp0, 4), fullFetchResponse(tp0, this.nextRecords, Errors.NONE, 100L, 0));
    assertEquals(1, fetcher.sendFetches());
    consumerClient.poll(0);
    records = fetcher.fetchedRecords().get(tp0);
    assertEquals(2, records.size());
    assertEquals(3L, subscriptions.position(tp0).longValue());
    assertEquals(1, records.get(0).offset());
    assertEquals(2, records.get(1).offset());
    // One record (offset 3) is still buffered, so no new fetch request is sent.
    assertEquals(0, fetcher.sendFetches());
    consumerClient.poll(0);
    records = fetcher.fetchedRecords().get(tp0);
    assertEquals(1, records.size());
    assertEquals(4L, subscriptions.position(tp0).longValue());
    assertEquals(3, records.get(0).offset());
    assertTrue(fetcher.sendFetches() > 0);
    consumerClient.poll(0);
    records = fetcher.fetchedRecords().get(tp0);
    assertEquals(2, records.size());
    assertEquals(6L, subscriptions.position(tp0).longValue());
    assertEquals(4, records.get(0).offset());
    assertEquals(5, records.get(1).offset());
}
Also used: Metrics (org.apache.kafka.common.metrics.Metrics), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), Test (org.junit.Test)
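
The third argument of createFetcher(subscriptions, new Metrics(time), 2) appears to be the two-record bound exercised by the assertions above; its public-API counterpart is max.poll.records. A minimal sketch (broker address and group id are placeholders):

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

class BoundedPollConsumers {
    static KafkaConsumer<byte[], byte[]> create() {
        final Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // placeholder
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 2); // at most 2 records per poll()
        return new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer());
    }
}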

Example 45 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in project apache-kafka-on-k8s by banzaicloud.

From class FetcherTest, method testReadCommittedWithCompactedTopic.

@Test
public void testReadCommittedWithCompactedTopic() {
    Fetcher<String, String> fetcher = createFetcher(subscriptions, new Metrics(), new StringDeserializer(), new StringDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    long pid1 = 1L;
    long pid2 = 2L;
    long pid3 = 3L;
    appendTransactionalRecords(buffer, pid3, 3L, new SimpleRecord("3".getBytes(), "value".getBytes()), new SimpleRecord("4".getBytes(), "value".getBytes()));
    appendTransactionalRecords(buffer, pid2, 15L, new SimpleRecord("15".getBytes(), "value".getBytes()), new SimpleRecord("16".getBytes(), "value".getBytes()), new SimpleRecord("17".getBytes(), "value".getBytes()));
    appendTransactionalRecords(buffer, pid1, 22L, new SimpleRecord("22".getBytes(), "value".getBytes()), new SimpleRecord("23".getBytes(), "value".getBytes()));
    abortTransaction(buffer, pid2, 28L);
    appendTransactionalRecords(buffer, pid3, 30L, new SimpleRecord("30".getBytes(), "value".getBytes()), new SimpleRecord("31".getBytes(), "value".getBytes()), new SimpleRecord("32".getBytes(), "value".getBytes()));
    commitTransaction(buffer, pid3, 35L);
    appendTransactionalRecords(buffer, pid1, 39L, new SimpleRecord("39".getBytes(), "value".getBytes()), new SimpleRecord("40".getBytes(), "value".getBytes()));
    // transaction from pid1 is aborted, but the marker is not included in the fetch
    buffer.flip();
    // send the fetch
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    assertEquals(1, fetcher.sendFetches());
    // prepare the response. the aborted transactions begin at offsets which are no longer in the log
    List<FetchResponse.AbortedTransaction> abortedTransactions = new ArrayList<>();
    abortedTransactions.add(new FetchResponse.AbortedTransaction(pid2, 6L));
    abortedTransactions.add(new FetchResponse.AbortedTransaction(pid1, 0L));
    client.prepareResponse(fullFetchResponseWithAbortedTransactions(MemoryRecords.readableRecords(buffer), abortedTransactions, Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(0);
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<String, String>>> allFetchedRecords = fetcher.fetchedRecords();
    assertTrue(allFetchedRecords.containsKey(tp0));
    List<ConsumerRecord<String, String>> fetchedRecords = allFetchedRecords.get(tp0);
    // pid1's and pid2's records were aborted; only pid3's committed
    // transactions (offsets 3-4 and 30-32) remain.
    assertEquals(5, fetchedRecords.size());
    assertEquals(Arrays.asList(3L, 4L, 30L, 31L, 32L), collectRecordOffsets(fetchedRecords));
}
Also used: StringDeserializer (org.apache.kafka.common.serialization.StringDeserializer), ArrayList (java.util.ArrayList), FetchResponse (org.apache.kafka.common.requests.FetchResponse), ByteBuffer (java.nio.ByteBuffer), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), Metrics (org.apache.kafka.common.metrics.Metrics), TopicPartition (org.apache.kafka.common.TopicPartition), SimpleRecord (org.apache.kafka.common.record.SimpleRecord), List (java.util.List), Test (org.junit.Test)
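
The appendTransactionalRecords / commitTransaction / abortTransaction helpers write transactional batches and control markers straight into the log buffer; on a live cluster the same markers come from the transactional producer API. A minimal sketch of that producer side (topic name, transactional id, and broker address are placeholders):

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

class TransactionalWriter {
    public static void main(final String[] args) {
        final Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "writer-1");        // placeholder
        try (KafkaProducer<String, String> producer =
                     new KafkaProducer<>(props, new StringSerializer(), new StringSerializer())) {
            producer.initTransactions();
            producer.beginTransaction();
            producer.send(new ProducerRecord<>("example-topic", "30", "value"));
            // A read_committed consumer sees this record only after the commit
            // marker lands; abortTransaction() would hide it entirely.
            producer.commitTransaction();
        }
    }
}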

Aggregations

ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 309
TopicPartition (org.apache.kafka.common.TopicPartition): 158
Test (org.junit.Test): 145
ArrayList (java.util.ArrayList): 120
List (java.util.List): 99
HashMap (java.util.HashMap): 97
Map (java.util.Map): 70
RecordHeaders (org.apache.kafka.common.header.internals.RecordHeaders): 61
ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords): 51
Test (org.junit.jupiter.api.Test): 35
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest): 33
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 31
LinkedHashMap (java.util.LinkedHashMap): 30
Header (org.apache.kafka.common.header.Header): 29
KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer): 28
RecordHeader (org.apache.kafka.common.header.internals.RecordHeader): 28
TimeUnit (java.util.concurrent.TimeUnit): 27
Set (java.util.Set): 24
Collectors (java.util.stream.Collectors): 24
ByteBuffer (java.nio.ByteBuffer): 22