Example 21 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in project ksql by confluentinc.

Class TopicStreamWriterFormatTest, method shouldMatchAvroFormatter.

@Test
public void shouldMatchAvroFormatter() throws Exception {
    // Build an Avro message
    String USER_SCHEMA = "{\n"
        + "    \"fields\": [\n"
        + "        { \"name\": \"str1\", \"type\": \"string\" }\n"
        + "    ],\n"
        + "    \"name\": \"myrecord\",\n"
        + "    \"type\": \"record\"\n"
        + "}";
    Schema.Parser parser = new Schema.Parser();
    Schema schema = parser.parse(USER_SCHEMA);
    GenericData.Record avroRecord = new GenericData.Record(schema);
    avroRecord.put("str1", "My first string");
    // Set up the mocked SchemaRegistryClient expectations
    SchemaRegistryClient schemaRegistryClient = mock(SchemaRegistryClient.class);
    expect(schemaRegistryClient.register(anyString(), anyObject())).andReturn(1);
    expect(schemaRegistryClient.getById(anyInt())).andReturn(schema);
    replay(schemaRegistryClient);
    Map<String, String> props = new HashMap<>();
    // Required by the serializer config, but never contacted: the registry client is mocked.
    props.put("schema.registry.url", "localhost:9092");
    KafkaAvroSerializer avroSerializer = new KafkaAvroSerializer(schemaRegistryClient, props);
    // Test data: serialize the record with the Confluent Avro serializer
    byte[] testRecordBytes = avroSerializer.serialize("topic", avroRecord);
    ConsumerRecord<String, Bytes> record = new ConsumerRecord<>("topic", 1, 1, "key", new Bytes(testRecordBytes));
    // Assert that the Avro format detector recognizes the payload
    assertTrue(TopicStreamWriter.Format.AVRO.isFormat("topic", record, schemaRegistryClient));
}
Also used : HashMap(java.util.HashMap) Schema(org.apache.avro.Schema) KafkaAvroSerializer(io.confluent.kafka.serializers.KafkaAvroSerializer) GenericData(org.apache.avro.generic.GenericData) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Bytes(org.apache.kafka.common.utils.Bytes) SchemaRegistryClient(io.confluent.kafka.schemaregistry.client.SchemaRegistryClient) Test(org.junit.Test)
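
The mocked registry above only has to answer register and getById, so the same fixture can be built without EasyMock. A minimal sketch, assuming MockSchemaRegistryClient from the Confluent schema-registry-client artifact is on the classpath (the fixture class itself is hypothetical; the Confluent and Kafka calls are real APIs):

import io.confluent.kafka.schemaregistry.client.MockSchemaRegistryClient;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.serializers.KafkaAvroSerializer;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.utils.Bytes;

import java.util.Collections;

public class AvroRecordFixture {

    // Builds a ConsumerRecord whose value is an Avro payload in the Confluent
    // wire format, registered against an in-memory schema registry.
    public static ConsumerRecord<String, Bytes> avroConsumerRecord() {
        Schema schema = new Schema.Parser().parse(
            "{\"type\":\"record\",\"name\":\"myrecord\","
                + "\"fields\":[{\"name\":\"str1\",\"type\":\"string\"}]}");
        GenericData.Record avroRecord = new GenericData.Record(schema);
        avroRecord.put("str1", "My first string");

        // MockSchemaRegistryClient keeps schemas in memory; the configured
        // URL is required by the serializer config but never contacted.
        SchemaRegistryClient client = new MockSchemaRegistryClient();
        KafkaAvroSerializer serializer = new KafkaAvroSerializer(
            client, Collections.singletonMap("schema.registry.url", "http://unused:8081"));

        byte[] payload = serializer.serialize("topic", avroRecord);
        return new ConsumerRecord<>("topic", 1, 1L, "key", new Bytes(payload));
    }
}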

Example 22 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in project apache-kafka-on-k8s by banzaicloud.

Class WorkerSinkTaskThreadedTest, method expectRebalanceDuringPoll.

@SuppressWarnings("unchecked")
private IExpectationSetters<Object> expectRebalanceDuringPoll() throws Exception {
    final List<TopicPartition> partitions = Arrays.asList(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3);
    final long startOffset = 40L;
    final Map<TopicPartition, Long> offsets = new HashMap<>();
    offsets.put(TOPIC_PARTITION, startOffset);
    EasyMock.expect(consumer.poll(EasyMock.anyLong())).andAnswer(new IAnswer<ConsumerRecords<byte[], byte[]>>() {

        @Override
        public ConsumerRecords<byte[], byte[]> answer() throws Throwable {
            // "Sleep" so time will progress
            time.sleep(1L);
            sinkTaskContext.getValue().offset(offsets);
            rebalanceListener.getValue().onPartitionsAssigned(partitions);
            ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(
                Collections.singletonMap(
                    new TopicPartition(TOPIC, PARTITION),
                    Arrays.asList(new ConsumerRecord<>(
                        TOPIC, PARTITION, FIRST_OFFSET + recordsReturned,
                        TIMESTAMP, TIMESTAMP_TYPE, 0L, 0, 0, RAW_KEY, RAW_VALUE))));
            recordsReturned++;
            return records;
        }
    });
    EasyMock.expect(consumer.position(TOPIC_PARTITION)).andReturn(FIRST_OFFSET);
    EasyMock.expect(consumer.position(TOPIC_PARTITION2)).andReturn(FIRST_OFFSET);
    EasyMock.expect(consumer.position(TOPIC_PARTITION3)).andReturn(FIRST_OFFSET);
    sinkTask.open(partitions);
    EasyMock.expectLastCall();
    consumer.seek(TOPIC_PARTITION, startOffset);
    EasyMock.expectLastCall();
    EasyMock.expect(keyConverter.toConnectData(TOPIC, RAW_KEY)).andReturn(new SchemaAndValue(KEY_SCHEMA, KEY));
    EasyMock.expect(valueConverter.toConnectData(TOPIC, RAW_VALUE)).andReturn(new SchemaAndValue(VALUE_SCHEMA, VALUE));
    sinkTask.put(EasyMock.anyObject(Collection.class));
    return EasyMock.expectLastCall();
}
Also used : HashMap(java.util.HashMap) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) SchemaAndValue(org.apache.kafka.connect.data.SchemaAndValue) TopicPartition(org.apache.kafka.common.TopicPartition) Collection(java.util.Collection)
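
Examples 22 through 24 each build the same single-record ConsumerRecords inline. As a readability sketch, the construction can be factored into a helper (the helper class is hypothetical; the ten-argument ConsumerRecord constructor is the real pre-2.0 Kafka API used above):

import java.util.Arrays;
import java.util.Collections;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.record.TimestampType;

final class RecordFixtures {

    // Wraps a single record in the one-partition ConsumerRecords shape that
    // each stubbed poll() in these tests returns.
    static ConsumerRecords<byte[], byte[]> singleRecord(
            String topic, int partition, long offset, long timestamp,
            TimestampType timestampType, byte[] key, byte[] value) {
        ConsumerRecord<byte[], byte[]> record = new ConsumerRecord<>(
            topic, partition, offset, timestamp, timestampType,
            0L, 0, 0, key, value); // checksum and serialized sizes are unused here
        return new ConsumerRecords<>(Collections.singletonMap(
            new TopicPartition(topic, partition), Arrays.asList(record)));
    }
}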

Example 23 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in project apache-kafka-on-k8s by banzaicloud.

Class WorkerSinkTaskThreadedTest, method expectPolls.

// Note that this can only be called once per test currently
private Capture<Collection<SinkRecord>> expectPolls(final long pollDelayMs) throws Exception {
    // Stub out all the consumer stream/iterator responses, which we just want to verify occur,
    // but don't care about the exact details here.
    EasyMock.expect(consumer.poll(EasyMock.anyLong())).andStubAnswer(new IAnswer<ConsumerRecords<byte[], byte[]>>() {

        @Override
        public ConsumerRecords<byte[], byte[]> answer() throws Throwable {
            // "Sleep" so time will progress
            time.sleep(pollDelayMs);
            ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(
                Collections.singletonMap(
                    new TopicPartition(TOPIC, PARTITION),
                    Arrays.asList(new ConsumerRecord<>(
                        TOPIC, PARTITION, FIRST_OFFSET + recordsReturned,
                        TIMESTAMP, TIMESTAMP_TYPE, 0L, 0, 0, RAW_KEY, RAW_VALUE))));
            recordsReturned++;
            return records;
        }
    });
    EasyMock.expect(keyConverter.toConnectData(TOPIC, RAW_KEY)).andReturn(new SchemaAndValue(KEY_SCHEMA, KEY)).anyTimes();
    EasyMock.expect(valueConverter.toConnectData(TOPIC, RAW_VALUE)).andReturn(new SchemaAndValue(VALUE_SCHEMA, VALUE)).anyTimes();
    final Capture<SinkRecord> recordCapture = EasyMock.newCapture();
    EasyMock.expect(transformationChain.apply(EasyMock.capture(recordCapture))).andAnswer(new IAnswer<SinkRecord>() {

        @Override
        public SinkRecord answer() {
            return recordCapture.getValue();
        }
    }).anyTimes();
    Capture<Collection<SinkRecord>> capturedRecords = EasyMock.newCapture(CaptureType.ALL);
    sinkTask.put(EasyMock.capture(capturedRecords));
    EasyMock.expectLastCall().anyTimes();
    return capturedRecords;
}
Also used : SinkRecord(org.apache.kafka.connect.sink.SinkRecord) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) SchemaAndValue(org.apache.kafka.connect.data.SchemaAndValue) IAnswer(org.easymock.IAnswer) TopicPartition(org.apache.kafka.common.TopicPartition) Collection(java.util.Collection)
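
How the returned capture is consumed is not shown here. A plausible sketch (the test body and the assertion are assumptions) is that the caller replays the mocks, drives the task, and then inspects one captured Collection per put() call, which is what CaptureType.ALL provides:

// Inside the same test class, after the other mocks are wired up:
Capture<Collection<SinkRecord>> capturedRecords = expectPolls(1L);
PowerMock.replayAll();
// ... start the WorkerSinkTask and let it run a few poll/put iterations ...
PowerMock.verifyAll();
// CaptureType.ALL records every put() invocation, in call order.
for (Collection<SinkRecord> batch : capturedRecords.getValues()) {
    assertFalse(batch.isEmpty());
}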

Example 24 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in project apache-kafka-on-k8s by banzaicloud.

Class WorkerSinkTaskThreadedTest, method expectOnePoll.

@SuppressWarnings("unchecked")
private IExpectationSetters<Object> expectOnePoll() {
    // Currently the SinkTask's put() method will not be invoked unless we provide some data, so instead of
    // returning empty data, we return one record. The expectation is that the data will be ignored by the
    // response behavior specified using the return value of this method.
    EasyMock.expect(consumer.poll(EasyMock.anyLong())).andAnswer(new IAnswer<ConsumerRecords<byte[], byte[]>>() {

        @Override
        public ConsumerRecords<byte[], byte[]> answer() throws Throwable {
            // "Sleep" so time will progress
            time.sleep(1L);
            ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(
                Collections.singletonMap(
                    new TopicPartition(TOPIC, PARTITION),
                    Arrays.asList(new ConsumerRecord<>(
                        TOPIC, PARTITION, FIRST_OFFSET + recordsReturned,
                        TIMESTAMP, TIMESTAMP_TYPE, 0L, 0, 0, RAW_KEY, RAW_VALUE))));
            recordsReturned++;
            return records;
        }
    });
    EasyMock.expect(keyConverter.toConnectData(TOPIC, RAW_KEY)).andReturn(new SchemaAndValue(KEY_SCHEMA, KEY));
    EasyMock.expect(valueConverter.toConnectData(TOPIC, RAW_VALUE)).andReturn(new SchemaAndValue(VALUE_SCHEMA, VALUE));
    sinkTask.put(EasyMock.anyObject(Collection.class));
    return EasyMock.expectLastCall();
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) Collection(java.util.Collection) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) SchemaAndValue(org.apache.kafka.connect.data.SchemaAndValue)
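
The comment in expectOnePoll explains that the returned IExpectationSetters carries the response behavior. A hypothetical caller (the exception message is an assumption; RetriableException is the real org.apache.kafka.connect.errors class) would chain onto it like this:

// Make the single stubbed put() fail with a retriable error, so the test can
// assert that the worker re-delivers the same records on the next iteration.
expectOnePoll().andThrow(new RetriableException("put should be retried"));
PowerMock.replayAll();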

Example 25 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in project apache-kafka-on-k8s by banzaicloud.

Class VerifiableConsumer, method onRecordsReceived.

private Map<TopicPartition, OffsetAndMetadata> onRecordsReceived(ConsumerRecords<String, String> records) {
    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    List<RecordSetSummary> summaries = new ArrayList<>();
    for (TopicPartition tp : records.partitions()) {
        List<ConsumerRecord<String, String>> partitionRecords = records.records(tp);
        if (hasMessageLimit() && consumedMessages + partitionRecords.size() > maxMessages)
            partitionRecords = partitionRecords.subList(0, maxMessages - consumedMessages);
        if (partitionRecords.isEmpty())
            continue;
        long minOffset = partitionRecords.get(0).offset();
        long maxOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
        // Commit the offset of the next record to consume, hence maxOffset + 1.
        offsets.put(tp, new OffsetAndMetadata(maxOffset + 1));
        summaries.add(new RecordSetSummary(tp.topic(), tp.partition(), partitionRecords.size(), minOffset, maxOffset));
        if (verbose) {
            for (ConsumerRecord<String, String> record : partitionRecords) {
                printJson(new RecordData(record));
            }
        }
        consumedMessages += partitionRecords.size();
        if (isFinished())
            break;
    }
    printJson(new RecordsConsumed(records.count(), summaries));
    return offsets;
}
Also used : HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) TopicPartition(org.apache.kafka.common.TopicPartition) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata)
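
onRecordsReceived returns, per partition, the offset of the next record to consume. A hedged sketch of the enclosing consume loop (the loop body is an assumption; commitSync with an explicit offset map is the real KafkaConsumer API):

while (!isFinished()) {
    ConsumerRecords<String, String> records = consumer.poll(Long.MAX_VALUE);
    Map<TopicPartition, OffsetAndMetadata> offsets = onRecordsReceived(records);
    // Committing maxOffset + 1 per partition marks everything up to and
    // including maxOffset as consumed.
    consumer.commitSync(offsets);
}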

Aggregations

ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 309
TopicPartition (org.apache.kafka.common.TopicPartition): 158
Test (org.junit.Test): 145
ArrayList (java.util.ArrayList): 120
List (java.util.List): 99
HashMap (java.util.HashMap): 97
Map (java.util.Map): 70
RecordHeaders (org.apache.kafka.common.header.internals.RecordHeaders): 61
ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords): 51
Test (org.junit.jupiter.api.Test): 35
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest): 33
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 31
LinkedHashMap (java.util.LinkedHashMap): 30
Header (org.apache.kafka.common.header.Header): 29
KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer): 28
RecordHeader (org.apache.kafka.common.header.internals.RecordHeader): 28
TimeUnit (java.util.concurrent.TimeUnit): 27
Set (java.util.Set): 24
Collectors (java.util.stream.Collectors): 24
ByteBuffer (java.nio.ByteBuffer): 22