
Example 1 with SchemaAndValue

Use of org.apache.kafka.connect.data.SchemaAndValue in project apache-kafka-on-k8s by banzaicloud.

From the class WorkerSinkTaskTest, method expectConversionAndTransformation:

private void expectConversionAndTransformation(final int numMessages, final String topicPrefix) {
    EasyMock.expect(keyConverter.toConnectData(TOPIC, RAW_KEY))
            .andReturn(new SchemaAndValue(KEY_SCHEMA, KEY)).times(numMessages);
    EasyMock.expect(valueConverter.toConnectData(TOPIC, RAW_VALUE))
            .andReturn(new SchemaAndValue(VALUE_SCHEMA, VALUE)).times(numMessages);
    final Capture<SinkRecord> recordCapture = EasyMock.newCapture();
    EasyMock.expect(transformationChain.apply(EasyMock.capture(recordCapture))).andAnswer(new IAnswer<SinkRecord>() {

        @Override
        public SinkRecord answer() {
            SinkRecord origRecord = recordCapture.getValue();
            // When a topic prefix is configured, re-route the record to the
            // prefixed topic; otherwise pass it through unchanged.
            return topicPrefix != null && !topicPrefix.isEmpty()
                    ? origRecord.newRecord(topicPrefix + origRecord.topic(), origRecord.kafkaPartition(),
                            origRecord.keySchema(), origRecord.key(),
                            origRecord.valueSchema(), origRecord.value(), origRecord.timestamp())
                    : origRecord;
        }
    }).times(numMessages);
}
Also used : IAnswer(org.easymock.IAnswer) SinkRecord(org.apache.kafka.connect.sink.SinkRecord) SchemaAndValue(org.apache.kafka.connect.data.SchemaAndValue)
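
For context, SchemaAndValue is just the pairing of a Connect Schema with the converted value that a Converter's toConnectData returns, as mocked above. A minimal, self-contained sketch (not from the project) of constructing and reading one:

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaAndValue;

public class SchemaAndValueSketch {
    public static void main(String[] args) {
        // Pair a schema with its converted value, as Converter.toConnectData does.
        SchemaAndValue converted = new SchemaAndValue(Schema.STRING_SCHEMA, "hello");
        // Downstream code reads the two halves back via schema() and value().
        System.out.println(converted.schema().type() + ": " + converted.value());
    }
}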

Example 2 with SchemaAndValue

Use of org.apache.kafka.connect.data.SchemaAndValue in project apache-kafka-on-k8s by banzaicloud.

From the class WorkerSinkTaskThreadedTest, method expectRebalanceDuringPoll:

@SuppressWarnings("unchecked")
private IExpectationSetters<Object> expectRebalanceDuringPoll() throws Exception {
    final List<TopicPartition> partitions = Arrays.asList(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3);
    final long startOffset = 40L;
    final Map<TopicPartition, Long> offsets = new HashMap<>();
    offsets.put(TOPIC_PARTITION, startOffset);
    EasyMock.expect(consumer.poll(EasyMock.anyLong())).andAnswer(new IAnswer<ConsumerRecords<byte[], byte[]>>() {

        @Override
        public ConsumerRecords<byte[], byte[]> answer() throws Throwable {
            // "Sleep" so time will progress
            time.sleep(1L);
            sinkTaskContext.getValue().offset(offsets);
            rebalanceListener.getValue().onPartitionsAssigned(partitions);
            ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(Collections.singletonMap(
                    new TopicPartition(TOPIC, PARTITION),
                    Arrays.asList(new ConsumerRecord<>(TOPIC, PARTITION, FIRST_OFFSET + recordsReturned,
                            TIMESTAMP, TIMESTAMP_TYPE, 0L, 0, 0, RAW_KEY, RAW_VALUE))));
            recordsReturned++;
            return records;
        }
    });
    EasyMock.expect(consumer.position(TOPIC_PARTITION)).andReturn(FIRST_OFFSET);
    EasyMock.expect(consumer.position(TOPIC_PARTITION2)).andReturn(FIRST_OFFSET);
    EasyMock.expect(consumer.position(TOPIC_PARTITION3)).andReturn(FIRST_OFFSET);
    sinkTask.open(partitions);
    EasyMock.expectLastCall();
    consumer.seek(TOPIC_PARTITION, startOffset);
    EasyMock.expectLastCall();
    EasyMock.expect(keyConverter.toConnectData(TOPIC, RAW_KEY)).andReturn(new SchemaAndValue(KEY_SCHEMA, KEY));
    EasyMock.expect(valueConverter.toConnectData(TOPIC, RAW_VALUE)).andReturn(new SchemaAndValue(VALUE_SCHEMA, VALUE));
    sinkTask.put(EasyMock.anyObject(Collection.class));
    return EasyMock.expectLastCall();
}
Also used : HashMap(java.util.HashMap) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) SchemaAndValue(org.apache.kafka.connect.data.SchemaAndValue) TopicPartition(org.apache.kafka.common.TopicPartition) Collection(java.util.Collection)
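
The interesting part of this expectation is the mid-poll rebalance: the answer pushes offsets through the SinkTaskContext and fires onPartitionsAssigned, after which the worker is expected to open the partitions and seek. A minimal, hypothetical sketch of that seek-on-assign pattern in isolation (class and method names here are illustrative, not from the project):

import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.common.TopicPartition;

// On assignment, seek each partition to an offset the task has requested,
// mirroring what the worker does with offsets set via SinkTaskContext.
public class SeekingRebalanceListener implements ConsumerRebalanceListener {
    private final Consumer<byte[], byte[]> consumer;
    private final Map<TopicPartition, Long> requestedOffsets = new HashMap<>();

    public SeekingRebalanceListener(Consumer<byte[], byte[]> consumer) {
        this.consumer = consumer;
    }

    // Called by task code that wants consumption to restart at a given offset.
    public void requestOffset(TopicPartition tp, long offset) {
        requestedOffsets.put(tp, offset);
    }

    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        for (TopicPartition tp : partitions) {
            Long offset = requestedOffsets.remove(tp);
            if (offset != null) {
                consumer.seek(tp, offset);
            }
        }
    }

    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        // Nothing to do in this sketch; a real task would flush and commit here.
    }
}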

Example 3 with SchemaAndValue

Use of org.apache.kafka.connect.data.SchemaAndValue in project apache-kafka-on-k8s by banzaicloud.

From the class WorkerSinkTaskThreadedTest, method expectPolls:

// Note that this can only be called once per test currently
private Capture<Collection<SinkRecord>> expectPolls(final long pollDelayMs) throws Exception {
    // Stub out all the consumer stream/iterator responses, which we just want to verify occur,
    // but don't care about the exact details here.
    EasyMock.expect(consumer.poll(EasyMock.anyLong())).andStubAnswer(new IAnswer<ConsumerRecords<byte[], byte[]>>() {

        @Override
        public ConsumerRecords<byte[], byte[]> answer() throws Throwable {
            // "Sleep" so time will progress
            time.sleep(pollDelayMs);
            ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(Collections.singletonMap(
                    new TopicPartition(TOPIC, PARTITION),
                    Arrays.asList(new ConsumerRecord<>(TOPIC, PARTITION, FIRST_OFFSET + recordsReturned,
                            TIMESTAMP, TIMESTAMP_TYPE, 0L, 0, 0, RAW_KEY, RAW_VALUE))));
            recordsReturned++;
            return records;
        }
    });
    EasyMock.expect(keyConverter.toConnectData(TOPIC, RAW_KEY)).andReturn(new SchemaAndValue(KEY_SCHEMA, KEY)).anyTimes();
    EasyMock.expect(valueConverter.toConnectData(TOPIC, RAW_VALUE)).andReturn(new SchemaAndValue(VALUE_SCHEMA, VALUE)).anyTimes();
    final Capture<SinkRecord> recordCapture = EasyMock.newCapture();
    EasyMock.expect(transformationChain.apply(EasyMock.capture(recordCapture))).andAnswer(new IAnswer<SinkRecord>() {

        @Override
        public SinkRecord answer() {
            return recordCapture.getValue();
        }
    }).anyTimes();
    Capture<Collection<SinkRecord>> capturedRecords = EasyMock.newCapture(CaptureType.ALL);
    sinkTask.put(EasyMock.capture(capturedRecords));
    EasyMock.expectLastCall().anyTimes();
    return capturedRecords;
}
Also used : SinkRecord(org.apache.kafka.connect.sink.SinkRecord) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) SchemaAndValue(org.apache.kafka.connect.data.SchemaAndValue) IAnswer(org.easymock.IAnswer) TopicPartition(org.apache.kafka.common.TopicPartition) Collection(java.util.Collection)
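
Because put() fires once per poll, the method uses CaptureType.ALL so every batch is retained rather than only the last one. A self-contained sketch (not from the project) of how such a capture accumulates arguments across calls:

import static org.easymock.EasyMock.*;

import java.util.List;

import org.easymock.Capture;
import org.easymock.CaptureType;

public class CaptureAllDemo {

    interface Sink {
        void put(String record);
    }

    public static void main(String[] args) {
        Sink sink = mock(Sink.class);
        // CaptureType.ALL retains the argument of every matching call,
        // not just the most recent one.
        Capture<String> captured = newCapture(CaptureType.ALL);
        sink.put(capture(captured));
        expectLastCall().anyTimes();
        replay(sink);

        sink.put("a");
        sink.put("b");

        // getValues() returns the captured arguments in call order: [a, b]
        List<String> values = captured.getValues();
        System.out.println(values);
        verify(sink);
    }
}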

Example 4 with SchemaAndValue

Use of org.apache.kafka.connect.data.SchemaAndValue in project apache-kafka-on-k8s by banzaicloud.

From the class WorkerSinkTaskThreadedTest, method expectOnePoll:

@SuppressWarnings("unchecked")
private IExpectationSetters<Object> expectOnePoll() {
    // Currently the SinkTask's put() method will not be invoked unless we provide some data, so instead of
    // returning empty data, we return one record. The expectation is that the data will be ignored by the
    // response behavior specified using the return value of this method.
    EasyMock.expect(consumer.poll(EasyMock.anyLong())).andAnswer(new IAnswer<ConsumerRecords<byte[], byte[]>>() {

        @Override
        public ConsumerRecords<byte[], byte[]> answer() throws Throwable {
            // "Sleep" so time will progress
            time.sleep(1L);
            ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(Collections.singletonMap(
                    new TopicPartition(TOPIC, PARTITION),
                    Arrays.asList(new ConsumerRecord<>(TOPIC, PARTITION, FIRST_OFFSET + recordsReturned,
                            TIMESTAMP, TIMESTAMP_TYPE, 0L, 0, 0, RAW_KEY, RAW_VALUE))));
            recordsReturned++;
            return records;
        }
    });
    EasyMock.expect(keyConverter.toConnectData(TOPIC, RAW_KEY)).andReturn(new SchemaAndValue(KEY_SCHEMA, KEY));
    EasyMock.expect(valueConverter.toConnectData(TOPIC, RAW_VALUE)).andReturn(new SchemaAndValue(VALUE_SCHEMA, VALUE));
    sinkTask.put(EasyMock.anyObject(Collection.class));
    return EasyMock.expectLastCall();
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) Collection(java.util.Collection) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) SchemaAndValue(org.apache.kafka.connect.data.SchemaAndValue)
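
As the comment in the method notes, the caller attaches the interesting behavior to the IExpectationSetters it returns, for example andThrow to simulate a failing put(). A minimal, self-contained sketch (with hypothetical names) of the same return-the-setters pattern:

import static org.easymock.EasyMock.*;

import java.util.Collection;
import java.util.Collections;

import org.easymock.IExpectationSetters;

public class ChainedBehaviorDemo {

    interface Task {
        void put(Collection<String> records);
    }

    // Mirrors the pattern above: record the expectation, then hand the setters
    // back so each caller decides how the call should behave.
    static IExpectationSetters<Object> expectPut(Task task) {
        task.put(anyObject());
        return expectLastCall();
    }

    public static void main(String[] args) {
        Task task = mock(Task.class);
        expectPut(task).andThrow(new RuntimeException("simulated failure"));
        replay(task);
        try {
            task.put(Collections.singletonList("record"));
        } catch (RuntimeException e) {
            System.out.println("put() failed as configured: " + e.getMessage());
        }
        verify(task);
    }
}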

Example 5 with SchemaAndValue

Use of org.apache.kafka.connect.data.SchemaAndValue in project apache-kafka-on-k8s by banzaicloud.

From the class WorkerSinkTask, method convertHeadersFor:

private Headers convertHeadersFor(ConsumerRecord<byte[], byte[]> record) {
    Headers result = new ConnectHeaders();
    org.apache.kafka.common.header.Headers recordHeaders = record.headers();
    if (recordHeaders != null) {
        String topic = record.topic();
        for (org.apache.kafka.common.header.Header recordHeader : recordHeaders) {
            SchemaAndValue schemaAndValue = headerConverter.toConnectHeader(topic, recordHeader.key(), recordHeader.value());
            result.add(recordHeader.key(), schemaAndValue);
        }
    }
    return result;
}
Also used : ConnectHeaders(org.apache.kafka.connect.header.ConnectHeaders) Headers(org.apache.kafka.connect.header.Headers) SchemaAndValue(org.apache.kafka.connect.data.SchemaAndValue)
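
Here the HeaderConverter yields one SchemaAndValue per raw header, which is then added to the Connect Headers container. A self-contained sketch of that call shape, assuming Kafka's bundled SimpleHeaderConverter, which infers a schema from the raw bytes (the converter choice is an assumption, not part of the code above):

import java.nio.charset.StandardCharsets;

import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.header.ConnectHeaders;
import org.apache.kafka.connect.header.Headers;
import org.apache.kafka.connect.storage.SimpleHeaderConverter;

public class HeaderConversionSketch {
    public static void main(String[] args) {
        SimpleHeaderConverter converter = new SimpleHeaderConverter();
        byte[] raw = "42".getBytes(StandardCharsets.UTF_8);

        // Same shape as convertHeadersFor above: topic, header key, and raw
        // bytes in; SchemaAndValue out, then added under the original key.
        SchemaAndValue header = converter.toConnectHeader("my-topic", "retry-count", raw);
        Headers result = new ConnectHeaders();
        result.add("retry-count", header);

        System.out.println(header.schema().type() + " -> " + header.value());
    }
}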

Aggregations (usage counts)

SchemaAndValue (org.apache.kafka.connect.data.SchemaAndValue): 140
Test (org.junit.Test): 57
Schema (org.apache.kafka.connect.data.Schema): 49
Test (org.junit.jupiter.api.Test): 46
HashMap (java.util.HashMap): 32
Struct (org.apache.kafka.connect.data.Struct): 21
Date (org.apache.kafka.connect.data.Date): 18
BigInteger (java.math.BigInteger): 12
Map (java.util.Map): 12
ConnectorStatus (org.apache.kafka.connect.runtime.ConnectorStatus): 11
BigDecimal (java.math.BigDecimal): 10
TopicPartition (org.apache.kafka.common.TopicPartition): 9
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 8
TaskStatus (org.apache.kafka.connect.runtime.TaskStatus): 8
Callback (org.apache.kafka.clients.producer.Callback): 7
SinkRecord (org.apache.kafka.connect.sink.SinkRecord): 7
ArgumentMatchers.anyString (org.mockito.ArgumentMatchers.anyString): 7
Collection (java.util.Collection): 6
GregorianCalendar (java.util.GregorianCalendar): 6
LinkedHashMap (java.util.LinkedHashMap): 6