Use of org.apache.kafka.clients.consumer.ConsumerRecord in project ksql by confluentinc.
Class TopicStreamWriterFormatTest, method shouldMatchAvroFormatter.
@Test
public void shouldMatchAvroFormatter() throws Exception {
    /**
     * Build an AVRO message
     */
    String USER_SCHEMA = "{\n"
        + "    \"fields\": [\n"
        + "        { \"name\": \"str1\", \"type\": \"string\" }\n"
        + "    ],\n"
        + "    \"name\": \"myrecord\",\n"
        + "    \"type\": \"record\"\n"
        + "}";
    Schema.Parser parser = new Schema.Parser();
    Schema schema = parser.parse(USER_SCHEMA);
    GenericData.Record avroRecord = new GenericData.Record(schema);
    avroRecord.put("str1", "My first string");
    /**
     * Setup expects
     */
    SchemaRegistryClient schemaRegistryClient = mock(SchemaRegistryClient.class);
    expect(schemaRegistryClient.register(anyString(), anyObject())).andReturn(1);
    expect(schemaRegistryClient.getById(anyInt())).andReturn(schema);
    replay(schemaRegistryClient);
    Map<String, String> props = new HashMap<>();
    // The URL is never contacted: the registry client above is a mock.
    props.put("schema.registry.url", "localhost:9092");
    KafkaAvroSerializer avroSerializer = new KafkaAvroSerializer(schemaRegistryClient, props);
    /**
     * Test data
     */
    byte[] testRecordBytes = avroSerializer.serialize("topic", avroRecord);
    ConsumerRecord<String, Bytes> record =
        new ConsumerRecord<>("topic", 1, 1, "key", new Bytes(testRecordBytes));
    /**
     * Assert
     */
    assertTrue(TopicStreamWriter.Format.AVRO.isFormat("topic", record, schemaRegistryClient));
}
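For reference, the short ConsumerRecord constructor used above takes only topic, partition, offset, key, and value, leaving the timestamp and related metadata at their defaults, which makes it convenient for tests. A minimal, self-contained sketch (the class name, topic, and payload are illustrative):

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.utils.Bytes;

public class ConsumerRecordSketch {
    public static void main(String[] args) {
        // Same short constructor the test uses: (topic, partition, offset, key, value).
        ConsumerRecord<String, Bytes> record =
            new ConsumerRecord<>("topic", 1, 1L, "key", Bytes.wrap(new byte[] { 0x0 }));

        // The formatter check above only needs these accessors plus value().
        System.out.printf("%s-%d@%d key=%s value=%d bytes%n",
            record.topic(), record.partition(), record.offset(),
            record.key(), record.value().get().length);
    }
}

As an aside, Confluent's schema-registry client library also ships a MockSchemaRegistryClient, which can be an alternative to hand-stubbing register() and getById() with EasyMock as the test above does.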
Use of org.apache.kafka.clients.consumer.ConsumerRecord in project apache-kafka-on-k8s by banzaicloud.
Class WorkerSinkTaskThreadedTest, method expectRebalanceDuringPoll.
@SuppressWarnings("unchecked")
private IExpectationSetters<Object> expectRebalanceDuringPoll() throws Exception {
    final List<TopicPartition> partitions = Arrays.asList(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3);
    final long startOffset = 40L;
    final Map<TopicPartition, Long> offsets = new HashMap<>();
    offsets.put(TOPIC_PARTITION, startOffset);
    EasyMock.expect(consumer.poll(EasyMock.anyLong())).andAnswer(new IAnswer<ConsumerRecords<byte[], byte[]>>() {
        @Override
        public ConsumerRecords<byte[], byte[]> answer() throws Throwable {
            // "Sleep" so time will progress
            time.sleep(1L);
            sinkTaskContext.getValue().offset(offsets);
            rebalanceListener.getValue().onPartitionsAssigned(partitions);
            ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(Collections.singletonMap(
                new TopicPartition(TOPIC, PARTITION),
                Arrays.asList(new ConsumerRecord<>(TOPIC, PARTITION, FIRST_OFFSET + recordsReturned,
                    TIMESTAMP, TIMESTAMP_TYPE, 0L, 0, 0, RAW_KEY, RAW_VALUE))));
            recordsReturned++;
            return records;
        }
    });
    EasyMock.expect(consumer.position(TOPIC_PARTITION)).andReturn(FIRST_OFFSET);
    EasyMock.expect(consumer.position(TOPIC_PARTITION2)).andReturn(FIRST_OFFSET);
    EasyMock.expect(consumer.position(TOPIC_PARTITION3)).andReturn(FIRST_OFFSET);
    sinkTask.open(partitions);
    EasyMock.expectLastCall();
    consumer.seek(TOPIC_PARTITION, startOffset);
    EasyMock.expectLastCall();
    EasyMock.expect(keyConverter.toConnectData(TOPIC, RAW_KEY)).andReturn(new SchemaAndValue(KEY_SCHEMA, KEY));
    EasyMock.expect(valueConverter.toConnectData(TOPIC, RAW_VALUE)).andReturn(new SchemaAndValue(VALUE_SCHEMA, VALUE));
    sinkTask.put(EasyMock.anyObject(Collection.class));
    return EasyMock.expectLastCall();
}
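The mocked poll() above simulates a rebalance arriving mid-poll: it invokes the captured rebalance listener, and the test then expects seek() back to a stored offset. A hedged sketch of the production-side pattern being simulated, assuming kafka-clients 2.0+ for the Duration overload of poll() (the group id, topic, and offset store are illustrative):

import java.time.Duration;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

public class RebalanceSeekSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        props.put("group.id", "rebalance-sketch");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");

        // Hypothetical offset store; in the test above, sinkTaskContext plays this role.
        final Map<TopicPartition, Long> storedOffsets = Collections.emptyMap();

        try (final KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("topic"), new ConsumerRebalanceListener() {
                @Override
                public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                    // Typically flush or commit state for revoked partitions here.
                }

                @Override
                public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                    // Rewind newly assigned partitions to stored offsets, mirroring
                    // the consumer.seek(TOPIC_PARTITION, startOffset) expectation above.
                    for (TopicPartition tp : partitions) {
                        Long offset = storedOffsets.get(tp);
                        if (offset != null)
                            consumer.seek(tp, offset);
                    }
                }
            });
            // The listener callbacks fire from inside poll(), which is exactly
            // what the mocked poll() reproduces in expectRebalanceDuringPoll().
            consumer.poll(Duration.ofMillis(100));
        }
    }
}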
Use of org.apache.kafka.clients.consumer.ConsumerRecord in project apache-kafka-on-k8s by banzaicloud.
Class WorkerSinkTaskThreadedTest, method expectPolls.
// Note that this can only be called once per test currently
private Capture<Collection<SinkRecord>> expectPolls(final long pollDelayMs) throws Exception {
    // Stub out all the consumer stream/iterator responses, which we just want to verify occur,
    // but don't care about the exact details here.
    EasyMock.expect(consumer.poll(EasyMock.anyLong())).andStubAnswer(new IAnswer<ConsumerRecords<byte[], byte[]>>() {
        @Override
        public ConsumerRecords<byte[], byte[]> answer() throws Throwable {
            // "Sleep" so time will progress
            time.sleep(pollDelayMs);
            ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(Collections.singletonMap(
                new TopicPartition(TOPIC, PARTITION),
                Arrays.asList(new ConsumerRecord<>(TOPIC, PARTITION, FIRST_OFFSET + recordsReturned,
                    TIMESTAMP, TIMESTAMP_TYPE, 0L, 0, 0, RAW_KEY, RAW_VALUE))));
            recordsReturned++;
            return records;
        }
    });
    EasyMock.expect(keyConverter.toConnectData(TOPIC, RAW_KEY)).andReturn(new SchemaAndValue(KEY_SCHEMA, KEY)).anyTimes();
    EasyMock.expect(valueConverter.toConnectData(TOPIC, RAW_VALUE)).andReturn(new SchemaAndValue(VALUE_SCHEMA, VALUE)).anyTimes();
    final Capture<SinkRecord> recordCapture = EasyMock.newCapture();
    EasyMock.expect(transformationChain.apply(EasyMock.capture(recordCapture))).andAnswer(new IAnswer<SinkRecord>() {
        @Override
        public SinkRecord answer() {
            return recordCapture.getValue();
        }
    }).anyTimes();
    Capture<Collection<SinkRecord>> capturedRecords = EasyMock.newCapture(CaptureType.ALL);
    sinkTask.put(EasyMock.capture(capturedRecords));
    EasyMock.expectLastCall().anyTimes();
    return capturedRecords;
}
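expectPolls() leans on EasyMock's Capture with CaptureType.ALL, which retains the argument of every matched call so each batch passed to sinkTask.put() stays inspectable after the test runs. A minimal, self-contained sketch of that capture pattern, using a hypothetical Sink interface in place of SinkTask:

import java.util.Arrays;
import java.util.List;
import org.easymock.Capture;
import org.easymock.CaptureType;
import org.easymock.EasyMock;

public class CaptureAllSketch {

    // Hypothetical stand-in for SinkTask#put.
    interface Sink {
        void put(List<String> batch);
    }

    public static void main(String[] args) {
        Sink sink = EasyMock.createMock(Sink.class);

        // CaptureType.ALL keeps the argument of every matched call, which is
        // how expectPolls() above can later assert on each delivered batch.
        Capture<List<String>> batches = EasyMock.newCapture(CaptureType.ALL);
        sink.put(EasyMock.capture(batches));
        EasyMock.expectLastCall().anyTimes();
        EasyMock.replay(sink);

        sink.put(Arrays.asList("a"));
        sink.put(Arrays.asList("b", "c"));

        EasyMock.verify(sink);
        System.out.println(batches.getValues()); // [[a], [b, c]]
    }
}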
Use of org.apache.kafka.clients.consumer.ConsumerRecord in project apache-kafka-on-k8s by banzaicloud.
Class WorkerSinkTaskThreadedTest, method expectOnePoll.
@SuppressWarnings("unchecked")
private IExpectationSetters<Object> expectOnePoll() {
    // Currently the SinkTask's put() method will not be invoked unless we provide some data, so instead of
    // returning empty data, we return one record. The expectation is that the data will be ignored by the
    // response behavior specified using the return value of this method.
    EasyMock.expect(consumer.poll(EasyMock.anyLong())).andAnswer(new IAnswer<ConsumerRecords<byte[], byte[]>>() {
        @Override
        public ConsumerRecords<byte[], byte[]> answer() throws Throwable {
            // "Sleep" so time will progress
            time.sleep(1L);
            ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(Collections.singletonMap(
                new TopicPartition(TOPIC, PARTITION),
                Arrays.asList(new ConsumerRecord<>(TOPIC, PARTITION, FIRST_OFFSET + recordsReturned,
                    TIMESTAMP, TIMESTAMP_TYPE, 0L, 0, 0, RAW_KEY, RAW_VALUE))));
            recordsReturned++;
            return records;
        }
    });
    EasyMock.expect(keyConverter.toConnectData(TOPIC, RAW_KEY)).andReturn(new SchemaAndValue(KEY_SCHEMA, KEY));
    EasyMock.expect(valueConverter.toConnectData(TOPIC, RAW_VALUE)).andReturn(new SchemaAndValue(VALUE_SCHEMA, VALUE));
    sinkTask.put(EasyMock.anyObject(Collection.class));
    return EasyMock.expectLastCall();
}
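Unlike expectPolls(), this helper hands the IExpectationSetters from expectLastCall() back to the caller, so each test decides how that single put() call behaves, for example throwing to exercise error handling. A stripped-down, self-contained sketch of that hand-back pattern, with a hypothetical Task interface and an illustrative exception:

import org.easymock.EasyMock;
import org.easymock.IExpectationSetters;

public class ExpectLastCallSketch {

    // Hypothetical stand-in for the sink task's void put() method.
    interface Task {
        void put(String batch);
    }

    public static void main(String[] args) {
        Task task = EasyMock.createMock(Task.class);

        // expectLastCall() returns IExpectationSetters, so a helper like
        // expectOnePoll() can return the expectation and let the caller
        // attach the desired response behavior afterwards.
        task.put("batch-1");
        IExpectationSetters<Object> setters = EasyMock.expectLastCall();
        setters.andThrow(new RuntimeException("simulated failure"));
        EasyMock.replay(task);

        try {
            task.put("batch-1");
        } catch (RuntimeException e) {
            System.out.println("caught: " + e.getMessage());
        }
        EasyMock.verify(task);
    }
}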
Use of org.apache.kafka.clients.consumer.ConsumerRecord in project apache-kafka-on-k8s by banzaicloud.
Class VerifiableConsumer, method onRecordsReceived.
private Map<TopicPartition, OffsetAndMetadata> onRecordsReceived(ConsumerRecords<String, String> records) {
    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    List<RecordSetSummary> summaries = new ArrayList<>();
    for (TopicPartition tp : records.partitions()) {
        List<ConsumerRecord<String, String>> partitionRecords = records.records(tp);
        if (hasMessageLimit() && consumedMessages + partitionRecords.size() > maxMessages)
            partitionRecords = partitionRecords.subList(0, maxMessages - consumedMessages);
        if (partitionRecords.isEmpty())
            continue;
        long minOffset = partitionRecords.get(0).offset();
        long maxOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
        offsets.put(tp, new OffsetAndMetadata(maxOffset + 1));
        summaries.add(new RecordSetSummary(tp.topic(), tp.partition(), partitionRecords.size(), minOffset, maxOffset));
        if (verbose) {
            for (ConsumerRecord<String, String> record : partitionRecords)
                printJson(new RecordData(record));
        }
        consumedMessages += partitionRecords.size();
        if (isFinished())
            break;
    }
    printJson(new RecordsConsumed(records.count(), summaries));
    return offsets;
}
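Note that onRecordsReceived() computes each commit position as maxOffset + 1, i.e. the offset of the next record to consume, which is the convention commitSync() expects. A hedged sketch of the same per-partition commit pattern in a plain consumer loop, assuming kafka-clients 2.0+ (bootstrap servers, group id, and topic are illustrative):

import java.time.Duration;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class PerPartitionCommitSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        props.put("group.id", "commit-sketch");
        props.put("enable.auto.commit", "false");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("topic"));
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
                for (TopicPartition tp : records.partitions()) {
                    List<ConsumerRecord<String, String>> partitionRecords = records.records(tp);
                    if (partitionRecords.isEmpty())
                        continue;
                    for (ConsumerRecord<String, String> record : partitionRecords)
                        System.out.printf("%s: %s%n", tp, record.value());
                    // Commit the offset *after* the last processed record,
                    // matching the maxOffset + 1 convention above.
                    long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
                    consumer.commitSync(Collections.singletonMap(tp, new OffsetAndMetadata(lastOffset + 1)));
                }
            }
        }
    }
}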