Use of org.apache.kafka.connect.sink.SinkRecord in project kafka by apache.
From the class WorkerSinkTaskTest, method testWakeupInCommitSyncCausesRetry.
@Test
public void testWakeupInCommitSyncCausesRetry() throws Exception {
    expectInitializeTask();
    expectPollInitialAssignment();

    expectConsumerPoll(1);
    expectConversionAndTransformation(1);
    sinkTask.put(EasyMock.<Collection<SinkRecord>>anyObject());
    EasyMock.expectLastCall();

    final List<TopicPartition> partitions = asList(TOPIC_PARTITION, TOPIC_PARTITION2);

    final Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    offsets.put(TOPIC_PARTITION, new OffsetAndMetadata(FIRST_OFFSET + 1));
    offsets.put(TOPIC_PARTITION2, new OffsetAndMetadata(FIRST_OFFSET));
    sinkTask.preCommit(offsets);
    EasyMock.expectLastCall().andReturn(offsets);

    // first one raises wakeup
    consumer.commitSync(EasyMock.<Map<TopicPartition, OffsetAndMetadata>>anyObject());
    EasyMock.expectLastCall().andThrow(new WakeupException());

    // we should retry and complete the commit
    consumer.commitSync(EasyMock.<Map<TopicPartition, OffsetAndMetadata>>anyObject());
    EasyMock.expectLastCall();

    sinkTask.close(new HashSet<>(partitions));
    EasyMock.expectLastCall();

    EasyMock.expect(consumer.position(TOPIC_PARTITION)).andReturn(FIRST_OFFSET);
    EasyMock.expect(consumer.position(TOPIC_PARTITION2)).andReturn(FIRST_OFFSET);

    sinkTask.open(partitions);
    EasyMock.expectLastCall();

    EasyMock.expect(consumer.poll(EasyMock.anyLong())).andAnswer(new IAnswer<ConsumerRecords<byte[], byte[]>>() {
        @Override
        public ConsumerRecords<byte[], byte[]> answer() throws Throwable {
            rebalanceListener.getValue().onPartitionsRevoked(partitions);
            rebalanceListener.getValue().onPartitionsAssigned(partitions);
            return ConsumerRecords.empty();
        }
    });
    EasyMock.expect(consumer.assignment()).andReturn(new HashSet<>(partitions));

    consumer.resume(Collections.singleton(TOPIC_PARTITION));
    EasyMock.expectLastCall();
    consumer.resume(Collections.singleton(TOPIC_PARTITION2));
    EasyMock.expectLastCall();

    statusListener.onResume(taskId);
    EasyMock.expectLastCall();

    PowerMock.replayAll();

    workerTask.initialize(TASK_CONFIG);
    workerTask.initializeAndStart();

    // poll for initial assignment
    workerTask.iteration();
    // first record delivered
    workerTask.iteration();
    // now rebalance with the wakeup triggered
    workerTask.iteration();

    PowerMock.verifyAll();
}
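The test verifies that a consumer.wakeup() arriving while commitSync() is in flight does not abort the offset commit: the first commitSync throws WakeupException and the second is expected to succeed. A minimal sketch of the retry pattern being exercised (simplified and illustrative, not the actual WorkerSinkTask implementation):

while (true) {
    try {
        consumer.commitSync(offsets);
        break;   // commit succeeded
    } catch (WakeupException e) {
        // the wakeup was intended for the next poll, so swallow it here and retry the commit
    }
}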
Use of org.apache.kafka.connect.sink.SinkRecord in project kafka by apache.
From the class VerifiableSinkTask, method put.
@Override
public void put(Collection<SinkRecord> records) {
    long nowMs = System.currentTimeMillis();
    for (SinkRecord record : records) {
        Map<String, Object> data = new HashMap<>();
        data.put("name", name);
        // VerifiableSourceTask's input task (source partition)
        data.put("task", record.key());
        data.put("sinkTask", id);
        data.put("topic", record.topic());
        data.put("time_ms", nowMs);
        data.put("seqno", record.value());
        data.put("offset", record.kafkaOffset());

        String dataJson;
        try {
            dataJson = JSON_SERDE.writeValueAsString(data);
        } catch (JsonProcessingException e) {
            dataJson = "Bad data can't be written as json: " + e.getMessage();
        }
        System.out.println(dataJson);
        unflushed.add(data);
    }
}
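For reference, records like the ones this method consumes can be built directly with the public SinkRecord constructor used elsewhere on this page. A minimal sketch of driving put() in a test, with hypothetical values for the key (source task id) and value (sequence number):

// Hypothetical example values; topic, key and seqno are illustrative only.
SinkRecord record = new SinkRecord(
        "test-topic", 0,                  // topic, partition
        Schema.INT32_SCHEMA, 7,           // key schema and key
        Schema.INT64_SCHEMA, 123L,        // value schema and value
        42L);                             // kafkaOffset
task.put(Collections.singletonList(record));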
Use of org.apache.kafka.connect.sink.SinkRecord in project kafka by apache.
From the class WorkerSinkTask, method convertMessages.
private void convertMessages(ConsumerRecords<byte[], byte[]> msgs) {
    for (ConsumerRecord<byte[], byte[]> msg : msgs) {
        log.trace("Consuming message with key {}, value {}", msg.key(), msg.value());
        SchemaAndValue keyAndSchema = keyConverter.toConnectData(msg.topic(), msg.key());
        SchemaAndValue valueAndSchema = valueConverter.toConnectData(msg.topic(), msg.value());
        SinkRecord record = new SinkRecord(msg.topic(), msg.partition(),
                keyAndSchema.schema(), keyAndSchema.value(),
                valueAndSchema.schema(), valueAndSchema.value(),
                msg.offset(),
                ConnectUtils.checkAndConvertTimestamp(msg.timestamp()),
                msg.timestampType());
        record = transformationChain.apply(record);
        if (record != null) {
            messageBatch.add(record);
        }
    }
}
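The keyConverter and valueConverter fields here are org.apache.kafka.connect.storage.Converter instances configured from the worker properties. As a standalone illustration of the toConnectData call using the built-in StringConverter (topic name and payload are hypothetical):

StringConverter converter = new StringConverter();
converter.configure(Collections.<String, Object>emptyMap(), false);   // false = configured as a value converter
SchemaAndValue converted = converter.toConnectData("topic1", "hello".getBytes(StandardCharsets.UTF_8));
// converted.schema() is the optional string schema, converted.value() is the String "hello"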
Use of org.apache.kafka.connect.sink.SinkRecord in project kafka by apache.
From the class SetSchemaMetadataTest, method schemaNameAndVersionUpdate.
@Test
public void schemaNameAndVersionUpdate() {
    final Map<String, String> props = new HashMap<>();
    props.put("schema.name", "foo");
    props.put("schema.version", "42");

    final SetSchemaMetadata<SinkRecord> xform = new SetSchemaMetadata.Value<>();
    xform.configure(props);

    final SinkRecord record = new SinkRecord("", 0, null, null, SchemaBuilder.struct().build(), null, 0);
    final SinkRecord updatedRecord = xform.apply(record);

    assertEquals("foo", updatedRecord.valueSchema().name());
    assertEquals(new Integer(42), updatedRecord.valueSchema().version());
}
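SetSchemaMetadata also provides a Key variant that applies the same name and version to the key schema. A brief sketch mirroring the test above (illustrative only, not part of the original test class):

final SetSchemaMetadata<SinkRecord> keyXform = new SetSchemaMetadata.Key<>();
keyXform.configure(props);
final SinkRecord keyRecord = new SinkRecord("", 0, SchemaBuilder.struct().build(), null, null, null, 0);
final SinkRecord updatedKeyRecord = keyXform.apply(keyRecord);
assertEquals("foo", updatedKeyRecord.keySchema().name());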
Use of org.apache.kafka.connect.sink.SinkRecord in project kafka by apache.
From the class FileStreamSinkTaskTest, method testPutFlush.
@Test
public void testPutFlush() {
    HashMap<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    final String newLine = System.getProperty("line.separator");

    // We do not call task.start() since it would override the output stream
    task.put(Arrays.asList(new SinkRecord("topic1", 0, null, null, Schema.STRING_SCHEMA, "line1", 1)));
    offsets.put(new TopicPartition("topic1", 0), new OffsetAndMetadata(1L));
    task.flush(offsets);
    assertEquals("line1" + newLine, os.toString());

    task.put(Arrays.asList(
            new SinkRecord("topic1", 0, null, null, Schema.STRING_SCHEMA, "line2", 2),
            new SinkRecord("topic2", 0, null, null, Schema.STRING_SCHEMA, "line3", 1)));
    offsets.put(new TopicPartition("topic1", 0), new OffsetAndMetadata(2L));
    offsets.put(new TopicPartition("topic2", 0), new OffsetAndMetadata(1L));
    task.flush(offsets);
    assertEquals("line1" + newLine + "line2" + newLine + "line3" + newLine, os.toString());
}
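The behaviour under test is the simple line-per-record pattern: each record value is written as one line and flush() pushes the buffered output. A hedged sketch of that pattern, assuming an outputStream PrintStream field (simplified, not the actual FileStreamSinkTask source):

@Override
public void put(Collection<SinkRecord> records) {
    for (SinkRecord record : records) {
        outputStream.println(record.value());   // one line per record value
    }
}

@Override
public void flush(Map<TopicPartition, OffsetAndMetadata> offsets) {
    outputStream.flush();                       // offset bookkeeping is handled by the framework
}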