Use of org.apache.kafka.connect.runtime.errors.LogReporter in project kafka by apache.
In class ErrorHandlingTaskTest, method testErrorHandlingInSinkTasks:
@Test
public void testErrorHandlingInSinkTasks() throws Exception {
    Map<String, String> reportProps = new HashMap<>();
    reportProps.put(ConnectorConfig.ERRORS_LOG_ENABLE_CONFIG, "true");
    reportProps.put(ConnectorConfig.ERRORS_LOG_INCLUDE_MESSAGES_CONFIG, "true");
    LogReporter reporter = new LogReporter(taskId, connConfig(reportProps), errorHandlingMetrics);

    RetryWithToleranceOperator retryWithToleranceOperator = operator();
    retryWithToleranceOperator.metrics(errorHandlingMetrics);
    retryWithToleranceOperator.reporters(singletonList(reporter));
    createSinkTask(initialState, retryWithToleranceOperator);

    expectInitializeTask();
    expectTaskGetTopic(true);

    // valid json
    ConsumerRecord<byte[], byte[]> record1 = new ConsumerRecord<>(TOPIC, PARTITION1, FIRST_OFFSET, null, "{\"a\": 10}".getBytes());
    // bad json
    ConsumerRecord<byte[], byte[]> record2 = new ConsumerRecord<>(TOPIC, PARTITION2, FIRST_OFFSET, null, "{\"a\" 10}".getBytes());

    EasyMock.expect(consumer.poll(Duration.ofMillis(EasyMock.anyLong()))).andReturn(records(record1));
    EasyMock.expect(consumer.poll(Duration.ofMillis(EasyMock.anyLong()))).andReturn(records(record2));

    sinkTask.put(EasyMock.anyObject());
    EasyMock.expectLastCall().times(2);

    PowerMock.replayAll();

    workerSinkTask.initialize(TASK_CONFIG);
    workerSinkTask.initializeAndStart();
    workerSinkTask.iteration();
    workerSinkTask.iteration();

    // two records were consumed from Kafka
    assertSinkMetricValue("sink-record-read-total", 2.0);
    // only one (the valid record) made it through to the sink task
    assertSinkMetricValue("sink-record-send-total", 1.0);
    // one record failed permanently: the bad JSON could not be converted
    assertErrorHandlingMetricValue("total-record-errors", 1.0);
    // two retriable failures in the transformation, plus one in the converter
    assertErrorHandlingMetricValue("total-record-failures", 3.0);
    // the record that failed permanently was skipped
    assertErrorHandlingMetricValue("total-records-skipped", 1.0);

    PowerMock.verifyAll();
}
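The three error metrics asserted above follow one accounting rule: every failed attempt at any stage (convert, transform, put) increments total-record-failures, while a record whose error is not retriable, or whose retries are exhausted, additionally increments total-record-errors and, under errors.tolerance=all, total-records-skipped. The sketch below illustrates just that rule; it is not the real RetryWithToleranceOperator API, and every name in it is made up for illustration.

import java.util.concurrent.Callable;

// Illustrative sketch only -- a made-up stand-in, not Kafka's RetryWithToleranceOperator.
final class ToleranceSketch {
    long totalFailures; // corresponds to "total-record-failures"
    long totalErrors;   // corresponds to "total-record-errors"
    long totalSkipped;  // corresponds to "total-records-skipped"

    // Runs an operation with up to maxRetries retries; returns null if the record is skipped.
    <V> V execute(Callable<V> operation, int maxRetries) {
        for (int attempt = 0; attempt <= maxRetries; attempt++) {
            try {
                return operation.call();
            } catch (Exception e) {
                totalFailures++; // every failed attempt is counted as a failure
            }
        }
        totalErrors++;  // retries exhausted: the record has failed permanently
        totalSkipped++; // with errors.tolerance=all, the failed record is dropped
        return null;
    }
}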
Use of org.apache.kafka.connect.runtime.errors.LogReporter in project kafka by apache.
In class ErrorHandlingTaskTest, method testErrorHandlingInSourceTasks:
@Test
public void testErrorHandlingInSourceTasks() throws Exception {
    Map<String, String> reportProps = new HashMap<>();
    reportProps.put(ConnectorConfig.ERRORS_LOG_ENABLE_CONFIG, "true");
    reportProps.put(ConnectorConfig.ERRORS_LOG_INCLUDE_MESSAGES_CONFIG, "true");
    LogReporter reporter = new LogReporter(taskId, connConfig(reportProps), errorHandlingMetrics);

    RetryWithToleranceOperator retryWithToleranceOperator = operator();
    retryWithToleranceOperator.metrics(errorHandlingMetrics);
    retryWithToleranceOperator.reporters(singletonList(reporter));
    createSourceTask(initialState, retryWithToleranceOperator);

    // two valid records
    Schema valSchema = SchemaBuilder.struct().field("val", Schema.INT32_SCHEMA).build();
    Struct struct1 = new Struct(valSchema).put("val", 1234);
    SourceRecord record1 = new SourceRecord(emptyMap(), emptyMap(), TOPIC, PARTITION1, valSchema, struct1);
    Struct struct2 = new Struct(valSchema).put("val", 6789);
    SourceRecord record2 = new SourceRecord(emptyMap(), emptyMap(), TOPIC, PARTITION1, valSchema, struct2);

    EasyMock.expect(workerSourceTask.isStopping()).andReturn(false);
    EasyMock.expect(workerSourceTask.isStopping()).andReturn(false);
    EasyMock.expect(workerSourceTask.isStopping()).andReturn(true);
    EasyMock.expect(workerSourceTask.commitOffsets()).andReturn(true);

    sourceTask.initialize(EasyMock.anyObject());
    EasyMock.expectLastCall();
    sourceTask.start(EasyMock.anyObject());
    EasyMock.expectLastCall();

    EasyMock.expect(sourceTask.poll()).andReturn(singletonList(record1));
    EasyMock.expect(sourceTask.poll()).andReturn(singletonList(record2));
    expectTopicCreation(TOPIC);
    EasyMock.expect(producer.send(EasyMock.anyObject(), EasyMock.anyObject())).andReturn(null).times(2);

    PowerMock.replayAll();

    workerSourceTask.initialize(TASK_CONFIG);
    workerSourceTask.initializeAndStart();
    workerSourceTask.execute();

    // two records were polled from the source task
    assertSourceMetricValue("source-record-poll-total", 2.0);
    // no writes were confirmed: the mocked producer never completes the send callback
    assertSourceMetricValue("source-record-write-total", 0.0);
    // no record failed permanently
    assertErrorHandlingMetricValue("total-record-errors", 0.0);
    // each record failed twice in the retried transformation (2 records x 2 failures)
    assertErrorHandlingMetricValue("total-record-failures", 4.0);
    // both records eventually succeeded, so nothing was skipped
    assertErrorHandlingMetricValue("total-records-skipped", 0.0);

    PowerMock.verifyAll();
}
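The failure count of 4 comes from the faulty transformation that createSourceTask installs: each record fails twice with a retriable error before passing through (the helper itself is not shown on this page). To make that arithmetic concrete, a transformation with the same fail-twice-then-succeed behavior could look roughly like the hypothetical class below; only Transformation, ConnectRecord, ConfigDef, and RetriableException are real Kafka APIs here, the class name and counter logic are assumptions.

import java.util.Map;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.connect.connector.ConnectRecord;
import org.apache.kafka.connect.errors.RetriableException;
import org.apache.kafka.connect.transforms.Transformation;

// Hypothetical fail-twice-then-succeed transformation, shaped to match the
// "total-record-failures" arithmetic asserted above; not the test's actual helper.
public class FailTwicePassthrough<R extends ConnectRecord<R>> implements Transformation<R> {
    private int attemptsForCurrentRecord = 0;

    @Override
    public R apply(R record) {
        if (attemptsForCurrentRecord < 2) {
            attemptsForCurrentRecord++;
            // RetriableException tells the framework this attempt may be retried
            throw new RetriableException("transient failure " + attemptsForCurrentRecord);
        }
        attemptsForCurrentRecord = 0; // reset for the next record
        return record;                // pass the record through unchanged
    }

    @Override
    public ConfigDef config() { return new ConfigDef(); }

    @Override
    public void configure(Map<String, ?> configs) { }

    @Override
    public void close() { }
}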
Use of org.apache.kafka.connect.runtime.errors.LogReporter in project kafka by apache.
In class ErrorHandlingTaskTest, method testErrorHandlingInSourceTasksWthBadConverter:
@Test
public void testErrorHandlingInSourceTasksWthBadConverter() throws Exception {
    Map<String, String> reportProps = new HashMap<>();
    reportProps.put(ConnectorConfig.ERRORS_LOG_ENABLE_CONFIG, "true");
    reportProps.put(ConnectorConfig.ERRORS_LOG_INCLUDE_MESSAGES_CONFIG, "true");
    LogReporter reporter = new LogReporter(taskId, connConfig(reportProps), errorHandlingMetrics);

    RetryWithToleranceOperator retryWithToleranceOperator = operator();
    retryWithToleranceOperator.metrics(errorHandlingMetrics);
    retryWithToleranceOperator.reporters(singletonList(reporter));
    createSourceTask(initialState, retryWithToleranceOperator, badConverter());

    // two valid records
    Schema valSchema = SchemaBuilder.struct().field("val", Schema.INT32_SCHEMA).build();
    Struct struct1 = new Struct(valSchema).put("val", 1234);
    SourceRecord record1 = new SourceRecord(emptyMap(), emptyMap(), TOPIC, PARTITION1, valSchema, struct1);
    Struct struct2 = new Struct(valSchema).put("val", 6789);
    SourceRecord record2 = new SourceRecord(emptyMap(), emptyMap(), TOPIC, PARTITION1, valSchema, struct2);

    EasyMock.expect(workerSourceTask.isStopping()).andReturn(false);
    EasyMock.expect(workerSourceTask.isStopping()).andReturn(false);
    EasyMock.expect(workerSourceTask.isStopping()).andReturn(true);
    EasyMock.expect(workerSourceTask.commitOffsets()).andReturn(true);

    sourceTask.initialize(EasyMock.anyObject());
    EasyMock.expectLastCall();
    sourceTask.start(EasyMock.anyObject());
    EasyMock.expectLastCall();

    EasyMock.expect(sourceTask.poll()).andReturn(singletonList(record1));
    EasyMock.expect(sourceTask.poll()).andReturn(singletonList(record2));
    expectTopicCreation(TOPIC);
    EasyMock.expect(producer.send(EasyMock.anyObject(), EasyMock.anyObject())).andReturn(null).times(2);

    PowerMock.replayAll();

    workerSourceTask.initialize(TASK_CONFIG);
    workerSourceTask.initializeAndStart();
    workerSourceTask.execute();

    // two records were polled from the source task
    assertSourceMetricValue("source-record-poll-total", 2.0);
    // no writes were confirmed: the mocked producer never completes the send callback
    assertSourceMetricValue("source-record-write-total", 0.0);
    // no record failed permanently
    assertErrorHandlingMetricValue("total-record-errors", 0.0);
    // each record failed twice in the transformation and twice in the bad converter
    // before succeeding (2 records x 4 failures)
    assertErrorHandlingMetricValue("total-record-failures", 8.0);
    // both records eventually succeeded, so nothing was skipped
    assertErrorHandlingMetricValue("total-records-skipped", 0.0);

    PowerMock.verifyAll();
}
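Relative to the previous test, the only change is the badConverter() argument, which doubles the failure count to 8: each record now also fails twice during conversion before being serialized. The helper's body is not shown on this page; a plausible shape, inferred from the asserted counts (failing two of every three attempts), is sketched below. The class name and counter logic are assumptions; JsonConverter and RetriableException are real Connect APIs.

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.errors.RetriableException;
import org.apache.kafka.connect.json.JsonConverter;

// Hypothetical sketch of what badConverter() might build: a JsonConverter that
// fails two of every three serialization attempts with a retriable error.
public class FailTwiceConverter extends JsonConverter {
    private int invocations = 0;

    @Override
    public byte[] fromConnectData(String topic, Schema schema, Object value) {
        invocations++;
        if (invocations % 3 != 0) {
            throw new RetriableException("transient converter failure, invocation " + invocations);
        }
        return super.fromConnectData(topic, schema, value); // succeed on every third attempt
    }
}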
Use of org.apache.kafka.connect.runtime.errors.LogReporter in project kafka by apache.
In class Worker, method sinkTaskReporters:
private List<ErrorReporter> sinkTaskReporters(ConnectorTaskId id, SinkConnectorConfig connConfig, ErrorHandlingMetrics errorHandlingMetrics, Class<? extends Connector> connectorClass) {
    ArrayList<ErrorReporter> reporters = new ArrayList<>();
    LogReporter logReporter = new LogReporter(id, connConfig, errorHandlingMetrics);
    reporters.add(logReporter);

    // add a dead letter queue reporter only if a DLQ topic name is configured
    String topic = connConfig.dlqTopicName();
    if (topic != null && !topic.isEmpty()) {
        Map<String, Object> producerProps = producerConfigs(id, "connector-dlq-producer-" + id, config, connConfig, connectorClass, connectorClientConfigOverridePolicy, kafkaClusterId);
        Map<String, Object> adminProps = adminConfigs(id, "connector-dlq-adminclient-", config, connConfig, connectorClass, connectorClientConfigOverridePolicy, kafkaClusterId);
        DeadLetterQueueReporter reporter = DeadLetterQueueReporter.createAndSetup(adminProps, id, connConfig, producerProps, errorHandlingMetrics);
        reporters.add(reporter);
    }
    return reporters;
}
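Whether sinkTaskReporters adds the DeadLetterQueueReporter is decided purely by the connector configuration: the DLQ reporter is created only when a non-empty dead letter queue topic name is set. A minimal sketch of sink connector properties that would enable both reporters follows; the topic name "my-connector-dlq" is a placeholder.

import java.util.HashMap;
import java.util.Map;

public class ErrorHandlingPropsExample {
    public static Map<String, String> errorHandlingProps() {
        Map<String, String> props = new HashMap<>();
        props.put("errors.tolerance", "all");             // tolerate record errors instead of failing the task
        props.put("errors.log.enable", "true");           // enables the LogReporter
        props.put("errors.log.include.messages", "true"); // log the contents of failed records
        // a non-empty topic name is what triggers the DeadLetterQueueReporter branch above
        props.put("errors.deadletterqueue.topic.name", "my-connector-dlq"); // placeholder topic
        return props;
    }
}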
Use of org.apache.kafka.connect.runtime.errors.LogReporter in project kafka by apache.
In class Worker, method sourceTaskReporters:
private List<ErrorReporter> sourceTaskReporters(ConnectorTaskId id, ConnectorConfig connConfig, ErrorHandlingMetrics errorHandlingMetrics) {
    List<ErrorReporter> reporters = new ArrayList<>();
    LogReporter logReporter = new LogReporter(id, connConfig, errorHandlingMetrics);
    reporters.add(logReporter);
    return reporters;
}
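For source tasks the list contains only the LogReporter, since there is no dead letter queue on the source side. Either factory's result is then wired into the operator the same way the tests above do; the fragment below reuses identifiers (operator(), errorHandlingMetrics, id, connConfig) from the snippets on this page rather than defining them.

// Fragment: wiring a reporter list into the operator, mirroring the test setup above.
RetryWithToleranceOperator retryWithToleranceOperator = operator();
retryWithToleranceOperator.metrics(errorHandlingMetrics);
retryWithToleranceOperator.reporters(sourceTaskReporters(id, connConfig, errorHandlingMetrics));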