Use of org.apache.kafka.connect.source.SourceRecord in project apache-kafka-on-k8s by banzaicloud: class WorkerSourceTaskTest, method testSendRecordsPropagatesTimestamp.
@Test
public void testSendRecordsPropagatesTimestamp() throws Exception {
    final Long timestamp = System.currentTimeMillis();
    createWorkerTask();

    List<SourceRecord> records = Collections.singletonList(
            new SourceRecord(PARTITION, OFFSET, "topic", null, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD, timestamp));
    Capture<ProducerRecord<byte[], byte[]>> sent = expectSendRecordAnyTimes();

    PowerMock.replayAll();

    Whitebox.setInternalState(workerTask, "toSend", records);
    Whitebox.invokeMethod(workerTask, "sendRecords");
    assertEquals(timestamp, sent.getValue().timestamp());

    PowerMock.verifyAll();
}
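The timestamp under test is the final argument of the SourceRecord constructor. For context, here is a minimal sketch of a source task that supplies it; the class name, topic, and partition/offset maps are hypothetical, not part of the test fixture:

import java.util.Collections;
import java.util.List;
import java.util.Map;

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.source.SourceTask;

// Hypothetical task: emits one record whose timestamp WorkerSourceTask
// propagates into the ProducerRecord, as the test above verifies.
public class TimestampedSourceTask extends SourceTask {
    @Override
    public List<SourceRecord> poll() {
        Map<String, ?> sourcePartition = Collections.singletonMap("source", "demo");
        Map<String, ?> sourceOffset = Collections.singletonMap("position", 0L);
        return Collections.singletonList(new SourceRecord(
                sourcePartition, sourceOffset, "demo-topic", null,
                Schema.STRING_SCHEMA, "key", Schema.STRING_SCHEMA, "value",
                System.currentTimeMillis()));
    }

    @Override public void start(Map<String, String> props) { }
    @Override public void stop() { }
    @Override public String version() { return "1.0"; }
}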
Use of org.apache.kafka.connect.source.SourceRecord in project apache-kafka-on-k8s by banzaicloud: class WorkerSourceTaskTest, method testSendRecordsCorruptTimestamp.
// A negative timestamp other than -1 is invalid, so the timestamp check in
// sendRecords() must reject the record.
@Test(expected = InvalidRecordException.class)
public void testSendRecordsCorruptTimestamp() throws Exception {
    final Long timestamp = -3L;
    createWorkerTask();

    List<SourceRecord> records = Collections.singletonList(
            new SourceRecord(PARTITION, OFFSET, "topic", null, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD, timestamp));
    Capture<ProducerRecord<byte[], byte[]>> sent = expectSendRecordAnyTimes();

    PowerMock.replayAll();

    Whitebox.setInternalState(workerTask, "toSend", records);
    Whitebox.invokeMethod(workerTask, "sendRecords");
    assertEquals(null, sent.getValue().timestamp());

    PowerMock.verifyAll();
}
Use of org.apache.kafka.connect.source.SourceRecord in project apache-kafka-on-k8s by banzaicloud: class WorkerSourceTaskTest, method testSendRecordsNoTimestamp.
// A timestamp of -1 is the "no timestamp" sentinel and should be converted
// to null rather than rejected.
@Test
public void testSendRecordsNoTimestamp() throws Exception {
    final Long timestamp = -1L;
    createWorkerTask();

    List<SourceRecord> records = Collections.singletonList(
            new SourceRecord(PARTITION, OFFSET, "topic", null, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD, timestamp));
    Capture<ProducerRecord<byte[], byte[]>> sent = expectSendRecordAnyTimes();

    PowerMock.replayAll();

    Whitebox.setInternalState(workerTask, "toSend", records);
    Whitebox.invokeMethod(workerTask, "sendRecords");
    assertEquals(null, sent.getValue().timestamp());

    PowerMock.verifyAll();
}
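Taken together, the two negative-timestamp tests pin down the check applied inside sendRecords() via ConnectUtils.checkAndConvertTimestamp (called in the method below): -1 is treated as "no timestamp" and mapped to null, while any other negative value is rejected. A sketch of that validation, reconstructed from the asserted behavior rather than quoted from the Kafka source:

// Reconstructed from the test expectations above; not a verbatim copy of
// org.apache.kafka.connect.util.ConnectUtils.
public static Long checkAndConvertTimestamp(Long timestamp) {
    if (timestamp == null || timestamp >= 0)
        return timestamp;               // valid: pass through unchanged
    if (timestamp == -1L)               // the "no timestamp" sentinel
        return null;                    // broker will assign a timestamp
    throw new InvalidRecordException("Invalid record timestamp: " + timestamp);
}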
Use of org.apache.kafka.connect.source.SourceRecord in project apache-kafka-on-k8s by banzaicloud: class WorkerSourceTask, method sendRecords.
/**
* Try to send a batch of records. If a send fails and is retriable, this saves the remainder of the batch so it can
* be retried after backing off. If a send fails and is not retriable, this will throw a ConnectException.
* @return true if all messages were sent, false if some need to be retried
*/
private boolean sendRecords() {
    int processed = 0;
    recordBatch(toSend.size());
    final SourceRecordWriteCounter counter =
            new SourceRecordWriteCounter(toSend.size(), sourceTaskMetricsGroup);
    for (final SourceRecord preTransformRecord : toSend) {
        final SourceRecord record = transformationChain.apply(preTransformRecord);
        if (record == null) {
            counter.skipRecord();
            commitTaskRecord(preTransformRecord);
            continue;
        }

        RecordHeaders headers = convertHeaderFor(record);
        byte[] key = keyConverter.fromConnectData(record.topic(), record.keySchema(), record.key());
        byte[] value = valueConverter.fromConnectData(record.topic(), record.valueSchema(), record.value());
        final ProducerRecord<byte[], byte[]> producerRecord = new ProducerRecord<>(
                record.topic(), record.kafkaPartition(),
                ConnectUtils.checkAndConvertTimestamp(record.timestamp()),
                key, value, headers);
        log.trace("{} Appending record with key {}, value {}", this, record.key(), record.value());
        // Track this message as outstanding (or backlog it while a flush is in progress) before
        // the send, so that the producer callback can later clear the outstanding
        // messages and update the offsets.
        synchronized (this) {
            if (!lastSendFailed) {
                if (!flushing) {
                    outstandingMessages.put(producerRecord, producerRecord);
                } else {
                    outstandingMessagesBacklog.put(producerRecord, producerRecord);
                }
                // Offsets are converted & serialized in the OffsetWriter
                offsetWriter.offset(record.sourcePartition(), record.sourceOffset());
            }
        }
        try {
            final String topic = producerRecord.topic();
            producer.send(producerRecord, new Callback() {
                @Override
                public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                    if (e != null) {
                        // Given the default settings for zero data loss, this should basically never happen --
                        // between "infinite" retries, indefinite blocking on full buffers, and "infinite" request
                        // timeouts, callbacks with exceptions should never be invoked in practice. If the
                        // user overrode these settings, the best we can do is notify them of the failure via
                        // logging.
                        log.error("{} failed to send record to {}: {}", this, topic, e);
                        log.debug("{} Failed record: {}", this, preTransformRecord);
                    } else {
                        log.trace("{} Wrote record successfully: topic {} partition {} offset {}",
                                this, recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset());
                        commitTaskRecord(preTransformRecord);
                    }
                    recordSent(producerRecord);
                    counter.completeRecord();
                }
            });
            lastSendFailed = false;
        } catch (RetriableException e) {
            log.warn("{} Failed to send {}, backing off before retrying:", this, producerRecord, e);
            toSend = toSend.subList(processed, toSend.size());
            lastSendFailed = true;
            counter.retryRemaining();
            return false;
        } catch (KafkaException e) {
            throw new ConnectException("Unrecoverable exception trying to send", e);
        }
        processed++;
    }
    toSend = null;
    return true;
}
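The boolean return value and the toSend field drive the retry protocol described in the Javadoc: on a RetriableException the unsent tail of the batch is kept in toSend, and the caller backs off before invoking sendRecords() again. A simplified sketch of such a driver loop; stopping and SEND_BACKOFF_MS are illustrative names, not taken from WorkerSourceTask:

// Illustrative caller loop: poll the task for a batch, then keep retrying
// sendRecords() with a backoff until the whole batch has been handed off.
private void execute() throws InterruptedException {
    while (!stopping) {
        if (toSend == null)
            toSend = task.poll();            // fetch the next batch; may be null
        if (toSend == null)
            continue;                        // nothing to send this round
        if (!sendRecords())                  // false: retriable failure, tail kept in toSend
            Thread.sleep(SEND_BACKOFF_MS);   // hypothetical backoff before retrying
    }
}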
Use of org.apache.kafka.connect.source.SourceRecord in project apache-kafka-on-k8s by banzaicloud: class CastTest, method castWholeRecordValueSchemalessFloat32.
@Test
public void castWholeRecordValueSchemalessFloat32() {
    xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "float32"));
    SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, null, 42));

    assertNull(transformed.valueSchema());
    assertEquals(42.f, transformed.value());
}
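For comparison with the test fixture above, the same transformation can be exercised standalone. A small sketch; the class name CastDemo is ours, while Cast.Value and the SPEC_CONFIG key come from Kafka Connect:

import java.util.Collections;

import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.transforms.Cast;

public class CastDemo {
    public static void main(String[] args) {
        // Configure a whole-value cast to float32, as in the test above.
        Cast<SourceRecord> cast = new Cast.Value<>();
        cast.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "float32"));

        SourceRecord out = cast.apply(new SourceRecord(null, null, "topic", 0, null, 42));
        System.out.println(out.value());   // 42.0 (a Float); the schema stays null
    }
}

In a connector configuration the equivalent is transforms=cast, transforms.cast.type=org.apache.kafka.connect.transforms.Cast$Value, transforms.cast.spec=float32.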