Use of org.apache.kafka.clients.producer.Callback in project apache-kafka-on-k8s by banzaicloud.
From the class RecordAccumulatorTest, method testAbortUnsentBatches:
@Test
public void testAbortUnsentBatches() throws Exception {
    long lingerMs = Long.MAX_VALUE;
    int numRecords = 100;
    final AtomicInteger numExceptionReceivedInCallback = new AtomicInteger(0);
    final RecordAccumulator accum = createTestRecordAccumulator(
            128 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 64 * 1024, CompressionType.NONE, lingerMs);
    final KafkaException cause = new KafkaException();

    class TestCallback implements Callback {
        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            assertEquals(cause, exception);
            numExceptionReceivedInCallback.incrementAndGet();
        }
    }
    for (int i = 0; i < numRecords; i++) {
        accum.append(new TopicPartition(topic, i % 3), 0L, key, value, null, new TestCallback(), maxBlockTimeMs);
    }
    RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds());
    assertFalse(result.readyNodes.isEmpty());
    Map<Integer, List<ProducerBatch>> drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
    assertTrue(accum.hasUndrained());
    assertTrue(accum.hasIncomplete());
    accum.abortUndrainedBatches(cause);

    // Drained batches must be left intact: closed but not completed, their callbacks not yet fired.
    int numDrainedRecords = 0;
    for (Map.Entry<Integer, List<ProducerBatch>> drainedEntry : drained.entrySet()) {
        for (ProducerBatch batch : drainedEntry.getValue()) {
            assertTrue(batch.isClosed());
            assertFalse(batch.produceFuture.completed());
            numDrainedRecords += batch.recordCount;
        }
    }
    assertTrue(numDrainedRecords > 0);
    assertTrue(numExceptionReceivedInCallback.get() > 0);
    // Every appended record was either drained or aborted with the expected exception.
    assertEquals(numRecords, numExceptionReceivedInCallback.get() + numDrainedRecords);
    assertFalse(accum.hasUndrained());
    assertTrue(accum.hasIncomplete());
}
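The TestCallback above exercises the same per-record contract that application code implements when sending with a KafkaProducer: onCompletion is invoked exactly once per record, with exactly one of metadata or exception non-null. A minimal standalone sketch of that usage (the bootstrap address and topic name are illustrative assumptions, not taken from the test):

import java.util.Properties;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

public class CallbackSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed broker address
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("demo-topic", "k", "v"), new Callback() {
                @Override
                public void onCompletion(RecordMetadata metadata, Exception exception) {
                    // Exactly one of metadata/exception is non-null.
                    if (exception != null) {
                        System.err.println("send failed: " + exception);
                    } else {
                        System.out.printf("wrote to %s-%d@%d%n",
                                metadata.topic(), metadata.partition(), metadata.offset());
                    }
                }
            });
        }
    }
}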
Use of org.apache.kafka.clients.producer.Callback in project apache-kafka-on-k8s by banzaicloud.
From the class WorkerSourceTask, method sendRecords:
/**
 * Try to send a batch of records. If a send fails and is retriable, this saves the remainder of the batch so it can
 * be retried after backing off. If a send fails and is not retriable, this will throw a ConnectException.
 * @return true if all messages were sent, false if some need to be retried
 */
private boolean sendRecords() {
    int processed = 0;
    recordBatch(toSend.size());
    final SourceRecordWriteCounter counter = new SourceRecordWriteCounter(toSend.size(), sourceTaskMetricsGroup);
    for (final SourceRecord preTransformRecord : toSend) {
        final SourceRecord record = transformationChain.apply(preTransformRecord);
        if (record == null) {
            // The transformation chain dropped the record; count it as skipped and commit it directly.
            counter.skipRecord();
            commitTaskRecord(preTransformRecord);
            continue;
        }
        RecordHeaders headers = convertHeaderFor(record);
        byte[] key = keyConverter.fromConnectData(record.topic(), record.keySchema(), record.key());
        byte[] value = valueConverter.fromConnectData(record.topic(), record.valueSchema(), record.value());
        final ProducerRecord<byte[], byte[]> producerRecord = new ProducerRecord<>(record.topic(), record.kafkaPartition(),
                ConnectUtils.checkAndConvertTimestamp(record.timestamp()), key, value, headers);
        log.trace("{} Appending record with key {}, value {}", this, record.key(), record.value());
        // Synchronize with the offset commit process, which reads these outstanding
        // messages and updates the offsets.
        synchronized (this) {
            if (!lastSendFailed) {
                if (!flushing) {
                    outstandingMessages.put(producerRecord, producerRecord);
                } else {
                    outstandingMessagesBacklog.put(producerRecord, producerRecord);
                }
                // Offsets are converted & serialized in the OffsetWriter
                offsetWriter.offset(record.sourcePartition(), record.sourceOffset());
            }
        }
        try {
            final String topic = producerRecord.topic();
            producer.send(producerRecord, new Callback() {
                @Override
                public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                    if (e != null) {
                        // Given the default settings for zero data loss, this should basically never happen --
                        // between "infinite" retries, indefinite blocking on full buffers, and "infinite" request
                        // timeouts, callbacks with exceptions should never be invoked in practice. If the
                        // user overrode these settings, the best we can do is notify them of the failure via
                        // logging.
                        log.error("{} failed to send record to {}: {}", this, topic, e);
                        log.debug("{} Failed record: {}", this, preTransformRecord);
                    } else {
                        log.trace("{} Wrote record successfully: topic {} partition {} offset {}", this,
                                recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset());
                        commitTaskRecord(preTransformRecord);
                    }
                    recordSent(producerRecord);
                    counter.completeRecord();
                }
            });
            lastSendFailed = false;
        } catch (RetriableException e) {
            log.warn("{} Failed to send {}, backing off before retrying:", this, producerRecord, e);
            // Keep the unsent remainder so the caller can retry it after backing off.
            toSend = toSend.subList(processed, toSend.size());
            lastSendFailed = true;
            counter.retryRemaining();
            return false;
        } catch (KafkaException e) {
            throw new ConnectException("Unrecoverable exception trying to send", e);
        }
        processed++;
    }
    toSend = null;
    return true;
}
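The boolean return value is consumed by the task's main loop, which backs off and calls sendRecords() again while the unsent remainder is still queued in toSend. A simplified sketch of that calling pattern (the backoff constant and the poll() helper are illustrative names, not necessarily the class's actual members):

// Illustrative caller loop: keep retrying sendRecords() with a fixed backoff until
// the whole batch is sent. SEND_FAILURE_BACKOFF_MS and poll() are assumed names.
private static final long SEND_FAILURE_BACKOFF_MS = 100;

private void iteration() throws InterruptedException {
    if (toSend == null)
        toSend = poll(); // fetch the next batch of SourceRecords from the source task
    if (toSend == null)
        return; // nothing to do
    if (!sendRecords())
        Thread.sleep(SEND_FAILURE_BACKOFF_MS); // retriable failure: back off, remainder stays queued
}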
Use of org.apache.kafka.clients.producer.Callback in project apache-kafka-on-k8s by banzaicloud.
From the class KafkaStatusBackingStoreTest, method putConnectorStateShouldOverride:
@Test
public void putConnectorStateShouldOverride() {
    final byte[] value = new byte[0];
    String otherWorkerId = "anotherhost:8083";
    KafkaBasedLog<String, byte[]> kafkaBasedLog = mock(KafkaBasedLog.class);
    Converter converter = mock(Converter.class);
    final KafkaStatusBackingStore store = new KafkaStatusBackingStore(new MockTime(), converter, STATUS_TOPIC, kafkaBasedLog);
    // The persisted status came from a different worker and has a newer generation;
    // the local put should still override it once the write is read back.
    Map<String, Object> firstStatusRead = new HashMap<>();
    firstStatusRead.put("worker_id", otherWorkerId);
    firstStatusRead.put("state", "RUNNING");
    firstStatusRead.put("generation", 1L);
    Map<String, Object> secondStatusRead = new HashMap<>();
    secondStatusRead.put("worker_id", WORKER_ID);
    secondStatusRead.put("state", "UNASSIGNED");
    secondStatusRead.put("generation", 0L);
    expect(converter.toConnectData(STATUS_TOPIC, value))
            .andReturn(new SchemaAndValue(null, firstStatusRead))
            .andReturn(new SchemaAndValue(null, secondStatusRead));
    expect(converter.fromConnectData(eq(STATUS_TOPIC), anyObject(Schema.class), anyObject(Struct.class))).andStubReturn(value);
    final Capture<Callback> callbackCapture = newCapture();
    kafkaBasedLog.send(eq("status-connector-conn"), eq(value), capture(callbackCapture));
    expectLastCall().andAnswer(new IAnswer<Void>() {
        @Override
        public Void answer() throws Throwable {
            // Acknowledge the send, then feed the written record back to the store.
            callbackCapture.getValue().onCompletion(null, null);
            store.read(consumerRecord(1, "status-connector-conn", value));
            return null;
        }
    });
    replayAll();
    store.read(consumerRecord(0, "status-connector-conn", value));
    ConnectorStatus status = new ConnectorStatus(CONNECTOR, ConnectorStatus.State.UNASSIGNED, WORKER_ID, 0);
    store.put(status);
    assertEquals(status, store.get(CONNECTOR));
    verifyAll();
}
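The Capture&lt;Callback&gt; dance above is a reusable EasyMock recipe for asynchronous APIs: record the callback that the code under test hands to its collaborator, then invoke it manually inside an IAnswer to simulate the acknowledgement. A minimal, self-contained sketch of the same pattern (AsyncSender and its send signature are illustrative stand-ins, not part of Kafka or Connect):

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.easymock.Capture;
import org.easymock.EasyMock;
import org.easymock.IAnswer;

public class CapturePatternSketch {
    // Illustrative collaborator with a callback-taking method, standing in for KafkaBasedLog.
    interface AsyncSender {
        void send(String key, byte[] value, Callback callback);
    }

    public static void main(String[] args) {
        final byte[] value = new byte[0];
        final Capture<Callback> captured = EasyMock.newCapture();
        AsyncSender sender = EasyMock.createMock(AsyncSender.class);
        sender.send(EasyMock.eq("key"), EasyMock.aryEq(value), EasyMock.capture(captured));
        EasyMock.expectLastCall().andAnswer(new IAnswer<Void>() {
            @Override
            public Void answer() throws Throwable {
                // Simulate a successful broker ack by completing the captured callback.
                captured.getValue().onCompletion(null, null);
                return null;
            }
        });
        EasyMock.replay(sender);

        sender.send("key", value, new Callback() {
            @Override
            public void onCompletion(RecordMetadata metadata, Exception e) {
                System.out.println(e == null ? "acked" : "failed: " + e);
            }
        });
        EasyMock.verify(sender);
    }
}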
Use of org.apache.kafka.clients.producer.Callback in project apache-kafka-on-k8s by banzaicloud.
From the class KafkaStatusBackingStoreTest, method putConnectorState:
@Test
public void putConnectorState() {
    KafkaBasedLog<String, byte[]> kafkaBasedLog = mock(KafkaBasedLog.class);
    Converter converter = mock(Converter.class);
    KafkaStatusBackingStore store = new KafkaStatusBackingStore(new MockTime(), converter, STATUS_TOPIC, kafkaBasedLog);
    byte[] value = new byte[0];
    expect(converter.fromConnectData(eq(STATUS_TOPIC), anyObject(Schema.class), anyObject(Struct.class))).andStubReturn(value);
    final Capture<Callback> callbackCapture = newCapture();
    kafkaBasedLog.send(eq("status-connector-conn"), eq(value), capture(callbackCapture));
    expectLastCall().andAnswer(new IAnswer<Void>() {
        @Override
        public Void answer() throws Throwable {
            callbackCapture.getValue().onCompletion(null, null);
            return null;
        }
    });
    replayAll();
    ConnectorStatus status = new ConnectorStatus(CONNECTOR, ConnectorStatus.State.RUNNING, WORKER_ID, 0);
    store.put(status);
    // state is not visible until read back from the log
    assertNull(store.get(CONNECTOR));
    verifyAll();
}
Use of org.apache.kafka.clients.producer.Callback in project apache-kafka-on-k8s by banzaicloud.
From the class KafkaStatusBackingStoreTest, method putConnectorStateRetriableFailure:
@Test
public void putConnectorStateRetriableFailure() {
    KafkaBasedLog<String, byte[]> kafkaBasedLog = mock(KafkaBasedLog.class);
    Converter converter = mock(Converter.class);
    KafkaStatusBackingStore store = new KafkaStatusBackingStore(new MockTime(), converter, STATUS_TOPIC, kafkaBasedLog);
    byte[] value = new byte[0];
    expect(converter.fromConnectData(eq(STATUS_TOPIC), anyObject(Schema.class), anyObject(Struct.class))).andStubReturn(value);
    final Capture<Callback> callbackCapture = newCapture();
    kafkaBasedLog.send(eq("status-connector-conn"), eq(value), capture(callbackCapture));
    expectLastCall().andAnswer(new IAnswer<Void>() {
        @Override
        public Void answer() throws Throwable {
            // First attempt fails with a retriable timeout; the store must resend.
            callbackCapture.getValue().onCompletion(null, new TimeoutException());
            return null;
        }
    }).andAnswer(new IAnswer<Void>() {
        @Override
        public Void answer() throws Throwable {
            callbackCapture.getValue().onCompletion(null, null);
            return null;
        }
    });
    replayAll();
    ConnectorStatus status = new ConnectorStatus(CONNECTOR, ConnectorStatus.State.RUNNING, WORKER_ID, 0);
    store.put(status);
    // state is not visible until read back from the log
    assertNull(store.get(CONNECTOR));
    verifyAll();
}
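The two chained answers simulate the store's send-retry behavior: a retriable TimeoutException on the first attempt, then success on the second. The general shape of such a self-retrying send callback looks roughly like this (a sketch only; sendWithRetry is an illustrative name, and the store's real code performs additional checks before resending):

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.errors.RetriableException;

// Sketch of a send that retries itself on retriable failures. The kafkaBasedLog
// and log fields are assumed to exist on the enclosing class, as in the store.
private void sendWithRetry(final String key, final byte[] value) {
    kafkaBasedLog.send(key, value, new Callback() {
        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            if (exception == null)
                return; // acknowledged
            if (exception instanceof RetriableException) {
                // e.g. TimeoutException: resend the same key and value
                sendWithRetry(key, value);
            } else {
                log.error("Failed to write status update", exception);
            }
        }
    });
}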