Use of org.apache.kafka.clients.producer.RecordMetadata in project apache-kafka-on-k8s by banzaicloud.
The class SenderTest, method testCancelInFlightRequestAfterFatalError.
@Test
public void testCancelInFlightRequestAfterFatalError() throws Exception {
    final long producerId = 343434L;
    TransactionManager transactionManager = new TransactionManager();
    setupWithTransactionState(transactionManager);
    client.setNode(new Node(1, "localhost", 33343));
    prepareAndReceiveInitProducerId(producerId, Errors.NONE);
    assertTrue(transactionManager.hasProducerId());

    // Cluster authorization is a fatal error for the producer.
    Future<RecordMetadata> future1 = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    sender.run(time.milliseconds());

    Future<RecordMetadata> future2 = accumulator.append(tp1, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    sender.run(time.milliseconds());

    client.respond(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            return body instanceof ProduceRequest && ((ProduceRequest) body).isIdempotent();
        }
    }, produceResponse(tp0, -1, Errors.CLUSTER_AUTHORIZATION_FAILED, 0));

    sender.run(time.milliseconds());
    assertTrue(transactionManager.hasFatalError());
    assertFutureFailure(future1, ClusterAuthorizationException.class);

    sender.run(time.milliseconds());
    assertFutureFailure(future2, ClusterAuthorizationException.class);

    // Should be fine if the second response eventually returns.
    client.respond(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            return body instanceof ProduceRequest && ((ProduceRequest) body).isIdempotent();
        }
    }, produceResponse(tp1, 0, Errors.NONE, 0));
    sender.run(time.milliseconds());
}
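For reference, a minimal sketch (not part of the test above) of how the same fatal error surfaces through the public KafkaProducer API; the broker address, topic name, and record contents are assumptions for illustration.

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.ClusterAuthorizationException;
import org.apache.kafka.common.serialization.StringSerializer;

public class ClusterAuthorizationErrorExample {
    public static void main(String[] args) {
        Map<String, Object> config = new HashMap<>();
        config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address
        config.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);            // idempotent, like the test's producer
        config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(config)) {
            producer.send(new ProducerRecord<>("demo-topic", "key", "value"), (metadata, exception) -> {
                if (exception instanceof ClusterAuthorizationException) {
                    // Fatal for this producer instance: queued and in-flight sends all fail with the same
                    // exception, and retrying on the same producer will not help.
                    System.err.println("Producer is not authorized: " + exception.getMessage());
                }
            });
        }
    }
}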
Use of org.apache.kafka.clients.producer.RecordMetadata in project apache-kafka-on-k8s by banzaicloud.
The class SenderTest, method testSplitBatchAndSend.
@SuppressWarnings("deprecation")
private void testSplitBatchAndSend(TransactionManager txnManager, ProducerIdAndEpoch producerIdAndEpoch, TopicPartition tp) throws Exception {
    int maxRetries = 1;
    String topic = tp.topic();
    // Set a good compression ratio.
    CompressionRatioEstimator.setEstimation(topic, CompressionType.GZIP, 0.2f);
    try (Metrics m = new Metrics()) {
        accumulator = new RecordAccumulator(logContext, batchSize, 1024 * 1024, CompressionType.GZIP, 0L, 0L, m, time, new ApiVersions(), txnManager);
        SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m);
        Sender sender = new Sender(logContext, client, metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL, maxRetries, senderMetrics, time, REQUEST_TIMEOUT, 1000L, txnManager, new ApiVersions());
        // Create a two-broker cluster, with partition 0 on broker 0 and partition 1 on broker 1.
        Cluster cluster1 = TestUtils.clusterWith(2, topic, 2);
        metadata.update(cluster1, Collections.<String>emptySet(), time.milliseconds());

        // Send the first two messages.
        Future<RecordMetadata> f1 = accumulator.append(tp, 0L, "key1".getBytes(), new byte[batchSize / 2], null, null, MAX_BLOCK_TIMEOUT).future;
        Future<RecordMetadata> f2 = accumulator.append(tp, 0L, "key2".getBytes(), new byte[batchSize / 2], null, null, MAX_BLOCK_TIMEOUT).future;
        // connect
        sender.run(time.milliseconds());
        // send produce request
        sender.run(time.milliseconds());
        assertEquals("The next sequence should be 2", 2, txnManager.sequenceNumber(tp).longValue());
        String id = client.requests().peek().destination();
        assertEquals(ApiKeys.PRODUCE, client.requests().peek().requestBuilder().apiKey());
        Node node = new Node(Integer.valueOf(id), "localhost", 0);
        assertEquals(1, client.inFlightRequestCount());
        assertTrue("Client ready status should be true", client.isReady(node, 0L));

        Map<TopicPartition, ProduceResponse.PartitionResponse> responseMap = new HashMap<>();
        responseMap.put(tp, new ProduceResponse.PartitionResponse(Errors.MESSAGE_TOO_LARGE));
        client.respond(new ProduceResponse(responseMap));
        // split and reenqueue
        sender.run(time.milliseconds());
        assertEquals("The next sequence should be 2", 2, txnManager.sequenceNumber(tp).longValue());
        // The compression ratio should have been improved once.
        assertEquals(CompressionType.GZIP.rate - CompressionRatioEstimator.COMPRESSION_RATIO_IMPROVING_STEP, CompressionRatioEstimator.estimation(topic, CompressionType.GZIP), 0.01);

        // send the first produce request
        sender.run(time.milliseconds());
        assertEquals("The next sequence number should be 2", 2, txnManager.sequenceNumber(tp).longValue());
        assertFalse("The future shouldn't have been done.", f1.isDone());
        assertFalse("The future shouldn't have been done.", f2.isDone());
        id = client.requests().peek().destination();
        assertEquals(ApiKeys.PRODUCE, client.requests().peek().requestBuilder().apiKey());
        node = new Node(Integer.valueOf(id), "localhost", 0);
        assertEquals(1, client.inFlightRequestCount());
        assertTrue("Client ready status should be true", client.isReady(node, 0L));

        responseMap.put(tp, new ProduceResponse.PartitionResponse(Errors.NONE, 0L, 0L, 0L));
        client.respond(produceRequestMatcher(tp, producerIdAndEpoch, 0, txnManager.isTransactional()), new ProduceResponse(responseMap));
        // receive
        sender.run(time.milliseconds());
        assertTrue("The future should have been done.", f1.isDone());
        assertEquals("The next sequence number should still be 2", 2, txnManager.sequenceNumber(tp).longValue());
        assertEquals("The last ack'd sequence number should be 0", 0, txnManager.lastAckedSequence(tp));
        assertFalse("The future shouldn't have been done.", f2.isDone());
        assertEquals("Offset of the first message should be 0", 0L, f1.get().offset());

        // send the second produce request
        sender.run(time.milliseconds());
        id = client.requests().peek().destination();
        assertEquals(ApiKeys.PRODUCE, client.requests().peek().requestBuilder().apiKey());
        node = new Node(Integer.valueOf(id), "localhost", 0);
        assertEquals(1, client.inFlightRequestCount());
        assertTrue("Client ready status should be true", client.isReady(node, 0L));

        responseMap.put(tp, new ProduceResponse.PartitionResponse(Errors.NONE, 1L, 0L, 0L));
        client.respond(produceRequestMatcher(tp, producerIdAndEpoch, 1, txnManager.isTransactional()), new ProduceResponse(responseMap));
        // receive
        sender.run(time.milliseconds());
        assertTrue("The future should have been done.", f2.isDone());
        assertEquals("The next sequence number should be 2", 2, txnManager.sequenceNumber(tp).longValue());
        assertEquals("The last ack'd sequence number should be 1", 1, txnManager.lastAckedSequence(tp));
        assertEquals("Offset of the second message should be 1", 1L, f2.get().offset());
        assertTrue("There should be no batch in the accumulator", accumulator.batches().get(tp).isEmpty());
        assertTrue("There should be a split", m.metrics().get(senderMetrics.batchSplitRate).value() > 0);
    }
}
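The test above drives batch splitting directly through the Sender internals: a MESSAGE_TOO_LARGE response makes the producer split the batch, re-enqueue the halves, and lower the per-topic compression-ratio estimate. As a rough sketch only, the configuration below shows the public producer settings this behavior depends on (batch.size and compression.type); the broker address and values are illustrative assumptions.

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.ByteArraySerializer;

public class BatchSplitConfigExample {
    public static void main(String[] args) {
        Map<String, Object> config = new HashMap<>();
        config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address
        config.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);                   // accumulator batch size in bytes
        config.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "gzip");            // ratio is estimated per topic
        config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
        config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
        // If a compressed batch still exceeds the broker's limit, the broker answers MESSAGE_TOO_LARGE;
        // the Sender then splits the batch, re-enqueues the halves, and lowers the estimated compression
        // ratio for the topic, which is what the assertions in the test above verify.
        try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(config)) {
            // ... produce large records here ...
        }
    }
}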
Use of org.apache.kafka.clients.producer.RecordMetadata in project apache-kafka-on-k8s by banzaicloud.
The class SenderTest, method testUnsupportedForMessageFormatInProduceRequest.
@Test
public void testUnsupportedForMessageFormatInProduceRequest() throws Exception {
    final long producerId = 343434L;
    TransactionManager transactionManager = new TransactionManager();
    setupWithTransactionState(transactionManager);
    client.setNode(new Node(1, "localhost", 33343));
    prepareAndReceiveInitProducerId(producerId, Errors.NONE);
    assertTrue(transactionManager.hasProducerId());

    Future<RecordMetadata> future = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    client.prepareResponse(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            return body instanceof ProduceRequest && ((ProduceRequest) body).isIdempotent();
        }
    }, produceResponse(tp0, -1, Errors.UNSUPPORTED_FOR_MESSAGE_FORMAT, 0));
    sender.run(time.milliseconds());
    assertFutureFailure(future, UnsupportedForMessageFormatException.class);

    // Unsupported for message format is not a fatal error.
    assertFalse(transactionManager.hasError());
}
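For context, a hedged sketch of how this non-fatal error looks from the application side: the individual send fails, but the producer remains usable. The topic name, record contents, and helper method are hypothetical.

import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.UnsupportedForMessageFormatException;

public class MessageFormatErrorExample {
    // Topic name and record contents are hypothetical.
    static void sendToPossiblyOldTopic(Producer<String, String> producer) {
        producer.send(new ProducerRecord<>("old-format-topic", "key", "value"), (metadata, exception) -> {
            if (exception instanceof UnsupportedForMessageFormatException) {
                // Per-record failure only: the topic's message format predates what this record needs.
                // The producer (and its transaction manager) stays usable, matching the test's assertFalse(hasError()).
                System.err.println("Topic message format too old for this record: " + exception.getMessage());
            }
        });
    }
}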
Use of org.apache.kafka.clients.producer.RecordMetadata in project apache-kafka-on-k8s by banzaicloud.
The class SenderTest, method testRetries.
@Test
public void testRetries() throws Exception {
    // create a sender with retries = 1
    int maxRetries = 1;
    Metrics m = new Metrics();
    SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m);
    try {
        Sender sender = new Sender(logContext, client, metadata, this.accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL, maxRetries, senderMetrics, time, REQUEST_TIMEOUT, 50, null, apiVersions);

        // do a successful retry
        Future<RecordMetadata> future = accumulator.append(tp0, 0L, "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
        // connect
        sender.run(time.milliseconds());
        // send produce request
        sender.run(time.milliseconds());
        String id = client.requests().peek().destination();
        Node node = new Node(Integer.parseInt(id), "localhost", 0);
        assertEquals(1, client.inFlightRequestCount());
        assertTrue(client.hasInFlightRequests());
        assertTrue("Client ready status should be true", client.isReady(node, 0L));

        client.disconnect(id);
        assertEquals(0, client.inFlightRequestCount());
        assertFalse(client.hasInFlightRequests());
        assertFalse("Client ready status should be false", client.isReady(node, 0L));
        // receive error
        sender.run(time.milliseconds());
        // reconnect
        sender.run(time.milliseconds());
        // resend
        sender.run(time.milliseconds());
        assertEquals(1, client.inFlightRequestCount());
        assertTrue(client.hasInFlightRequests());
        long offset = 0;
        client.respond(produceResponse(tp0, offset, Errors.NONE, 0));
        sender.run(time.milliseconds());
        assertTrue("Request should have retried and completed", future.isDone());
        assertEquals(offset, future.get().offset());

        // do an unsuccessful retry
        future = accumulator.append(tp0, 0L, "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
        // send produce request
        sender.run(time.milliseconds());
        for (int i = 0; i < maxRetries + 1; i++) {
            client.disconnect(client.requests().peek().destination());
            // receive error
            sender.run(time.milliseconds());
            // reconnect
            sender.run(time.milliseconds());
            // resend
            sender.run(time.milliseconds());
        }
        sender.run(time.milliseconds());
        assertFutureFailure(future, NetworkException.class);
    } finally {
        m.close();
    }
}
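A rough sketch of the equivalent public producer configuration for the retry behavior exercised above; the broker address and serializers are assumptions.

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;

public class RetryConfigExample {
    public static void main(String[] args) {
        Map<String, Object> config = new HashMap<>();
        config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address
        config.put(ProducerConfig.ACKS_CONFIG, "all");
        config.put(ProducerConfig.RETRIES_CONFIG, 1);           // one retry, as in the test
        config.put(ProducerConfig.RETRY_BACKOFF_MS_CONFIG, 50); // matches the 50 ms backoff given to the Sender
        config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // A disconnect before the response arrives consumes one attempt; once retries are exhausted the
        // record's future fails with a NetworkException, as the second half of the test asserts.
        KafkaProducer<String, String> producer = new KafkaProducer<>(config);
        producer.close();
    }
}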
Use of org.apache.kafka.clients.producer.RecordMetadata in project apache-kafka-on-k8s by banzaicloud.
The class WorkerSourceTask, method sendRecords.
/**
 * Try to send a batch of records. If a send fails and is retriable, this saves the remainder of the batch so it can
 * be retried after backing off. If a send fails and is not retriable, this will throw a ConnectException.
 * @return true if all messages were sent, false if some need to be retried
 */
private boolean sendRecords() {
    int processed = 0;
    recordBatch(toSend.size());
    final SourceRecordWriteCounter counter = new SourceRecordWriteCounter(toSend.size(), sourceTaskMetricsGroup);
    for (final SourceRecord preTransformRecord : toSend) {
        final SourceRecord record = transformationChain.apply(preTransformRecord);
        if (record == null) {
            counter.skipRecord();
            commitTaskRecord(preTransformRecord);
            continue;
        }

        RecordHeaders headers = convertHeaderFor(record);
        byte[] key = keyConverter.fromConnectData(record.topic(), record.keySchema(), record.key());
        byte[] value = valueConverter.fromConnectData(record.topic(), record.valueSchema(), record.value());
        final ProducerRecord<byte[], byte[]> producerRecord = new ProducerRecord<>(record.topic(), record.kafkaPartition(), ConnectUtils.checkAndConvertTimestamp(record.timestamp()), key, value, headers);
        log.trace("{} Appending record with key {}, value {}", this, record.key(), record.value());

        // Synchronize so that, whether or not a flush is in progress, we consistently track outstanding
        // messages and update the offsets.
        synchronized (this) {
            if (!lastSendFailed) {
                if (!flushing) {
                    outstandingMessages.put(producerRecord, producerRecord);
                } else {
                    outstandingMessagesBacklog.put(producerRecord, producerRecord);
                }
                // Offsets are converted & serialized in the OffsetWriter
                offsetWriter.offset(record.sourcePartition(), record.sourceOffset());
            }
        }

        try {
            final String topic = producerRecord.topic();
            producer.send(producerRecord, new Callback() {
                @Override
                public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                    if (e != null) {
                        // Given the default settings for zero data loss, this should basically never happen --
                        // between "infinite" retries, indefinite blocking on full buffers, and "infinite" request
                        // timeouts, callbacks with exceptions should never be invoked in practice. If the
                        // user overrode these settings, the best we can do is notify them of the failure via
                        // logging.
                        log.error("{} failed to send record to {}: {}", this, topic, e);
                        log.debug("{} Failed record: {}", this, preTransformRecord);
                    } else {
                        log.trace("{} Wrote record successfully: topic {} partition {} offset {}", this, recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset());
                        commitTaskRecord(preTransformRecord);
                    }
                    recordSent(producerRecord);
                    counter.completeRecord();
                }
            });
            lastSendFailed = false;
        } catch (RetriableException e) {
            log.warn("{} Failed to send {}, backing off before retrying:", this, producerRecord, e);
            toSend = toSend.subList(processed, toSend.size());
            lastSendFailed = true;
            counter.retryRemaining();
            return false;
        } catch (KafkaException e) {
            throw new ConnectException("Unrecoverable exception trying to send", e);
        }
        processed++;
    }
    toSend = null;
    return true;
}
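The send callback above alludes to "the default settings for zero data loss". As a hedged sketch only, the configuration below shows what such producer settings might look like when set explicitly; the Connect worker applies its own producer defaults elsewhere, so the broker address and values here are assumptions for illustration.

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.ByteArraySerializer;

public class ZeroDataLossProducerExample {
    public static void main(String[] args) {
        Map<String, Object> config = new HashMap<>();
        config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");   // assumed broker address
        config.put(ProducerConfig.ACKS_CONFIG, "all");                           // wait for full acknowledgement
        config.put(ProducerConfig.RETRIES_CONFIG, Integer.MAX_VALUE);            // "infinite" retries
        config.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, Long.MAX_VALUE);          // block indefinitely on a full buffer
        config.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, Integer.MAX_VALUE); // effectively no request timeout
        config.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, 1);     // keep ordering across retries
        config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
        config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
        // With settings like these, the send callback in sendRecords() should effectively never see an
        // exception; the error-logging branch exists for users who override them.
        KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(config);
        producer.close();
    }
}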