Use of org.apache.kafka.clients.producer.Callback in project apache-kafka-on-k8s by banzaicloud.
Class KafkaStatusBackingStoreTest, method putConnectorStateNonRetriableFailure.
@Test
public void putConnectorStateNonRetriableFailure() {
    KafkaBasedLog<String, byte[]> kafkaBasedLog = mock(KafkaBasedLog.class);
    Converter converter = mock(Converter.class);
    KafkaStatusBackingStore store = new KafkaStatusBackingStore(new MockTime(), converter, STATUS_TOPIC, kafkaBasedLog);

    byte[] value = new byte[0];
    expect(converter.fromConnectData(eq(STATUS_TOPIC), anyObject(Schema.class), anyObject(Struct.class))).andStubReturn(value);

    final Capture<Callback> callbackCapture = newCapture();
    kafkaBasedLog.send(eq("status-connector-conn"), eq(value), capture(callbackCapture));
    expectLastCall().andAnswer(new IAnswer<Void>() {
        @Override
        public Void answer() throws Throwable {
            callbackCapture.getValue().onCompletion(null, new UnknownServerException());
            return null;
        }
    });
    replayAll();

    // the error is logged and ignored
    ConnectorStatus status = new ConnectorStatus(CONNECTOR, ConnectorStatus.State.RUNNING, WORKER_ID, 0);
    store.put(status);

    // state is not visible until read back from the log
    assertEquals(null, store.get(CONNECTOR));

    verifyAll();
}
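For reference, here is a minimal sketch (not project code) of the Callback contract this test exercises: the producer invokes onCompletion with either metadata or an exception, and a store that "logs and ignores" failures simply leaves its cached state untouched until the record is read back from the log. The StatusWriter class name and topic handling below are illustrative assumptions.

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

// Illustrative sketch: send a status record with a log-and-ignore Callback,
// the behavior the test above asserts for non-retriable send failures.
class StatusWriter {
    private final Producer<String, byte[]> producer;
    private final String topic;

    StatusWriter(Producer<String, byte[]> producer, String topic) {
        this.producer = producer;
        this.topic = topic;
    }

    void write(String key, byte[] value) {
        producer.send(new ProducerRecord<>(topic, key, value), new Callback() {
            @Override
            public void onCompletion(RecordMetadata metadata, Exception exception) {
                if (exception != null) {
                    // the error is logged and ignored; cached state is only
                    // updated once the record is read back from the log
                    System.err.println("Failed to write status update: " + exception);
                }
            }
        });
    }
}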
Use of org.apache.kafka.clients.producer.Callback in project apache-kafka-on-k8s by banzaicloud.
Class KafkaStatusBackingStoreTest, method putSafeOverridesValueSetBySameWorker.
@Test
public void putSafeOverridesValueSetBySameWorker() {
    final byte[] value = new byte[0];
    KafkaBasedLog<String, byte[]> kafkaBasedLog = mock(KafkaBasedLog.class);
    Converter converter = mock(Converter.class);
    final KafkaStatusBackingStore store = new KafkaStatusBackingStore(new MockTime(), converter, STATUS_TOPIC, kafkaBasedLog);

    // the persisted status came from the same worker, but has a newer generation
    Map<String, Object> firstStatusRead = new HashMap<>();
    firstStatusRead.put("worker_id", WORKER_ID);
    firstStatusRead.put("state", "RUNNING");
    firstStatusRead.put("generation", 1L);

    Map<String, Object> secondStatusRead = new HashMap<>();
    secondStatusRead.put("worker_id", WORKER_ID);
    secondStatusRead.put("state", "UNASSIGNED");
    secondStatusRead.put("generation", 0L);

    expect(converter.toConnectData(STATUS_TOPIC, value))
            .andReturn(new SchemaAndValue(null, firstStatusRead))
            .andReturn(new SchemaAndValue(null, secondStatusRead));
    expect(converter.fromConnectData(eq(STATUS_TOPIC), anyObject(Schema.class), anyObject(Struct.class))).andStubReturn(value);

    final Capture<Callback> callbackCapture = newCapture();
    kafkaBasedLog.send(eq("status-connector-conn"), eq(value), capture(callbackCapture));
    expectLastCall().andAnswer(new IAnswer<Void>() {
        @Override
        public Void answer() throws Throwable {
            callbackCapture.getValue().onCompletion(null, null);
            store.read(consumerRecord(1, "status-connector-conn", value));
            return null;
        }
    });
    replayAll();

    store.read(consumerRecord(0, "status-connector-conn", value));
    store.putSafe(new ConnectorStatus(CONNECTOR, ConnectorStatus.State.UNASSIGNED, WORKER_ID, 0));

    ConnectorStatus status = new ConnectorStatus(CONNECTOR, ConnectorStatus.State.UNASSIGNED, WORKER_ID, 0);
    assertEquals(status, store.get(CONNECTOR));

    verifyAll();
}
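The precedence rule this test depends on can be stated compactly. The helper below is a hypothetical simplification for illustration, not the actual KafkaStatusBackingStore logic: a "safe" write may overwrite a cached status when the cached entry was written by the same worker, or when the proposed entry's generation is at least as new.

import org.apache.kafka.connect.runtime.ConnectorStatus;

// Hypothetical simplification of the "safe write" precedence check; the real
// check is internal to KafkaStatusBackingStore.
final class SafeWriteRule {
    static boolean canWriteSafely(ConnectorStatus cached, ConnectorStatus proposed) {
        return cached == null
                || cached.workerId().equals(proposed.workerId())  // same worker may overwrite its own status
                || cached.generation() <= proposed.generation();  // otherwise a newer generation wins
    }
}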
Use of org.apache.kafka.clients.producer.Callback in project apache-kafka-on-k8s by banzaicloud.
Class ProducerPerformance, method main.
public static void main(String[] args) throws Exception {
    ArgumentParser parser = argParser();
    try {
        Namespace res = parser.parseArgs(args);

        /* parse args */
        String topicName = res.getString("topic");
        long numRecords = res.getLong("numRecords");
        Integer recordSize = res.getInt("recordSize");
        int throughput = res.getInt("throughput");
        List<String> producerProps = res.getList("producerConfig");
        String producerConfig = res.getString("producerConfigFile");
        String payloadFilePath = res.getString("payloadFile");
        String transactionalId = res.getString("transactionalId");
        boolean shouldPrintMetrics = res.getBoolean("printMetrics");
        long transactionDurationMs = res.getLong("transactionDurationMs");
        boolean transactionsEnabled = 0 < transactionDurationMs;

        // since the default value gets printed with the help text, we escape \n there
        // and replace it with the correct value here.
        String payloadDelimiter = res.getString("payloadDelimiter").equals("\\n") ? "\n" : res.getString("payloadDelimiter");

        if (producerProps == null && producerConfig == null) {
            throw new ArgumentParserException("Either --producer-props or --producer.config must be specified.", parser);
        }

        List<byte[]> payloadByteList = new ArrayList<>();
        if (payloadFilePath != null) {
            Path path = Paths.get(payloadFilePath);
            System.out.println("Reading payloads from: " + path.toAbsolutePath());
            if (Files.notExists(path) || Files.size(path) == 0) {
                throw new IllegalArgumentException("File does not exist or empty file provided.");
            }
            String[] payloadList = new String(Files.readAllBytes(path), "UTF-8").split(payloadDelimiter);
            System.out.println("Number of messages read: " + payloadList.length);
            for (String payload : payloadList) {
                payloadByteList.add(payload.getBytes(StandardCharsets.UTF_8));
            }
        }

        Properties props = new Properties();
        if (producerConfig != null) {
            props.putAll(Utils.loadProps(producerConfig));
        }
        if (producerProps != null)
            for (String prop : producerProps) {
                String[] pieces = prop.split("=");
                if (pieces.length != 2)
                    throw new IllegalArgumentException("Invalid property: " + prop);
                props.put(pieces[0], pieces[1]);
            }
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
        if (transactionsEnabled)
            props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, transactionalId);

        KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props);
        if (transactionsEnabled)
            producer.initTransactions();

        /* setup perf test */
        byte[] payload = null;
        Random random = new Random(0);
        if (recordSize != null) {
            payload = new byte[recordSize];
            for (int i = 0; i < payload.length; ++i)
                payload[i] = (byte) (random.nextInt(26) + 65);
        }
        ProducerRecord<byte[], byte[]> record;
        Stats stats = new Stats(numRecords, 5000);
        long startMs = System.currentTimeMillis();
        ThroughputThrottler throttler = new ThroughputThrottler(throughput, startMs);
        int currentTransactionSize = 0;
        long transactionStartTime = 0;
        for (int i = 0; i < numRecords; i++) {
            if (transactionsEnabled && currentTransactionSize == 0) {
                producer.beginTransaction();
                transactionStartTime = System.currentTimeMillis();
            }
            if (payloadFilePath != null) {
                payload = payloadByteList.get(random.nextInt(payloadByteList.size()));
            }
            record = new ProducerRecord<>(topicName, payload);
            long sendStartMs = System.currentTimeMillis();
            Callback cb = stats.nextCompletion(sendStartMs, payload.length, stats);
            producer.send(record, cb);
            currentTransactionSize++;
            if (transactionsEnabled && transactionDurationMs <= (sendStartMs - transactionStartTime)) {
                producer.commitTransaction();
                currentTransactionSize = 0;
            }
            if (throttler.shouldThrottle(i, sendStartMs)) {
                throttler.throttle();
            }
        }
        if (transactionsEnabled && currentTransactionSize != 0)
            producer.commitTransaction();

        if (!shouldPrintMetrics) {
            producer.close();
            /* print final results */
            stats.printTotal();
        } else {
            // Make sure all messages are sent before printing out the stats and the metrics.
            // We need to do this in a different branch for now since tests/kafkatest/sanity_checks/test_performance_services.py
            // expects this class to work with older versions of the client jar that don't support flush().
            producer.flush();
            /* print final results */
            stats.printTotal();
            /* print out metrics */
            ToolsUtils.printMetrics(producer.metrics());
            producer.close();
        }
    } catch (ArgumentParserException e) {
        if (args.length == 0) {
            parser.printHelp();
            Exit.exit(0);
        } else {
            parser.handleError(e);
            Exit.exit(1);
        }
    }
}
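Stripped down, the tool's hot path is a loop of fire-and-forget sends whose Callback measures per-record round-trip time (in a Kafka distribution this class is normally run via bin/kafka-producer-perf-test.sh). Below is a minimal, hedged reconstruction of that loop: the MiniPerf class name, topic, and bootstrap address are illustrative assumptions, and the real tool accumulates latencies in its Stats helper rather than only reporting failures.

import java.nio.charset.StandardCharsets;
import java.util.Properties;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;

// Minimal sketch of the perf loop: each send gets a Callback that measures
// the round trip for that record.
public class MiniPerf {
    public static void main(String[] args) {
        Properties props = new Properties();
        // assumption: a local broker; substitute your own bootstrap servers
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.ByteArraySerializer");
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.ByteArraySerializer");
        try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props)) {
            byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
            for (int i = 0; i < 1000; i++) {
                final long sendStartMs = System.currentTimeMillis();
                Callback cb = (metadata, exception) -> {
                    long latencyMs = System.currentTimeMillis() - sendStartMs;
                    // the real tool would feed latencyMs into its Stats helper
                    if (exception != null)
                        System.err.println("send failed after " + latencyMs + " ms: " + exception);
                };
                producer.send(new ProducerRecord<>("perf-test", payload), cb);
            }
            producer.flush();
        }
    }
}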
Use of org.apache.kafka.clients.producer.Callback in project apache-kafka-on-k8s by banzaicloud.
Class RecordCollectorTest, method shouldThrowStreamsExceptionOnSubsequentCallIfASendFailsWithDefaultExceptionHandler.
@SuppressWarnings("unchecked")
@Test
public void shouldThrowStreamsExceptionOnSubsequentCallIfASendFailsWithDefaultExceptionHandler() {
    final RecordCollector collector = new RecordCollectorImpl(
            new MockProducer(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {
                @Override
                public synchronized Future<RecordMetadata> send(final ProducerRecord record, final Callback callback) {
                    callback.onCompletion(null, new Exception());
                    return null;
                }
            },
            "test",
            logContext,
            new DefaultProductionExceptionHandler());
    collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
    try {
        collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
        fail("Should have thrown StreamsException");
    } catch (final StreamsException expected) {
        /* ok */
    }
}
Use of org.apache.kafka.clients.producer.Callback in project apache-kafka-on-k8s by banzaicloud.
Class RecordCollectorTest, method shouldThrowStreamsExceptionOnFlushIfASendFailedWithDefaultExceptionHandler.
@SuppressWarnings("unchecked")
@Test
public void shouldThrowStreamsExceptionOnFlushIfASendFailedWithDefaultExceptionHandler() {
    final RecordCollector collector = new RecordCollectorImpl(
            new MockProducer(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {
                @Override
                public synchronized Future<RecordMetadata> send(final ProducerRecord record, final Callback callback) {
                    callback.onCompletion(null, new Exception());
                    return null;
                }
            },
            "test",
            logContext,
            new DefaultProductionExceptionHandler());
    collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
    try {
        collector.flush();
        fail("Should have thrown StreamsException");
    } catch (final StreamsException expected) {
        /* ok */
    }
}
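Both tests verify the same contract: an exception delivered to the producer Callback is not thrown on the callback thread, but is remembered and rethrown as a StreamsException on the next send() or flush(). Here is a minimal sketch of that deferred-failure pattern with hypothetical names; the real logic lives in RecordCollectorImpl.

import java.util.concurrent.atomic.AtomicReference;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.streams.errors.StreamsException;

// Hypothetical sketch of the deferred-failure pattern the two tests verify.
class FailFastCollector {
    private final Producer<byte[], byte[]> producer;
    private final AtomicReference<Exception> sendException = new AtomicReference<>();

    FailFastCollector(Producer<byte[], byte[]> producer) {
        this.producer = producer;
    }

    void send(ProducerRecord<byte[], byte[]> record) {
        checkForException(); // a failure from an earlier send surfaces here
        producer.send(record, (metadata, exception) -> {
            if (exception != null)
                sendException.compareAndSet(null, exception); // remember only the first failure
        });
    }

    void flush() {
        producer.flush();
        checkForException(); // ... or here, once all in-flight sends have completed
    }

    private void checkForException() {
        Exception e = sendException.get();
        if (e != null)
            throw new StreamsException("a previous record send failed", e);
    }
}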