
Example 61 with Callback

Use of org.apache.kafka.clients.producer.Callback in project apache-kafka-on-k8s by banzaicloud.

From class EosTestDriver, method generate:

static void generate(final String kafka) {
    Runtime.getRuntime().addShutdownHook(new Thread() {

        @Override
        public void run() {
            System.out.println("Terminating");
            System.out.flush();
            isRunning = false;
        }
    });
    final Properties producerProps = new Properties();
    producerProps.put(ProducerConfig.CLIENT_ID_CONFIG, "EosTest");
    producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka);
    producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
    producerProps.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
    final KafkaProducer<String, Integer> producer = new KafkaProducer<>(producerProps);
    final Random rand = new Random(System.currentTimeMillis());
    while (isRunning) {
        final String key = "" + rand.nextInt(MAX_NUMBER_OF_KEYS);
        final int value = rand.nextInt(10000);
        final ProducerRecord<String, Integer> record = new ProducerRecord<>("data", key, value);
        producer.send(record, new Callback() {

            @Override
            public void onCompletion(final RecordMetadata metadata, final Exception exception) {
                if (exception != null) {
                    exception.printStackTrace(System.err);
                    System.err.flush();
                    if (exception instanceof TimeoutException) {
                        try {
                            // message == org.apache.kafka.common.errors.TimeoutException: Expiring 4 record(s) for data-0: 30004 ms has passed since last attempt plus backoff time
                            final int expired = Integer.parseInt(exception.getMessage().split(" ")[2]);
                            updateNumRecordsProduces(-expired);
                        } catch (Exception ignore) {
                        }
                    }
                }
            }
        });
        updateNumRecordsProduces(1);
        if (numRecordsProduced % 1000 == 0) {
            System.out.println(numRecordsProduced + " records produced");
            System.out.flush();
        }
        Utils.sleep(rand.nextInt(10));
    }
    producer.close();
    System.out.println("Producer closed: " + numRecordsProduced + " records produced");
    final Properties props = new Properties();
    props.put(ConsumerConfig.CLIENT_ID_CONFIG, "verifier");
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka);
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
    props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, IsolationLevel.READ_COMMITTED.toString().toLowerCase(Locale.ROOT));
    try (final KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
        final List<TopicPartition> partitions = getAllPartitions(consumer, "data");
        System.out.println("Partitions: " + partitions);
        consumer.assign(partitions);
        consumer.seekToEnd(partitions);
        for (final TopicPartition tp : partitions) {
            System.out.println("End-offset for " + tp + " is " + consumer.position(tp));
        }
    }
    System.out.flush();
}
Also used: KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer), KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer), Properties (java.util.Properties), Random (java.util.Random), SerializationException (org.apache.kafka.common.errors.SerializationException), TimeoutException (org.apache.kafka.common.errors.TimeoutException), RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata), Callback (org.apache.kafka.clients.producer.Callback), TopicPartition (org.apache.kafka.common.TopicPartition), ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord)
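
Callback is a single-method interface, so on Java 8+ the anonymous class above can be written as a lambda. The following is a minimal, self-contained sketch of that pattern; the broker address localhost:9092 and the key/value used are assumptions for illustration, not taken from the test driver.

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.common.serialization.StringSerializer;

public class LambdaCallbackSketch {

    public static void main(final String[] args) {
        final Properties props = new Properties();
        // Assumed broker address for illustration.
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
        props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);

        try (final KafkaProducer<String, Integer> producer = new KafkaProducer<>(props)) {
            final ProducerRecord<String, Integer> record = new ProducerRecord<>("data", "0", 42);
            // Exactly one of metadata/exception is non-null when the send completes.
            producer.send(record, (metadata, exception) -> {
                if (exception != null) {
                    exception.printStackTrace(System.err);
                } else {
                    System.out.println("Stored at " + metadata.topic() + "-"
                        + metadata.partition() + "@" + metadata.offset());
                }
            });
        } // close() flushes outstanding records and waits for their callbacks
    }
}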

Example 62 with Callback

Use of org.apache.kafka.clients.producer.Callback in project apache-kafka-on-k8s by banzaicloud.

From class EosTestDriver, method verifyAllTransactionFinished:

private static void verifyAllTransactionFinished(final KafkaConsumer<byte[], byte[]> consumer, final String kafka, final boolean withRepartitioning) {
    final String[] topics;
    if (withRepartitioning) {
        topics = new String[] { "echo", "min", "sum", "repartition", "max", "cnt" };
    } else {
        topics = new String[] { "echo", "min", "sum" };
    }
    final List<TopicPartition> partitions = getAllPartitions(consumer, topics);
    consumer.assign(partitions);
    consumer.seekToEnd(partitions);
    for (final TopicPartition tp : partitions) {
        System.out.println(tp + " at position " + consumer.position(tp));
    }
    final Properties producerProps = new Properties();
    producerProps.put(ProducerConfig.CLIENT_ID_CONFIG, "VerifyProducer");
    producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka);
    producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    producerProps.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
    try (final KafkaProducer<String, String> producer = new KafkaProducer<>(producerProps)) {
        for (final TopicPartition tp : partitions) {
            final ProducerRecord<String, String> record = new ProducerRecord<>(tp.topic(), tp.partition(), "key", "value");
            producer.send(record, new Callback() {

                @Override
                public void onCompletion(final RecordMetadata metadata, final Exception exception) {
                    if (exception != null) {
                        exception.printStackTrace(System.err);
                        System.err.flush();
                        Exit.exit(1);
                    }
                }
            });
        }
    }
    final StringDeserializer stringDeserializer = new StringDeserializer();
    long maxWaitTime = System.currentTimeMillis() + MAX_IDLE_TIME_MS;
    while (!partitions.isEmpty() && System.currentTimeMillis() < maxWaitTime) {
        final ConsumerRecords<byte[], byte[]> records = consumer.poll(100);
        if (records.isEmpty()) {
            System.out.println("No data received.");
            for (final TopicPartition tp : partitions) {
                System.out.println(tp + " at position " + consumer.position(tp));
            }
        }
        for (final ConsumerRecord<byte[], byte[]> record : records) {
            maxWaitTime = System.currentTimeMillis() + MAX_IDLE_TIME_MS;
            final String topic = record.topic();
            final TopicPartition tp = new TopicPartition(topic, record.partition());
            try {
                final String key = stringDeserializer.deserialize(topic, record.key());
                final String value = stringDeserializer.deserialize(topic, record.value());
                if (!("key".equals(key) && "value".equals(value) && partitions.remove(tp))) {
                    throw new RuntimeException("Post transactions verification failed. Received unexpected verification record: " + "Expected record <'key','value'> from one of " + partitions + " but got" + " <" + key + "," + value + "> [" + record.topic() + ", " + record.partition() + "]");
                } else {
                    System.out.println("Verifying " + tp + " successful.");
                }
            } catch (final SerializationException e) {
                throw new RuntimeException("Post transactions verification failed. Received unexpected verification record: " + "Expected record <'key','value'> from one of " + partitions + " but got " + record, e);
            }
        }
    }
    if (!partitions.isEmpty()) {
        throw new RuntimeException("Could not read all verification records. Did not receive any new record within the last " + (MAX_IDLE_TIME_MS / 1000) + " sec.");
    }
}
Also used: KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer), Properties (java.util.Properties), StringDeserializer (org.apache.kafka.common.serialization.StringDeserializer), SerializationException (org.apache.kafka.common.errors.SerializationException), TimeoutException (org.apache.kafka.common.errors.TimeoutException), RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata), Callback (org.apache.kafka.clients.producer.Callback), TopicPartition (org.apache.kafka.common.TopicPartition), ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord)
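
The verification producer above reports failures asynchronously and calls Exit.exit(1) from the callback; the try-with-resources close() is what guarantees the markers are flushed. When a failure should instead surface at the call site, blocking on the Future returned by send() is the usual alternative. A sketch under assumed broker, topic, and partition values:

import java.util.Properties;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

public class SynchronousMarkerSend {

    public static void main(final String[] args) throws InterruptedException {
        final Properties props = new Properties();
        // Assumed broker address for illustration.
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);

        try (final KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // Assumed topic and partition; the real driver sends one marker per partition.
            final ProducerRecord<String, String> marker = new ProducerRecord<>("echo", 0, "key", "value");
            try {
                // get() rethrows any send error as an ExecutionException here,
                // instead of delivering it to an asynchronous callback.
                final RecordMetadata metadata = producer.send(marker).get();
                System.out.println("Marker written at offset " + metadata.offset());
            } catch (final ExecutionException e) {
                throw new RuntimeException("Failed to write verification marker", e.getCause());
            }
        }
    }
}

Blocking per record serializes sends, so the callback style above remains the better fit when one marker goes to every partition.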

Example 63 with Callback

Use of org.apache.kafka.clients.producer.Callback in project apache-kafka-on-k8s by banzaicloud.

From class StreamsRepeatingIntegerKeyProducer, method main:

public static void main(String[] args) {
    System.out.println("StreamsTest instance started");
    final String kafka = args.length > 0 ? args[0] : "localhost:9092";
    final String configString = args.length > 2 ? args[2] : null;
    final Map<String, String> configs = SystemTestUtil.parseConfigs(configString);
    System.out.println("Using provided configs " + configs);
    final int numMessages = configs.containsKey("num_messages") ? Integer.parseInt(configs.get("num_messages")) : 1000;
    final Properties producerProps = new Properties();
    producerProps.put(ProducerConfig.CLIENT_ID_CONFIG, "StreamsRepeatingIntegerKeyProducer");
    producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka);
    producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    producerProps.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
    final String value = "testingValue";
    Integer key = 0;
    Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {

        @Override
        public void run() {
            keepProducing = false;
        }
    }));
    final String[] topics = configs.get("topics").split(";");
    final int totalMessagesToProduce = numMessages * topics.length;
    try (final KafkaProducer<String, String> kafkaProducer = new KafkaProducer<>(producerProps)) {
        while (keepProducing && messageCounter < totalMessagesToProduce) {
            for (final String topic : topics) {
                final ProducerRecord<String, String> producerRecord = new ProducerRecord<>(topic, key.toString(), value + key);
                kafkaProducer.send(producerRecord, new Callback() {

                    @Override
                    public void onCompletion(final RecordMetadata metadata, final Exception exception) {
                        if (exception != null) {
                            exception.printStackTrace(System.err);
                            System.err.flush();
                            if (exception instanceof TimeoutException) {
                                try {
                                    // message == org.apache.kafka.common.errors.TimeoutException: Expiring 4 record(s) for data-0: 30004 ms has passed since last attempt plus backoff time
                                    final int expired = Integer.parseInt(exception.getMessage().split(" ")[2]);
                                    messageCounter -= expired;
                                } catch (final Exception ignore) {
                                }
                            }
                        }
                    }
                });
                messageCounter += 1;
            }
            key += 1;
            if (key % 1000 == 0) {
                System.out.println("Sent 1000 messages");
                Utils.sleep(100);
                key = 0;
            }
        }
    }
    System.out.println("Producer shut down now, sent total " + messageCounter + " of requested " + totalMessagesToProduce);
    System.out.flush();
}
Also used: KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer), Properties (java.util.Properties), TimeoutException (org.apache.kafka.common.errors.TimeoutException), RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata), Callback (org.apache.kafka.clients.producer.Callback), ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord)
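
One caveat worth noting in this example: onCompletion runs on the producer's I/O thread while main keeps sending, so the static messageCounter is updated from two threads without synchronization. A sketch of the same bookkeeping done with atomic counters (broker address and topic name are illustrative assumptions):

import java.util.Properties;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class ThreadSafeCounterSketch {

    // Updated from both the main thread and the producer's I/O thread.
    private static final AtomicLong acked = new AtomicLong();
    private static final AtomicLong failed = new AtomicLong();

    public static void main(final String[] args) {
        final Properties props = new Properties();
        // Assumed broker address for illustration.
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);

        try (final KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            for (int i = 0; i < 1000; i++) {
                final ProducerRecord<String, String> record =
                    new ProducerRecord<>("test-topic", Integer.toString(i % 10), "testingValue" + i);
                producer.send(record, (metadata, exception) -> {
                    if (exception == null) {
                        acked.incrementAndGet();
                    } else {
                        failed.incrementAndGet();
                    }
                });
            }
        } // close() waits for outstanding sends, so the totals below are final
        System.out.println("acked=" + acked.get() + ", failed=" + failed.get());
    }
}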

Example 64 with Callback

Use of org.apache.kafka.clients.producer.Callback in project apache-kafka-on-k8s by banzaicloud.

From class RecordCollectorImpl, method send:

@Override
public <K, V> void send(final String topic, final K key, final V value, final Integer partition, final Long timestamp, final Serializer<K> keySerializer, final Serializer<V> valueSerializer) {
    checkForException();
    final byte[] keyBytes = keySerializer.serialize(topic, key);
    final byte[] valBytes = valueSerializer.serialize(topic, value);
    final ProducerRecord<byte[], byte[]> serializedRecord = new ProducerRecord<>(topic, partition, timestamp, keyBytes, valBytes);
    try {
        producer.send(serializedRecord, new Callback() {

            @Override
            public void onCompletion(final RecordMetadata metadata, final Exception exception) {
                if (exception == null) {
                    if (sendException != null) {
                        return;
                    }
                    final TopicPartition tp = new TopicPartition(metadata.topic(), metadata.partition());
                    offsets.put(tp, metadata.offset());
                } else {
                    if (sendException == null) {
                        if (exception instanceof ProducerFencedException) {
                            log.warn(LOG_MESSAGE, key, value, timestamp, topic, exception.getMessage());
                            sendException = new ProducerFencedException(String.format(EXCEPTION_MESSAGE, logPrefix, "producer got fenced", key, value, timestamp, topic, exception.getMessage()));
                        } else {
                            if (productionExceptionIsFatal(exception)) {
                                recordSendError(key, value, timestamp, topic, exception);
                            } else if (productionExceptionHandler.handle(serializedRecord, exception) == ProductionExceptionHandlerResponse.FAIL) {
                                recordSendError(key, value, timestamp, topic, exception);
                            } else {
                                log.debug(HANDLER_CONTINUED_MESSAGE, key, value, timestamp, topic, exception);
                            }
                        }
                    }
                }
            }
        });
    } catch (final TimeoutException e) {
        log.error("Timeout exception caught when sending record to topic {}. " + "This might happen if the producer cannot send data to the Kafka cluster and thus, " + "its internal buffer fills up. " + "You can increase producer parameter `max.block.ms` to increase this timeout.", topic);
        throw new StreamsException(String.format("%sFailed to send record to topic %s due to timeout.", logPrefix, topic));
    } catch (final Exception uncaughtException) {
        throw new StreamsException(String.format(EXCEPTION_MESSAGE, logPrefix, "an error caught", key, value, timestamp, topic, uncaughtException.getMessage()), uncaughtException);
    }
}
Also used: RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata), Callback (org.apache.kafka.clients.producer.Callback), TopicPartition (org.apache.kafka.common.TopicPartition), ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord), StreamsException (org.apache.kafka.streams.errors.StreamsException), KafkaException (org.apache.kafka.common.KafkaException), SerializationException (org.apache.kafka.common.errors.SerializationException), RetriableException (org.apache.kafka.common.errors.RetriableException), UnknownServerException (org.apache.kafka.common.errors.UnknownServerException), SecurityDisabledException (org.apache.kafka.common.errors.SecurityDisabledException), TimeoutException (org.apache.kafka.common.errors.TimeoutException), AuthorizationException (org.apache.kafka.common.errors.AuthorizationException), InvalidTopicException (org.apache.kafka.common.errors.InvalidTopicException), ProducerFencedException (org.apache.kafka.common.errors.ProducerFencedException), AuthenticationException (org.apache.kafka.common.errors.AuthenticationException)
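
The non-fatal branch above defers to a ProductionExceptionHandler (KIP-210), which applications register via StreamsConfig.DEFAULT_PRODUCTION_EXCEPTION_HANDLER_CLASS_CONFIG. Below is a minimal sketch of such a handler; the class name and the skip-on-RecordTooLargeException policy are illustrative choices, not part of RecordCollectorImpl:

import java.util.Map;

import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.RecordTooLargeException;
import org.apache.kafka.streams.errors.ProductionExceptionHandler;

public class SkipLargeRecordsHandler implements ProductionExceptionHandler {

    @Override
    public ProductionExceptionHandlerResponse handle(final ProducerRecord<byte[], byte[]> record,
                                                     final Exception exception) {
        // Drop records the broker rejects for size; fail the task on anything else.
        if (exception instanceof RecordTooLargeException) {
            return ProductionExceptionHandlerResponse.CONTINUE;
        }
        return ProductionExceptionHandlerResponse.FAIL;
    }

    @Override
    public void configure(final Map<String, ?> configs) {
        // No configuration needed for this sketch.
    }
}

With a handler like this configured, the send() callback above logs HANDLER_CONTINUED_MESSAGE and keeps running when the handler returns CONTINUE, instead of recording a send error.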

Example 65 with Callback

Use of org.apache.kafka.clients.producer.Callback in project apache-kafka-on-k8s by banzaicloud.

From class KafkaStatusBackingStoreTest, method putTaskState:

@Test
public void putTaskState() {
    KafkaBasedLog<String, byte[]> kafkaBasedLog = mock(KafkaBasedLog.class);
    Converter converter = mock(Converter.class);
    KafkaStatusBackingStore store = new KafkaStatusBackingStore(new MockTime(), converter, STATUS_TOPIC, kafkaBasedLog);
    byte[] value = new byte[0];
    expect(converter.fromConnectData(eq(STATUS_TOPIC), anyObject(Schema.class), anyObject(Struct.class))).andStubReturn(value);
    final Capture<Callback> callbackCapture = newCapture();
    kafkaBasedLog.send(eq("status-task-conn-0"), eq(value), capture(callbackCapture));
    expectLastCall().andAnswer(new IAnswer<Void>() {

        @Override
        public Void answer() throws Throwable {
            callbackCapture.getValue().onCompletion(null, null);
            return null;
        }
    });
    replayAll();
    TaskStatus status = new TaskStatus(TASK, TaskStatus.State.RUNNING, WORKER_ID, 0);
    store.put(status);
    // state is not visible until read back from the log
    assertEquals(null, store.get(TASK));
    verifyAll();
}
Also used: Schema (org.apache.kafka.connect.data.Schema), Struct (org.apache.kafka.connect.data.Struct), TaskStatus (org.apache.kafka.connect.runtime.TaskStatus), Callback (org.apache.kafka.clients.producer.Callback), MockTime (org.apache.kafka.common.utils.MockTime), Test (org.junit.Test)
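
The EasyMock capture above exists only to invoke the Callback by hand and simulate a completed send. Kafka's own test double, MockProducer, supports the same pattern without a mocking framework: with autoComplete disabled, each send stays pending until completed explicitly. A small sketch (topic, key, and value are placeholders):

import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class MockProducerCallbackSketch {

    public static void main(final String[] args) {
        // autoComplete=false: callbacks fire only when completeNext()/errorNext() is called.
        final MockProducer<String, String> producer =
            new MockProducer<>(false, new StringSerializer(), new StringSerializer());

        producer.send(new ProducerRecord<>("status-topic", "status-task-conn-0", "{}"),
            (metadata, exception) -> {
                if (exception == null) {
                    System.out.println("ack at offset " + metadata.offset());
                } else {
                    System.out.println("failed: " + exception);
                }
            });

        // Complete the oldest pending send with a success result, firing its callback.
        producer.completeNext();
    }
}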

Aggregations

Callback (org.apache.kafka.clients.producer.Callback) 81
Test (org.junit.Test) 47
ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord) 39
RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata) 37
KafkaException (org.apache.kafka.common.KafkaException) 21
Future (java.util.concurrent.Future) 18
TimeoutException (org.apache.kafka.common.errors.TimeoutException) 18
ExecutionException (java.util.concurrent.ExecutionException) 15
ArrayList (java.util.ArrayList) 14
List (java.util.List) 13
AtomicInteger (java.util.concurrent.atomic.AtomicInteger) 13
MockProducer (org.apache.kafka.clients.producer.MockProducer) 13
HashMap (java.util.HashMap) 12
Properties (java.util.Properties) 12
DefaultPartitioner (org.apache.kafka.clients.producer.internals.DefaultPartitioner) 12
TopicPartition (org.apache.kafka.common.TopicPartition) 12
Schema (org.apache.kafka.connect.data.Schema) 12
Struct (org.apache.kafka.connect.data.Struct) 12
KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer) 11
StreamsException (org.apache.kafka.streams.errors.StreamsException) 11