Example 91 with KafkaException

Use of org.apache.kafka.common.KafkaException in project kafka by apache.

From class ListTransactionsResultTest, method testAllFuturesFailIfLookupFails.

@Test
public void testAllFuturesFailIfLookupFails() {
    // 'future' is the underlying broker-lookup future backing 'result' (test fixtures).
    // Failing the lookup should fail every view the result exposes.
    future.completeExceptionally(new KafkaException());
    assertFutureThrows(result.all(), KafkaException.class);
    assertFutureThrows(result.allByBrokerId(), KafkaException.class);
    assertFutureThrows(result.byBrokerId(), KafkaException.class);
}
Also used : KafkaException(org.apache.kafka.common.KafkaException) Test(org.junit.jupiter.api.Test)
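For context, a minimal, self-contained sketch of why completing the underlying future exceptionally fails every derived view. It uses KafkaFutureImpl, the internal implementation class the test also uses as a fixture; the class name and message string here are made up.

import java.util.concurrent.ExecutionException;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.internals.KafkaFutureImpl;

public class FailedFutureSketch {
    public static void main(String[] args) throws InterruptedException {
        KafkaFutureImpl<String> future = new KafkaFutureImpl<>();
        future.completeExceptionally(new KafkaException("lookup failed"));
        try {
            // get() rethrows the failure wrapped in an ExecutionException
            future.get();
        } catch (ExecutionException e) {
            System.out.println(e.getCause() instanceof KafkaException); // prints true
        }
    }
}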

Example 92 with KafkaException

Use of org.apache.kafka.common.KafkaException in project kafka by apache.

From class SaslServerAuthenticator, method createSaslKerberosServer.

private SaslServer createSaslKerberosServer(final AuthenticateCallbackHandler saslServerCallbackHandler, final Map<String, ?> configs, Subject subject) throws IOException {
    // server is using a JAAS-authenticated subject: determine service principal name and hostname from kafka server's subject.
    final String servicePrincipal = SaslClientAuthenticator.firstPrincipal(subject);
    KerberosName kerberosName;
    try {
        kerberosName = KerberosName.parse(servicePrincipal);
    } catch (IllegalArgumentException e) {
        throw new KafkaException("Principal has name with unexpected format " + servicePrincipal);
    }
    final String servicePrincipalName = kerberosName.serviceName();
    final String serviceHostname = kerberosName.hostName();
    LOG.debug("Creating SaslServer for {} with mechanism {}", kerberosName, saslMechanism);
    try {
        return Subject.doAs(subject, (PrivilegedExceptionAction<SaslServer>) () -> Sasl.createSaslServer(saslMechanism, servicePrincipalName, serviceHostname, configs, saslServerCallbackHandler));
    } catch (PrivilegedActionException e) {
        throw new SaslException("Kafka Server failed to create a SaslServer to interact with a client during session authentication", e.getCause());
    }
}
Also used : PrivilegedActionException(java.security.PrivilegedActionException) SaslServer(javax.security.sasl.SaslServer) KafkaException(org.apache.kafka.common.KafkaException) KerberosName(org.apache.kafka.common.security.kerberos.KerberosName) SaslException(javax.security.sasl.SaslException)
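To illustrate the parsing step above, a small sketch (the principal value and class name are invented) of how KerberosName splits a service principal of the form serviceName/hostName@REALM:

import org.apache.kafka.common.security.kerberos.KerberosName;

public class KerberosNameSketch {
    public static void main(String[] args) {
        KerberosName name = KerberosName.parse("kafka/broker1.example.com@EXAMPLE.COM");
        System.out.println(name.serviceName()); // kafka
        System.out.println(name.hostName());    // broker1.example.com
        // A malformed principal makes parse() throw IllegalArgumentException,
        // which createSaslKerberosServer() rewraps as a KafkaException.
    }
}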

Example 93 with KafkaException

Use of org.apache.kafka.common.KafkaException in project kafka by apache.

From class JaasUtils, method isZkSaslEnabled.

public static boolean isZkSaslEnabled() {
    // Technically a client must also check if TLS mutual authentication has been configured,
    // but we will leave that up to the client code to determine since direct connectivity to ZooKeeper
    // has been deprecated in many clients and we don't wish to re-introduce a ZooKeeper jar dependency here.
    boolean zkSaslEnabled = Boolean.parseBoolean(System.getProperty(ZK_SASL_CLIENT, DEFAULT_ZK_SASL_CLIENT));
    String zkLoginContextName = System.getProperty(ZK_LOGIN_CONTEXT_NAME_KEY, DEFAULT_ZK_LOGIN_CONTEXT_NAME);
    LOG.debug("Checking login config for Zookeeper JAAS context {}", zkSecuritySysConfigString());
    boolean foundLoginConfigEntry;
    try {
        Configuration loginConf = Configuration.getConfiguration();
        foundLoginConfigEntry = loginConf.getAppConfigurationEntry(zkLoginContextName) != null;
    } catch (Exception e) {
        throw new KafkaException("Exception while loading Zookeeper JAAS login context " + zkSecuritySysConfigString(), e);
    }
    if (foundLoginConfigEntry && !zkSaslEnabled) {
        LOG.error("JAAS configuration is present, but system property " + ZK_SASL_CLIENT + " is set to false, which disables " + "SASL in the ZooKeeper client");
        throw new KafkaException("Exception while determining if ZooKeeper is secure " + zkSecuritySysConfigString());
    }
    return foundLoginConfigEntry;
}
Also used : Configuration(javax.security.auth.login.Configuration) KafkaException(org.apache.kafka.common.KafkaException)
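A short usage sketch, assuming the constants above refer to the standard ZooKeeper client property names ("zookeeper.sasl.client" and "zookeeper.sasl.clientconfig"); the class name is illustrative:

import org.apache.kafka.common.security.JaasUtils;

public class ZkSaslCheckSketch {
    public static void main(String[] args) {
        // With no JAAS login config installed, no context entry is found
        // and the check simply returns false.
        System.out.println(JaasUtils.isZkSaslEnabled()); // prints false

        // Conversely, if a JAAS context named "Client" (the default) exists
        // but -Dzookeeper.sasl.client=false was set, the method throws a
        // KafkaException rather than silently running without SASL.
    }
}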

Example 94 with KafkaException

Use of org.apache.kafka.common.KafkaException in project kafka by apache.

From class ConsumerNetworkClientTest, method testMetadataFailurePropagated.

@Test
public void testMetadataFailurePropagated() {
    KafkaException metadataException = new KafkaException();
    metadata.fatalError(metadataException);
    try {
        consumerClient.poll(time.timer(Duration.ZERO));
        fail("Expected poll to throw exception");
    } catch (Exception e) {
        assertEquals(metadataException, e);
    }
}
Also used : KafkaException(org.apache.kafka.common.KafkaException) TimeoutException(org.apache.kafka.common.errors.TimeoutException) WakeupException(org.apache.kafka.common.errors.WakeupException) InvalidTopicException(org.apache.kafka.common.errors.InvalidTopicException) DisconnectException(org.apache.kafka.common.errors.DisconnectException) TopicAuthorizationException(org.apache.kafka.common.errors.TopicAuthorizationException) AuthenticationException(org.apache.kafka.common.errors.AuthenticationException) Test(org.junit.jupiter.api.Test)
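For comparison, a sketch of the same check written with JUnit 5's assertThrows. It reuses the test's metadata, consumerClient, and time fixtures and assumes static imports of org.junit.jupiter.api.Assertions.assertThrows and assertEquals:

@Test
public void testMetadataFailurePropagated() {
    KafkaException metadataException = new KafkaException();
    metadata.fatalError(metadataException);
    // assertThrows returns the thrown exception, so the identity check stays.
    KafkaException thrown = assertThrows(KafkaException.class,
        () -> consumerClient.poll(time.timer(Duration.ZERO)));
    assertEquals(metadataException, thrown);
}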

Example 95 with KafkaException

Use of org.apache.kafka.common.KafkaException in project kafka by apache.

From class TransactionalMessageCopier, method runEventLoop.

public static void runEventLoop(Namespace parsedArgs) {
    final String transactionalId = parsedArgs.getString("transactionalId");
    final String outputTopic = parsedArgs.getString("outputTopic");
    String consumerGroup = parsedArgs.getString("consumerGroup");
    final KafkaProducer<String, String> producer = createProducer(parsedArgs);
    final KafkaConsumer<String, String> consumer = createConsumer(parsedArgs);
    final AtomicLong remainingMessages = new AtomicLong(parsedArgs.getInt("maxMessages") == -1 ? Long.MAX_VALUE : parsedArgs.getInt("maxMessages"));
    boolean groupMode = parsedArgs.getBoolean("groupMode");
    String topicName = parsedArgs.getString("inputTopic");
    final AtomicLong numMessagesProcessedSinceLastRebalance = new AtomicLong(0);
    final AtomicLong totalMessageProcessed = new AtomicLong(0);
    if (groupMode) {
        consumer.subscribe(Collections.singleton(topicName), new ConsumerRebalanceListener() {

            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            }

            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                remainingMessages.set(partitions.stream().mapToLong(partition -> messagesRemaining(consumer, partition)).sum());
                numMessagesProcessedSinceLastRebalance.set(0);
                // We use message cap for remaining here as the remainingMessages are not set yet.
                System.out.println(statusAsJson(totalMessageProcessed.get(), numMessagesProcessedSinceLastRebalance.get(), remainingMessages.get(), transactionalId, "RebalanceComplete"));
            }
        });
    } else {
        TopicPartition inputPartition = new TopicPartition(topicName, parsedArgs.getInt("inputPartition"));
        consumer.assign(singleton(inputPartition));
        remainingMessages.set(Math.min(messagesRemaining(consumer, inputPartition), remainingMessages.get()));
    }
    final boolean enableRandomAborts = parsedArgs.getBoolean("enableRandomAborts");
    producer.initTransactions();
    final AtomicBoolean isShuttingDown = new AtomicBoolean(false);
    Exit.addShutdownHook("transactional-message-copier-shutdown-hook", () -> {
        isShuttingDown.set(true);
        consumer.wakeup();
        System.out.println(shutDownString(totalMessageProcessed.get(), numMessagesProcessedSinceLastRebalance.get(), remainingMessages.get(), transactionalId));
    });
    final boolean useGroupMetadata = parsedArgs.getBoolean("useGroupMetadata");
    try {
        Random random = new Random();
        while (!isShuttingDown.get() && remainingMessages.get() > 0) {
            System.out.println(statusAsJson(totalMessageProcessed.get(), numMessagesProcessedSinceLastRebalance.get(), remainingMessages.get(), transactionalId, "ProcessLoop"));
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(200));
            if (records.count() > 0) {
                try {
                    producer.beginTransaction();
                    for (ConsumerRecord<String, String> record : records) {
                        producer.send(producerRecordFromConsumerRecord(outputTopic, record));
                    }
                    long messagesSentWithinCurrentTxn = records.count();
                    ConsumerGroupMetadata groupMetadata = useGroupMetadata ? consumer.groupMetadata() : new ConsumerGroupMetadata(consumerGroup);
                    producer.sendOffsetsToTransaction(consumerPositions(consumer), groupMetadata);
                    if (enableRandomAborts && random.nextInt() % 3 == 0) {
                        abortTransactionAndResetPosition(producer, consumer);
                    } else {
                        producer.commitTransaction();
                        remainingMessages.getAndAdd(-messagesSentWithinCurrentTxn);
                        numMessagesProcessedSinceLastRebalance.getAndAdd(messagesSentWithinCurrentTxn);
                        totalMessageProcessed.getAndAdd(messagesSentWithinCurrentTxn);
                    }
                } catch (ProducerFencedException e) {
                    throw new KafkaException(String.format("The transactional.id %s has been claimed by another process", transactionalId), e);
                } catch (KafkaException e) {
                    log.debug("Aborting transaction after catching exception", e);
                    abortTransactionAndResetPosition(producer, consumer);
                }
            }
        }
    } catch (WakeupException e) {
        if (!isShuttingDown.get()) {
            // Let the exception propagate if the wakeup was not raised as part of shutdown.
            throw e;
        }
    } finally {
        Utils.closeQuietly(producer, "producer");
        Utils.closeQuietly(consumer, "consumer");
    }
}
Also used : ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) Exit(org.apache.kafka.common.utils.Exit) ConsumerGroupMetadata(org.apache.kafka.clients.consumer.ConsumerGroupMetadata) Date(java.util.Date) LoggerFactory(org.slf4j.LoggerFactory) KafkaException(org.apache.kafka.common.KafkaException) SimpleDateFormat(java.text.SimpleDateFormat) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) Random(java.util.Random) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) Arguments.store(net.sourceforge.argparse4j.impl.Arguments.store) KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) Collections.singleton(java.util.Collections.singleton) ArgumentParser(net.sourceforge.argparse4j.inf.ArgumentParser) Namespace(net.sourceforge.argparse4j.inf.Namespace) Duration(java.time.Duration) Map(java.util.Map) ProducerConfig(org.apache.kafka.clients.producer.ProducerConfig) DateFormat(java.text.DateFormat) Utils(org.apache.kafka.common.utils.Utils) TopicPartition(org.apache.kafka.common.TopicPartition) Logger(org.slf4j.Logger) Properties(java.util.Properties) Arguments.storeTrue(net.sourceforge.argparse4j.impl.Arguments.storeTrue) WakeupException(org.apache.kafka.common.errors.WakeupException) ArgumentParsers(net.sourceforge.argparse4j.ArgumentParsers) Collection(java.util.Collection) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) JsonProcessingException(com.fasterxml.jackson.core.JsonProcessingException) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) ConsumerRebalanceListener(org.apache.kafka.clients.consumer.ConsumerRebalanceListener) AtomicLong(java.util.concurrent.atomic.AtomicLong) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) ProducerFencedException(org.apache.kafka.common.errors.ProducerFencedException) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) Collections(java.util.Collections) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer)
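Stripped of argument parsing, the shutdown hook, and status reporting, here is a minimal sketch of the consume-transform-produce transaction pattern the copier implements. The class name and outputTopic value are illustrative; the producer is assumed to be configured with a transactional.id and the consumer to already be subscribed or assigned:

import java.time.Duration;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.ProducerFencedException;

public class CopyLoopSketch {

    // Copies records from the consumer's input topic to outputTopic,
    // one transaction per polled batch.
    static void copy(KafkaConsumer<String, String> consumer,
                     KafkaProducer<String, String> producer,
                     String outputTopic) {
        producer.initTransactions();
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(200));
            if (records.isEmpty())
                continue;
            try {
                producer.beginTransaction();
                Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
                for (ConsumerRecord<String, String> record : records) {
                    producer.send(new ProducerRecord<>(outputTopic, record.key(), record.value()));
                    // Remember the next offset to consume for each input partition.
                    offsets.put(new TopicPartition(record.topic(), record.partition()),
                            new OffsetAndMetadata(record.offset() + 1));
                }
                // Consumed offsets commit atomically with the produced records.
                producer.sendOffsetsToTransaction(offsets, consumer.groupMetadata());
                producer.commitTransaction();
            } catch (ProducerFencedException e) {
                // Another producer with the same transactional.id took over; fail fast,
                // mirroring the KafkaException rethrow in runEventLoop above.
                throw new KafkaException("Fenced by another copier instance", e);
            } catch (KafkaException e) {
                // Abort so the batch can be retried; the full tool additionally
                // rewinds the consumer to its last committed position.
                producer.abortTransaction();
            }
        }
    }
}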

Aggregations

Classes most frequently used together with KafkaException across the indexed examples (number of examples):

KafkaException (org.apache.kafka.common.KafkaException): 262
Test (org.junit.Test): 69
TopicPartition (org.apache.kafka.common.TopicPartition): 56
Test (org.junit.jupiter.api.Test): 47
HashMap (java.util.HashMap): 40
IOException (java.io.IOException): 39
StreamsException (org.apache.kafka.streams.errors.StreamsException): 34
Map (java.util.Map): 32
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 28
ArrayList (java.util.ArrayList): 27
List (java.util.List): 21
ByteBuffer (java.nio.ByteBuffer): 19
ExecutionException (java.util.concurrent.ExecutionException): 19
ConfigException (org.apache.kafka.common.config.ConfigException): 16
TopicAuthorizationException (org.apache.kafka.common.errors.TopicAuthorizationException): 14
HashSet (java.util.HashSet): 13
Properties (java.util.Properties): 13
Set (java.util.Set): 11
Collectors (java.util.stream.Collectors): 11
RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata): 11