use of org.apache.kafka.common.KafkaException in project kafka by apache.
the class ListTransactionsResultTest method testAllFuturesFailIfLookupFails.
@Test
public void testAllFuturesFailIfLookupFails() {
    future.completeExceptionally(new KafkaException());
    assertFutureThrows(result.all(), KafkaException.class);
    assertFutureThrows(result.allByBrokerId(), KafkaException.class);
    assertFutureThrows(result.byBrokerId(), KafkaException.class);
}
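For context, assertFutureThrows is a helper from Kafka's test utilities; it asserts that a future completed exceptionally with a given exception type as the cause. A minimal sketch of such a helper, assuming the futures implement java.util.concurrent.Future (as KafkaFuture does in recent Kafka versions) and JUnit 5 assertions; the exact Kafka TestUtils signature may differ:

import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

// Sketch only, not the exact Kafka TestUtils implementation: get() on a
// future completed exceptionally throws ExecutionException wrapping the
// original exception, so we unwrap and check the cause's type.
static <T extends Throwable> void assertFutureThrows(Future<?> future, Class<T> causeClass) {
    ExecutionException e = assertThrows(ExecutionException.class, future::get);
    assertTrue(causeClass.isInstance(e.getCause()),
        "Unexpected exception cause: " + e.getCause());
}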
use of org.apache.kafka.common.KafkaException in project kafka by apache.
the class SaslServerAuthenticator method createSaslKerberosServer.
private SaslServer createSaslKerberosServer(final AuthenticateCallbackHandler saslServerCallbackHandler,
                                            final Map<String, ?> configs, Subject subject) throws IOException {
    // server is using a JAAS-authenticated subject: determine service principal name
    // and hostname from kafka server's subject.
    final String servicePrincipal = SaslClientAuthenticator.firstPrincipal(subject);
    KerberosName kerberosName;
    try {
        kerberosName = KerberosName.parse(servicePrincipal);
    } catch (IllegalArgumentException e) {
        throw new KafkaException("Principal has name with unexpected format " + servicePrincipal);
    }
    final String servicePrincipalName = kerberosName.serviceName();
    final String serviceHostname = kerberosName.hostName();
    LOG.debug("Creating SaslServer for {} with mechanism {}", kerberosName, saslMechanism);
    try {
        return Subject.doAs(subject, (PrivilegedExceptionAction<SaslServer>) () ->
            Sasl.createSaslServer(saslMechanism, servicePrincipalName, serviceHostname,
                configs, saslServerCallbackHandler));
    } catch (PrivilegedActionException e) {
        throw new SaslException("Kafka Server failed to create a SaslServer to interact with a client during session authentication",
            e.getCause());
    }
}
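As context for the parse step above: a Kerberos service principal has the form serviceName/hostname@REALM, and KerberosName splits it into those parts. A short usage sketch using only the methods shown above (the principal value is made up for illustration):

// Hypothetical principal, for illustration only.
KerberosName kerberosName = KerberosName.parse("kafka/broker1.example.com@EXAMPLE.COM");
System.out.println(kerberosName.serviceName()); // kafka
System.out.println(kerberosName.hostName());    // broker1.example.com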
use of org.apache.kafka.common.KafkaException in project kafka by apache.
the class JaasUtils method isZkSaslEnabled.
public static boolean isZkSaslEnabled() {
    // Technically a client must also check if TLS mutual authentication has been configured,
    // but we will leave that up to the client code to determine since direct connectivity to ZooKeeper
    // has been deprecated in many clients and we don't wish to re-introduce a ZooKeeper jar dependency here.
    boolean zkSaslEnabled = Boolean.parseBoolean(System.getProperty(ZK_SASL_CLIENT, DEFAULT_ZK_SASL_CLIENT));
    String zkLoginContextName = System.getProperty(ZK_LOGIN_CONTEXT_NAME_KEY, DEFAULT_ZK_LOGIN_CONTEXT_NAME);
    LOG.debug("Checking login config for Zookeeper JAAS context {}", zkSecuritySysConfigString());
    boolean foundLoginConfigEntry;
    try {
        Configuration loginConf = Configuration.getConfiguration();
        foundLoginConfigEntry = loginConf.getAppConfigurationEntry(zkLoginContextName) != null;
    } catch (Exception e) {
        throw new KafkaException("Exception while loading Zookeeper JAAS login context " + zkSecuritySysConfigString(), e);
    }
    if (foundLoginConfigEntry && !zkSaslEnabled) {
        LOG.error("JAAS configuration is present, but system property " + ZK_SASL_CLIENT
            + " is set to false, which disables SASL in the ZooKeeper client");
        throw new KafkaException("Exception while determining if ZooKeeper is secure " + zkSecuritySysConfigString());
    }
    return foundLoginConfigEntry;
}
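The two constants consulted above correspond to the standard ZooKeeper client system properties (in Kafka's JaasUtils, ZK_SASL_CLIENT is "zookeeper.sasl.client" and ZK_LOGIN_CONTEXT_NAME_KEY is "zookeeper.sasl.clientconfig"). A minimal sketch of driving this check from client code, assuming a JAAS file with a matching login section is supplied to the JVM:

// Sketch only: property names assumed to match the constants used above.
System.setProperty("zookeeper.sasl.client", "true");          // ZK_SASL_CLIENT
System.setProperty("zookeeper.sasl.clientconfig", "Client");  // ZK_LOGIN_CONTEXT_NAME_KEY
// Requires a JAAS config containing a "Client" section, e.g. started with
// -Djava.security.auth.login.config=/path/to/jaas.conf
boolean saslEnabled = JaasUtils.isZkSaslEnabled();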
use of org.apache.kafka.common.KafkaException in project kafka by apache.
the class ConsumerNetworkClientTest method testMetadataFailurePropagated.
@Test
public void testMetadataFailurePropagated() {
    KafkaException metadataException = new KafkaException();
    metadata.fatalError(metadataException);
    try {
        consumerClient.poll(time.timer(Duration.ZERO));
        fail("Expected poll to throw exception");
    } catch (Exception e) {
        assertEquals(metadataException, e);
    }
}
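The try/fail/catch pattern above can be written more compactly with JUnit 5's assertThrows. An equivalent sketch, assuming the same test fixtures and that poll rethrows the fatal metadata error unchanged (which the assertEquals in the original implies):

// Equivalent assertion with JUnit 5 (sketch; same fixtures assumed).
KafkaException thrown = assertThrows(KafkaException.class,
    () -> consumerClient.poll(time.timer(Duration.ZERO)));
assertEquals(metadataException, thrown);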
use of org.apache.kafka.common.KafkaException in project kafka by apache.
the class TransactionalMessageCopier method runEventLoop.
public static void runEventLoop(Namespace parsedArgs) {
    final String transactionalId = parsedArgs.getString("transactionalId");
    final String outputTopic = parsedArgs.getString("outputTopic");
    String consumerGroup = parsedArgs.getString("consumerGroup");
    final KafkaProducer<String, String> producer = createProducer(parsedArgs);
    final KafkaConsumer<String, String> consumer = createConsumer(parsedArgs);
    final AtomicLong remainingMessages = new AtomicLong(
        parsedArgs.getInt("maxMessages") == -1 ? Long.MAX_VALUE : parsedArgs.getInt("maxMessages"));
    boolean groupMode = parsedArgs.getBoolean("groupMode");
    String topicName = parsedArgs.getString("inputTopic");
    final AtomicLong numMessagesProcessedSinceLastRebalance = new AtomicLong(0);
    final AtomicLong totalMessageProcessed = new AtomicLong(0);
    if (groupMode) {
        consumer.subscribe(Collections.singleton(topicName), new ConsumerRebalanceListener() {
            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            }

            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                remainingMessages.set(partitions.stream()
                    .mapToLong(partition -> messagesRemaining(consumer, partition)).sum());
                numMessagesProcessedSinceLastRebalance.set(0);
                // Report status now that the remaining message count has been
                // recomputed for the newly assigned partitions.
                System.out.println(statusAsJson(totalMessageProcessed.get(),
                    numMessagesProcessedSinceLastRebalance.get(), remainingMessages.get(),
                    transactionalId, "RebalanceComplete"));
            }
        });
    } else {
        TopicPartition inputPartition = new TopicPartition(topicName, parsedArgs.getInt("inputPartition"));
        consumer.assign(singleton(inputPartition));
        remainingMessages.set(Math.min(messagesRemaining(consumer, inputPartition), remainingMessages.get()));
    }
    final boolean enableRandomAborts = parsedArgs.getBoolean("enableRandomAborts");
    producer.initTransactions();
    final AtomicBoolean isShuttingDown = new AtomicBoolean(false);
    Exit.addShutdownHook("transactional-message-copier-shutdown-hook", () -> {
        isShuttingDown.set(true);
        consumer.wakeup();
        System.out.println(shutDownString(totalMessageProcessed.get(),
            numMessagesProcessedSinceLastRebalance.get(), remainingMessages.get(), transactionalId));
    });
    final boolean useGroupMetadata = parsedArgs.getBoolean("useGroupMetadata");
    try {
        Random random = new Random();
        while (!isShuttingDown.get() && remainingMessages.get() > 0) {
            System.out.println(statusAsJson(totalMessageProcessed.get(),
                numMessagesProcessedSinceLastRebalance.get(), remainingMessages.get(),
                transactionalId, "ProcessLoop"));
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(200));
            if (records.count() > 0) {
                try {
                    producer.beginTransaction();
                    for (ConsumerRecord<String, String> record : records) {
                        producer.send(producerRecordFromConsumerRecord(outputTopic, record));
                    }
                    long messagesSentWithinCurrentTxn = records.count();
                    ConsumerGroupMetadata groupMetadata = useGroupMetadata
                        ? consumer.groupMetadata() : new ConsumerGroupMetadata(consumerGroup);
                    producer.sendOffsetsToTransaction(consumerPositions(consumer), groupMetadata);
                    if (enableRandomAborts && random.nextInt() % 3 == 0) {
                        abortTransactionAndResetPosition(producer, consumer);
                    } else {
                        producer.commitTransaction();
                        remainingMessages.getAndAdd(-messagesSentWithinCurrentTxn);
                        numMessagesProcessedSinceLastRebalance.getAndAdd(messagesSentWithinCurrentTxn);
                        totalMessageProcessed.getAndAdd(messagesSentWithinCurrentTxn);
                    }
                } catch (ProducerFencedException e) {
                    throw new KafkaException(String.format(
                        "The transactional.id %s has been claimed by another process", transactionalId), e);
                } catch (KafkaException e) {
                    log.debug("Aborting transaction after catching exception", e);
                    abortTransactionAndResetPosition(producer, consumer);
                }
            }
        }
    } catch (WakeupException e) {
        if (!isShuttingDown.get()) {
            // Rethrow the exception if the wakeup was not triggered as part of shutdown.
            throw e;
        }
    } finally {
        Utils.closeQuietly(producer, "producer");
        Utils.closeQuietly(consumer, "consumer");
    }
}
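The method relies on several helpers whose bodies are not shown here (createProducer, createConsumer, messagesRemaining, consumerPositions, statusAsJson, abortTransactionAndResetPosition). As a sketch of the abort path only, under the assumption that "reset position" means seeking each assigned partition back to its last committed offset so the aborted batch is consumed again; this is a plausible reconstruction, not the tool's verbatim code:

// Plausible reconstruction of the abort path, not the tool's verbatim code.
// Assumes java.util.{Map, Collections} and the Kafka client types
// TopicPartition and OffsetAndMetadata are imported.
private static void abortTransactionAndResetPosition(KafkaProducer<String, String> producer,
                                                     KafkaConsumer<String, String> consumer) {
    producer.abortTransaction();
    Map<TopicPartition, OffsetAndMetadata> committed = consumer.committed(consumer.assignment());
    for (TopicPartition tp : consumer.assignment()) {
        OffsetAndMetadata offset = committed.get(tp);
        if (offset != null)
            consumer.seek(tp, offset.offset());                   // resume from the last commit
        else
            consumer.seekToBeginning(Collections.singleton(tp)); // nothing committed yet
    }
}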