Use of org.apache.kafka.common.config.ConfigException in project kafka by apache.
From the class KafkaStatusBackingStore, the method configure:
@Override
public void configure(final WorkerConfig config) {
    this.statusTopic = config.getString(DistributedConfig.STATUS_STORAGE_TOPIC_CONFIG);
    if (this.statusTopic == null || this.statusTopic.trim().length() == 0)
        throw new ConfigException("Must specify topic for connector status.");
    String clusterId = ConnectUtils.lookupKafkaClusterId(config);
    Map<String, Object> originals = config.originals();
    Map<String, Object> producerProps = new HashMap<>(originals);
    producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    // we handle retries in this class
    producerProps.put(ProducerConfig.RETRIES_CONFIG, 0);
    // disable idempotence since retries is forced to 0
    producerProps.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, false);
    ConnectUtils.addMetricsContextProperties(producerProps, config, clusterId);
    Map<String, Object> consumerProps = new HashMap<>(originals);
    consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
    ConnectUtils.addMetricsContextProperties(consumerProps, config, clusterId);
    Map<String, Object> adminProps = new HashMap<>(originals);
    ConnectUtils.addMetricsContextProperties(adminProps, config, clusterId);
    Supplier<TopicAdmin> adminSupplier;
    if (topicAdminSupplier != null) {
        adminSupplier = topicAdminSupplier;
    } else {
        // Create our own topic admin supplier that we'll close when we're stopped
        ownTopicAdmin = new SharedTopicAdmin(adminProps);
        adminSupplier = ownTopicAdmin;
    }
    Map<String, Object> topicSettings = config instanceof DistributedConfig
            ? ((DistributedConfig) config).statusStorageTopicSettings()
            : Collections.emptyMap();
    NewTopic topicDescription = TopicAdmin.defineTopic(statusTopic)
            // config() first so that we override user-supplied settings as needed
            .config(topicSettings)
            .compacted()
            .partitions(config.getInt(DistributedConfig.STATUS_STORAGE_PARTITIONS_CONFIG))
            .replicationFactor(config.getShort(DistributedConfig.STATUS_STORAGE_REPLICATION_FACTOR_CONFIG))
            .build();
    Callback<ConsumerRecord<String, byte[]>> readCallback = (error, record) -> read(record);
    this.kafkaLog = createKafkaBasedLog(statusTopic, producerProps, consumerProps, readCallback, topicDescription, adminSupplier);
}
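The only ConfigException here guards the status topic name: null and blank values are rejected before any clients are built. A minimal, self-contained sketch of that validate-then-throw pattern (the class and helper names below are illustrative, not from the Kafka sources):

import org.apache.kafka.common.config.ConfigException;

public class StatusTopicGuardSketch {
    // Hypothetical helper mirroring the null/blank check in configure() above.
    static String requireStatusTopic(String statusTopic) {
        if (statusTopic == null || statusTopic.trim().length() == 0)
            throw new ConfigException("Must specify topic for connector status.");
        return statusTopic;
    }

    public static void main(String[] args) {
        System.out.println(requireStatusTopic("connect-status")); // passes
        try {
            requireStatusTopic("   "); // blank -> rejected
        } catch (ConfigException e) {
            System.out.println("Rejected: " + e.getMessage());
        }
    }
}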
Use of org.apache.kafka.common.config.ConfigException in project kafka by apache.
From the class ListDeserializer, the method configureListClass:
private void configureListClass(Map<String, ?> configs, boolean isKey) {
    String listTypePropertyName = isKey ? CommonClientConfigs.DEFAULT_LIST_KEY_SERDE_TYPE_CLASS : CommonClientConfigs.DEFAULT_LIST_VALUE_SERDE_TYPE_CLASS;
    final Object listClassOrName = configs.get(listTypePropertyName);
    if (listClassOrName == null) {
        throw new ConfigException("Not able to determine the list class because it was neither passed via the constructor nor set in the config.");
    }
    try {
        if (listClassOrName instanceof String) {
            listClass = Utils.loadClass((String) listClassOrName, Object.class);
        } else if (listClassOrName instanceof Class) {
            listClass = (Class<?>) listClassOrName;
        } else {
            throw new KafkaException("Could not determine the list class instance using \"" + listTypePropertyName + "\" property.");
        }
    } catch (final ClassNotFoundException e) {
        throw new ConfigException(listTypePropertyName, listClassOrName, "Deserializer's list class \"" + listClassOrName + "\" could not be found.");
    }
}
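The instanceof branches above accept the list type either as a Class object or as a fully-qualified class name String. A small sketch of both config shapes, assuming kafka-clients on the classpath (the ArrayList choice is illustrative):

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.CommonClientConfigs;

public class ListSerdeConfigSketch {
    public static void main(String[] args) {
        // Pass the list type as a Class object...
        Map<String, Object> byClass = new HashMap<>();
        byClass.put(CommonClientConfigs.DEFAULT_LIST_KEY_SERDE_TYPE_CLASS, java.util.ArrayList.class);
        // ...or as a fully-qualified class name String, resolved via Utils.loadClass.
        Map<String, Object> byName = new HashMap<>();
        byName.put(CommonClientConfigs.DEFAULT_LIST_KEY_SERDE_TYPE_CLASS, "java.util.ArrayList");
        // Any other value type falls through to the KafkaException branch, and a
        // misspelled class name takes the ClassNotFoundException -> ConfigException path.
    }
}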
Use of org.apache.kafka.common.config.ConfigException in project kafka by apache.
From the class SslFactory, the method configure:
@SuppressWarnings("unchecked")
@Override
public void configure(Map<String, ?> configs) throws KafkaException {
    if (sslEngineFactory != null) {
        throw new IllegalStateException("SslFactory was already configured.");
    }
    this.endpointIdentification = (String) configs.get(SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG);
    // The input map must be a mutable RecordingMap in production.
    Map<String, Object> nextConfigs = (Map<String, Object>) configs;
    if (clientAuthConfigOverride != null) {
        nextConfigs.put(BrokerSecurityConfigs.SSL_CLIENT_AUTH_CONFIG, clientAuthConfigOverride);
    }
    SslEngineFactory builder = instantiateSslEngineFactory(nextConfigs);
    if (keystoreVerifiableUsingTruststore) {
        try {
            SslEngineValidator.validate(builder, builder);
        } catch (Exception e) {
            throw new ConfigException("A client SSLEngine created with the provided settings can't connect to a server SSLEngine created with those settings.", e);
        }
    }
    this.sslEngineFactory = builder;
}
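A hedged sketch of driving this method directly. SslFactory and Mode are internal classes rather than public API, so this assumes the kafka-clients internals are on the classpath; the expectation that a minimal client-mode config succeeds rests on the default SslEngineFactory falling back to the JVM's default trust material:

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.config.SslConfigs;
import org.apache.kafka.common.network.Mode;
import org.apache.kafka.common.security.ssl.SslFactory;

public class SslFactoryConfigureSketch {
    public static void main(String[] args) {
        Map<String, Object> configs = new HashMap<>();
        configs.put(SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, "https");
        SslFactory factory = new SslFactory(Mode.CLIENT);
        // First call succeeds (no keystore/truststore -> JVM defaults, assumption noted above).
        factory.configure(configs);
        try {
            factory.configure(configs); // second call trips the guard at the top of configure()
        } catch (IllegalStateException e) {
            System.out.println("As expected: " + e.getMessage());
        }
    }
}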
Use of org.apache.kafka.common.config.ConfigException in project kafka by apache.
From the class SslFactoryTest, the method testReconfigurationWithoutKeystore:
@Test
public void testReconfigurationWithoutKeystore() throws Exception {
    File trustStoreFile = File.createTempFile("truststore", ".jks");
    Map<String, Object> sslConfig = sslConfigsBuilder(Mode.SERVER).createNewTrustStore(trustStoreFile).build();
    sslConfig.remove(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG);
    sslConfig.remove(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG);
    sslConfig.remove(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG);
    SslFactory sslFactory = new SslFactory(Mode.SERVER);
    sslFactory.configure(sslConfig);
    SSLContext sslContext = ((DefaultSslEngineFactory) sslFactory.sslEngineFactory()).sslContext();
    assertNotNull(sslContext, "SSL context not created");
    assertSame(sslContext, ((DefaultSslEngineFactory) sslFactory.sslEngineFactory()).sslContext(), "SSL context recreated unnecessarily");
    assertFalse(sslFactory.createSslEngine("localhost", 0).getUseClientMode());
    File newTrustStoreFile = File.createTempFile("truststore", ".jks");
    sslConfig = sslConfigsBuilder(Mode.SERVER).createNewTrustStore(newTrustStoreFile).build();
    sslConfig.remove(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG);
    sslConfig.remove(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG);
    sslConfig.remove(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG);
    sslFactory.reconfigure(sslConfig);
    assertNotSame(sslContext, ((DefaultSslEngineFactory) sslFactory.sslEngineFactory()).sslContext(), "SSL context not recreated");
    sslConfig = sslConfigsBuilder(Mode.SERVER).createNewTrustStore(newTrustStoreFile).build();
    try {
        sslFactory.validateReconfiguration(sslConfig);
        fail("Keystore configured dynamically for listener without previous keystore");
    } catch (ConfigException e) {
        // Expected exception
    }
}
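The try/fail/catch idiom at the end could equally be written with JUnit 5's assertThrows, which matches the message-last assertion style the test already uses. A self-contained illustration (the test name and config values below are illustrative, not from the Kafka sources):

import static org.junit.jupiter.api.Assertions.assertThrows;
import org.apache.kafka.common.config.ConfigException;
import org.junit.jupiter.api.Test;

class ExpectedConfigExceptionSketch {
    @Test
    void expectedExceptionWithAssertThrows() {
        // In the test above this would read:
        //   assertThrows(ConfigException.class,
        //       () -> sslFactory.validateReconfiguration(sslConfig));
        assertThrows(ConfigException.class, () -> {
            throw new ConfigException("ssl.keystore.location", null, "keystore required");
        });
    }
}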
Use of org.apache.kafka.common.config.ConfigException in project kafka by apache.
From the class SslFactoryTest, the method testReconfigurationWithoutTruststore:
@Test
public void testReconfigurationWithoutTruststore() throws Exception {
    File trustStoreFile = File.createTempFile("truststore", ".jks");
    Map<String, Object> sslConfig = sslConfigsBuilder(Mode.SERVER).createNewTrustStore(trustStoreFile).build();
    sslConfig.remove(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG);
    sslConfig.remove(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG);
    sslConfig.remove(SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG);
    SslFactory sslFactory = new SslFactory(Mode.SERVER);
    sslFactory.configure(sslConfig);
    SSLContext sslContext = ((DefaultSslEngineFactory) sslFactory.sslEngineFactory()).sslContext();
    assertNotNull(sslContext, "SSL context not created");
    assertSame(sslContext, ((DefaultSslEngineFactory) sslFactory.sslEngineFactory()).sslContext(), "SSL context recreated unnecessarily");
    assertFalse(sslFactory.createSslEngine("localhost", 0).getUseClientMode());
    Map<String, Object> sslConfig2 = sslConfigsBuilder(Mode.SERVER).createNewTrustStore(trustStoreFile).build();
    try {
        sslFactory.validateReconfiguration(sslConfig2);
        fail("Truststore configured dynamically for listener without previous truststore");
    } catch (ConfigException e) {
        // Expected exception
    }
}
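Taken together, the two tests exercise the same rule from opposite directions: validateReconfiguration rejects a dynamic update that introduces a keystore (or a truststore) on a listener originally configured without one, and in both cases the rejection surfaces as a ConfigException.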