Use of org.apache.kafka.common.config.ConfigDef in the Apache Kafka project.
Class ExpiringCredentialRefreshingLoginTest, method refreshConfigThatPerformsReloginEveryGivenPercentageOfLifetime.
/**
 * Builds a refresh config whose login refresh fires at the given fraction of the
 * credential lifetime, with jitter disabled so the refresh point is deterministic.
 *
 * @param refreshWindowFactor fraction of the credential lifetime at which to refresh
 * @param minPeriodSeconds minimum allowed refresh period, in seconds
 * @param bufferSeconds buffer maintained before credential expiration, in seconds
 * @param clientReloginAllowedBeforeLogout whether re-login may occur before logout
 * @return the parsed, validated refresh configuration
 */
private static ExpiringCredentialRefreshConfig refreshConfigThatPerformsReloginEveryGivenPercentageOfLifetime(double refreshWindowFactor, short minPeriodSeconds, short bufferSeconds, boolean clientReloginAllowedBeforeLogout) {
    Map<Object, Object> saslProps = new HashMap<>();
    saslProps.put(SaslConfigs.SASL_LOGIN_REFRESH_WINDOW_FACTOR, refreshWindowFactor);
    saslProps.put(SaslConfigs.SASL_LOGIN_REFRESH_WINDOW_JITTER, 0);
    saslProps.put(SaslConfigs.SASL_LOGIN_REFRESH_MIN_PERIOD_SECONDS, minPeriodSeconds);
    saslProps.put(SaslConfigs.SASL_LOGIN_REFRESH_BUFFER_SECONDS, bufferSeconds);
    // Parse through a client-SASL-aware ConfigDef so defaults and range validation apply.
    Map<String, Object> parsedProps = new ConfigDef().withClientSaslSupport().parse(saslProps);
    return new ExpiringCredentialRefreshConfig(parsedProps, clientReloginAllowedBeforeLogout);
}
Use of org.apache.kafka.common.config.ConfigDef in the Apache Kafka project.
Class AbstractHerder, method validateConnectorConfig.
/**
 * Validates a connector configuration: resolves the connector class, runs the framework's
 * base/enriched ConfigDef validation, the connector's own custom validation, and finally
 * validates any producer/consumer/admin client-override properties against the matching
 * client config schema.
 *
 * @param connectorProps the (possibly transformed) connector configuration to validate
 * @param doLog whether the config values should be logged when building the AbstractConfig
 * @return merged validation results across framework, connector, and client-override checks
 * @throws BadRequestException if the connector class is missing, or the connector returns a
 *         null ConfigDef or null Config from its config()/validate() methods
 */
ConfigInfos validateConnectorConfig(Map<String, String> connectorProps, boolean doLog) {
    if (worker.configTransformer() != null) {
        connectorProps = worker.configTransformer().transform(connectorProps);
    }
    String connType = connectorProps.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG);
    if (connType == null)
        throw new BadRequestException("Connector config " + connectorProps + " contains no connector type");
    Connector connector = getConnector(connType);
    org.apache.kafka.connect.health.ConnectorType connectorType;
    // Swap to the connector's plugin classloader so its config classes resolve correctly.
    ClassLoader savedLoader = plugins().compareAndSwapLoaders(connector);
    try {
        ConfigDef baseConfigDef;
        if (connector instanceof SourceConnector) {
            baseConfigDef = SourceConnectorConfig.configDef();
            connectorType = org.apache.kafka.connect.health.ConnectorType.SOURCE;
        } else {
            baseConfigDef = SinkConnectorConfig.configDef();
            SinkConnectorConfig.validate(connectorProps);
            connectorType = org.apache.kafka.connect.health.ConnectorType.SINK;
        }
        ConfigDef enrichedConfigDef = ConnectorConfig.enrich(plugins(), baseConfigDef, connectorProps, false);
        Map<String, ConfigValue> validatedConnectorConfig = validateBasicConnectorConfig(connector, enrichedConfigDef, connectorProps);
        // Null values are never valid config values; surface them as explicit errors.
        connectorProps.entrySet().stream().filter(e -> e.getValue() == null).map(Map.Entry::getKey).forEach(prop -> validatedConnectorConfig.computeIfAbsent(prop, ConfigValue::new).addErrorMessage("Null value can not be supplied as the configuration value."));
        List<ConfigValue> configValues = new ArrayList<>(validatedConnectorConfig.values());
        Map<String, ConfigKey> configKeys = new LinkedHashMap<>(enrichedConfigDef.configKeys());
        Set<String> allGroups = new LinkedHashSet<>(enrichedConfigDef.groups());
        // do custom connector-specific validation
        ConfigDef configDef = connector.config();
        if (null == configDef) {
            throw new BadRequestException(String.format("%s.config() must return a ConfigDef that is not null.", connector.getClass().getName()));
        }
        Config config = connector.validate(connectorProps);
        if (null == config) {
            throw new BadRequestException(String.format("%s.validate() must return a Config that is not null.", connector.getClass().getName()));
        }
        configKeys.putAll(configDef.configKeys());
        allGroups.addAll(configDef.groups());
        configValues.addAll(config.configValues());
        ConfigInfos configInfos = generateResult(connType, configKeys, configValues, new ArrayList<>(allGroups));
        AbstractConfig connectorConfig = new AbstractConfig(new ConfigDef(), connectorProps, doLog);
        String connName = connectorProps.get(ConnectorConfig.NAME_CONFIG);
        ConfigInfos producerConfigInfos = null;
        ConfigInfos consumerConfigInfos = null;
        ConfigInfos adminConfigInfos = null;
        if (connectorType.equals(org.apache.kafka.connect.health.ConnectorType.SOURCE)) {
            // Source connectors produce records, so their overrides are producer configs.
            producerConfigInfos = validateClientOverrides(connName, ConnectorConfig.CONNECTOR_CLIENT_PRODUCER_OVERRIDES_PREFIX, connectorConfig, ProducerConfig.configDef(), connector.getClass(), connectorType, ConnectorClientConfigRequest.ClientType.PRODUCER, connectorClientConfigOverridePolicy);
            return mergeConfigInfos(connType, configInfos, producerConfigInfos);
        } else {
            // FIX: consumer overrides must be validated against the consumer schema,
            // not ProducerConfig.configDef() (copy-paste bug).
            consumerConfigInfos = validateClientOverrides(connName, ConnectorConfig.CONNECTOR_CLIENT_CONSUMER_OVERRIDES_PREFIX, connectorConfig, ConsumerConfig.configDef(), connector.getClass(), connectorType, ConnectorClientConfigRequest.ClientType.CONSUMER, connectorClientConfigOverridePolicy);
            // check if topic for dead letter queue exists
            String topic = connectorProps.get(SinkConnectorConfig.DLQ_TOPIC_NAME_CONFIG);
            if (topic != null && !topic.isEmpty()) {
                // FIX: admin overrides (used for the DLQ) must be validated against the
                // admin client schema, not ProducerConfig.configDef().
                adminConfigInfos = validateClientOverrides(connName, ConnectorConfig.CONNECTOR_CLIENT_ADMIN_OVERRIDES_PREFIX, connectorConfig, AdminClientConfig.configDef(), connector.getClass(), connectorType, ConnectorClientConfigRequest.ClientType.ADMIN, connectorClientConfigOverridePolicy);
            }
        }
        return mergeConfigInfos(connType, configInfos, consumerConfigInfos, adminConfigInfos);
    } finally {
        // Always restore the caller's classloader, even when validation throws.
        Plugins.compareAndSwapLoaders(savedLoader);
    }
}
Use of org.apache.kafka.common.config.ConfigDef in the Apache Kafka project.
Class MirrorConnectorConfigTest, method testNonMutationOfConfigDef.
@Test
@Test
public void testNonMutationOfConfigDef() {
    // Properties that must exist only in the task-level ConfigDef.
    List<String> taskOnlyProperties = Arrays.asList(MirrorConnectorConfig.TASK_TOPIC_PARTITIONS, MirrorConnectorConfig.TASK_CONSUMER_GROUPS);
    // Sanity check to make sure that these properties are actually defined for the task config,
    // and that the task config class has been loaded and statically initialized by the JVM
    ConfigDef taskDef = MirrorTaskConfig.TASK_CONFIG_DEF;
    for (String property : taskOnlyProperties) {
        assertTrue(taskDef.names().contains(property), property + " should be defined for task ConfigDef");
    }
    // Ensure that the task config class hasn't accidentally modified the connector config
    ConfigDef connectorDef = MirrorConnectorConfig.CONNECTOR_CONFIG_DEF;
    for (String property : taskOnlyProperties) {
        assertFalse(connectorDef.names().contains(property), property + " should not be defined for connector ConfigDef");
    }
}
Use of org.apache.kafka.common.config.ConfigDef in the Apache Kafka project.
Class QuotaConfigs, method ipConfigs.
/**
 * Returns the ConfigDef describing the per-IP quota overrides: a single
 * connection-rate limit, defaulting to effectively unlimited and constrained
 * to be non-negative.
 */
public static ConfigDef ipConfigs() {
    // define(...) is fluent and returns the same ConfigDef instance.
    return new ConfigDef().define(IP_CONNECTION_RATE_OVERRIDE_CONFIG, ConfigDef.Type.INT, Integer.MAX_VALUE, ConfigDef.Range.atLeast(0), ConfigDef.Importance.MEDIUM, IP_CONNECTION_RATE_DOC);
}
Use of org.apache.kafka.common.config.ConfigDef in the Apache Kafka project.
Class ConfigurationControlManager, method isSplittable.
/**
 * Reports whether the given config key for a resource type holds a LIST value,
 * i.e. whether its value can be split into individual elements.
 *
 * @param type the resource type whose schema should be consulted
 * @param key the configuration key to look up
 * @return true only if the key is known for the type and is LIST-typed
 */
boolean isSplittable(ConfigResource.Type type, String key) {
    ConfigDef schema = configDefs.get(type);
    if (schema != null) {
        ConfigKey definition = schema.configKeys().get(key);
        // Unknown keys are not splittable; known keys are splittable only when LIST-typed.
        return definition != null && definition.type == ConfigDef.Type.LIST;
    }
    return false;
}
Aggregations