Use of org.apache.kafka.common.config.types.Password in the Apache Kafka project.
Example from the class JaasContext, method loadServerContext.
/**
 * Builds the server-side {@code KafkaServer} JAAS context for the given listener
 * and SASL mechanism.
 *
 * If the dynamic JAAS property {@link SaslConfigs#SASL_JAAS_CONFIG}, prefixed with
 * the lower-cased mechanism name (full prefix `listener.name.{listenerName}.{mechanism}.`),
 * is present in {@code configs}, the returned context holds the single login entry
 * parsed from that property. Otherwise the default JAAS Configuration is used; the
 * context name `{lowercased listener name}.KafkaServer` is preferred, falling back
 * to plain `KafkaServer` when the listener-specific entry is absent.
 *
 * @throws IllegalArgumentException if listenerName or mechanism is null.
 */
public static JaasContext loadServerContext(ListenerName listenerName, String mechanism, Map<String, ?> configs) {
    if (listenerName == null)
        throw new IllegalArgumentException("listenerName should not be null for SERVER");
    if (mechanism == null)
        throw new IllegalArgumentException("mechanism should not be null for SERVER");
    // Listener-scoped context name tried first against the default JAAS Configuration.
    String listenerContextName = listenerName.value().toLowerCase(Locale.ROOT) + "." + GLOBAL_CONTEXT_NAME_SERVER;
    // A dynamic JAAS config is only honoured when prefixed with the lower-cased mechanism name.
    String mechanismPrefixedKey = mechanism.toLowerCase(Locale.ROOT) + "." + SaslConfigs.SASL_JAAS_CONFIG;
    Password dynamicJaasConfig = (Password) configs.get(mechanismPrefixedKey);
    if (dynamicJaasConfig == null && configs.get(SaslConfigs.SASL_JAAS_CONFIG) != null)
        LOG.warn("Server config {} should be prefixed with SASL mechanism name, ignoring config", SaslConfigs.SASL_JAAS_CONFIG);
    return load(Type.SERVER, listenerContextName, GLOBAL_CONTEXT_NAME_SERVER, dynamicJaasConfig);
}
Use of org.apache.kafka.common.config.types.Password in the Apache Kafka project.
Example from the class SslTransportLayerTest, method testInvalidKeyPassword.
/**
 * Verifies that clients cannot connect to a server configured with an invalid key
 * password. PEM key material is validated eagerly, so server creation itself fails;
 * file-based stores surface the problem as an SSL handshake failure instead.
 */
@ParameterizedTest
@ArgumentsSource(SslTransportLayerArgumentsProvider.class)
public void testInvalidKeyPassword(Args args) throws Exception {
    args.sslServerConfigs.put(SslConfigs.SSL_KEY_PASSWORD_CONFIG, new Password("invalid"));
    if (!args.useInlinePem) {
        verifySslConfigsWithHandshakeFailure(args);
        return;
    }
    // We fail fast for PEM: the invalid password is rejected when the server is created.
    assertThrows(InvalidConfigurationException.class, () -> createEchoServer(args, SecurityProtocol.SSL));
}
Use of org.apache.kafka.common.config.types.Password in the Apache Kafka project.
Example from the class SslTransportLayerTest, method testServerKeystoreDynamicUpdate.
/**
 * Tests reconfiguration of the server keystore. Verifies that existing connections
 * continue to work with the old keystore, that new connections work with the new
 * keystore, and that invalid reconfiguration attempts are rejected without breaking
 * the server's current keystore.
 */
@ParameterizedTest
@ArgumentsSource(SslTransportLayerArgumentsProvider.class)
public void testServerKeystoreDynamicUpdate(Args args) throws Exception {
    SecurityProtocol securityProtocol = SecurityProtocol.SSL;
    TestSecurityConfig config = new TestSecurityConfig(args.sslServerConfigs);
    ListenerName listenerName = ListenerName.forSecurityProtocol(securityProtocol);
    ChannelBuilder serverChannelBuilder = ChannelBuilders.serverChannelBuilder(listenerName, false, securityProtocol, config, null, null, time, new LogContext(), defaultApiVersionsSupplier());
    server = new NioEchoServer(listenerName, securityProtocol, config, "localhost", serverChannelBuilder, null, time);
    server.start();
    InetSocketAddress addr = new InetSocketAddress("localhost", server.port());
    // Verify that client with matching truststore can authenticate, send and receive
    String oldNode = "0";
    Selector oldClientSelector = createSelector(args.sslClientConfigs);
    oldClientSelector.connect(oldNode, addr, BUFFER_SIZE, BUFFER_SIZE);
    // Fix: check the selector we just connected rather than the shared `selector` field,
    // which is only coincidentally the same instance and is reassigned by later
    // createSelector() calls in this test.
    NetworkTestUtils.checkClientConnection(oldClientSelector, oldNode, 100, 10);
    CertStores newServerCertStores = certBuilder(true, "server", args.useInlinePem).addHostName("localhost").build();
    Map<String, Object> newKeystoreConfigs = newServerCertStores.keyStoreProps();
    assertTrue(serverChannelBuilder instanceof ListenerReconfigurable, "SslChannelBuilder not reconfigurable");
    ListenerReconfigurable reconfigurableBuilder = (ListenerReconfigurable) serverChannelBuilder;
    assertEquals(listenerName, reconfigurableBuilder.listenerName());
    reconfigurableBuilder.validateReconfiguration(newKeystoreConfigs);
    reconfigurableBuilder.reconfigure(newKeystoreConfigs);
    // Verify that new client with old truststore fails
    oldClientSelector.connect("1", addr, BUFFER_SIZE, BUFFER_SIZE);
    NetworkTestUtils.waitForChannelClose(oldClientSelector, "1", ChannelState.State.AUTHENTICATION_FAILED);
    // Verify that new client with new truststore can authenticate, send and receive
    args.sslClientConfigs = args.getTrustingConfig(args.clientCertStores, newServerCertStores);
    Selector newClientSelector = createSelector(args.sslClientConfigs);
    newClientSelector.connect("2", addr, BUFFER_SIZE, BUFFER_SIZE);
    NetworkTestUtils.checkClientConnection(newClientSelector, "2", 100, 10);
    // Verify that old client continues to work
    NetworkTestUtils.checkClientConnection(oldClientSelector, oldNode, 100, 10);
    // A keystore whose SubjectAltName differs from the current one must be rejected.
    CertStores invalidCertStores = certBuilder(true, "server", args.useInlinePem).addHostName("127.0.0.1").build();
    Map<String, Object> invalidConfigs = args.getTrustingConfig(invalidCertStores, args.clientCertStores);
    verifyInvalidReconfigure(reconfigurableBuilder, invalidConfigs, "keystore with different SubjectAltName");
    // A keystore path that does not exist must also be rejected.
    Map<String, Object> missingStoreConfigs = new HashMap<>();
    missingStoreConfigs.put(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG, "PKCS12");
    missingStoreConfigs.put(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, "some.keystore.path");
    missingStoreConfigs.put(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, new Password("some.keystore.password"));
    missingStoreConfigs.put(SslConfigs.SSL_KEY_PASSWORD_CONFIG, new Password("some.key.password"));
    verifyInvalidReconfigure(reconfigurableBuilder, missingStoreConfigs, "keystore not found");
    // Verify that new connections continue to work with the server with previously configured keystore after failed reconfiguration
    newClientSelector.connect("3", addr, BUFFER_SIZE, BUFFER_SIZE);
    NetworkTestUtils.checkClientConnection(newClientSelector, "3", 100, 10);
}
Use of org.apache.kafka.common.config.types.Password in the Apache Kafka project.
Example from the class SslTransportLayerTest, method testServerTruststoreDynamicUpdate.
/**
 * Tests reconfiguration of the server truststore. Verifies that existing connections
 * continue to work with the old truststore, that new connections work with the new
 * truststore, and that invalid reconfiguration attempts are rejected without breaking
 * the server's current truststore.
 */
@ParameterizedTest
@ArgumentsSource(SslTransportLayerArgumentsProvider.class)
public void testServerTruststoreDynamicUpdate(Args args) throws Exception {
    SecurityProtocol securityProtocol = SecurityProtocol.SSL;
    // Require client certificates so the server truststore is actually exercised.
    args.sslServerConfigs.put(BrokerSecurityConfigs.SSL_CLIENT_AUTH_CONFIG, "required");
    TestSecurityConfig config = new TestSecurityConfig(args.sslServerConfigs);
    ListenerName listenerName = ListenerName.forSecurityProtocol(securityProtocol);
    ChannelBuilder serverChannelBuilder = ChannelBuilders.serverChannelBuilder(listenerName, false, securityProtocol, config, null, null, time, new LogContext(), defaultApiVersionsSupplier());
    server = new NioEchoServer(listenerName, securityProtocol, config, "localhost", serverChannelBuilder, null, time);
    server.start();
    InetSocketAddress addr = new InetSocketAddress("localhost", server.port());
    // Verify that client with matching keystore can authenticate, send and receive
    String oldNode = "0";
    Selector oldClientSelector = createSelector(args.sslClientConfigs);
    oldClientSelector.connect(oldNode, addr, BUFFER_SIZE, BUFFER_SIZE);
    // Fix: check the selector we just connected rather than the shared `selector` field,
    // which is only coincidentally the same instance and is reassigned by later
    // createSelector() calls in this test.
    NetworkTestUtils.checkClientConnection(oldClientSelector, oldNode, 100, 10);
    CertStores newClientCertStores = certBuilder(true, "client", args.useInlinePem).addHostName("localhost").build();
    args.sslClientConfigs = args.getTrustingConfig(newClientCertStores, args.serverCertStores);
    Map<String, Object> newTruststoreConfigs = newClientCertStores.trustStoreProps();
    assertTrue(serverChannelBuilder instanceof ListenerReconfigurable, "SslChannelBuilder not reconfigurable");
    ListenerReconfigurable reconfigurableBuilder = (ListenerReconfigurable) serverChannelBuilder;
    assertEquals(listenerName, reconfigurableBuilder.listenerName());
    reconfigurableBuilder.validateReconfiguration(newTruststoreConfigs);
    reconfigurableBuilder.reconfigure(newTruststoreConfigs);
    // Verify that new client with old truststore fails
    oldClientSelector.connect("1", addr, BUFFER_SIZE, BUFFER_SIZE);
    NetworkTestUtils.waitForChannelClose(oldClientSelector, "1", ChannelState.State.AUTHENTICATION_FAILED);
    // Verify that new client with new truststore can authenticate, send and receive
    Selector newClientSelector = createSelector(args.sslClientConfigs);
    newClientSelector.connect("2", addr, BUFFER_SIZE, BUFFER_SIZE);
    NetworkTestUtils.checkClientConnection(newClientSelector, "2", 100, 10);
    // Verify that old client continues to work
    NetworkTestUtils.checkClientConnection(oldClientSelector, oldNode, 100, 10);
    // An unknown truststore type must be rejected.
    Map<String, Object> invalidConfigs = new HashMap<>(newTruststoreConfigs);
    invalidConfigs.put(SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, "INVALID_TYPE");
    verifyInvalidReconfigure(reconfigurableBuilder, invalidConfigs, "invalid truststore type");
    // A truststore path that does not exist must also be rejected.
    Map<String, Object> missingStoreConfigs = new HashMap<>();
    missingStoreConfigs.put(SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, "PKCS12");
    missingStoreConfigs.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, "some.truststore.path");
    missingStoreConfigs.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, new Password("some.truststore.password"));
    verifyInvalidReconfigure(reconfigurableBuilder, missingStoreConfigs, "truststore not found");
    // Verify that new connections continue to work with the server with previously configured keystore after failed reconfiguration
    newClientSelector.connect("3", addr, BUFFER_SIZE, BUFFER_SIZE);
    NetworkTestUtils.checkClientConnection(newClientSelector, "3", 100, 10);
}
Use of org.apache.kafka.common.config.types.Password in the Apache Kafka project.
Example from the class SaslAuthenticatorTest, method testTokenReauthenticationOverSaslScram.
/**
 * Verifies that a client authenticating with a delegation token over SASL/SCRAM
 * re-authenticates successfully once the session lifetime derived from the token
 * expiry has elapsed. The token cache below returns a growing expiry on each
 * lookup so the first session expires quickly while the re-authenticated session
 * lies in the future.
 */
@Test
public void testTokenReauthenticationOverSaslScram() throws Exception {
    SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL;
    TestJaasConfig jaasConfig = configureMechanisms("SCRAM-SHA-256", Arrays.asList("SCRAM-SHA-256"));
    // create jaas config for token auth: the token id acts as the username and the
    // token HMAC as the password for the SCRAM login module
    Map<String, Object> options = new HashMap<>();
    String tokenId = "token1";
    String tokenHmac = "abcdefghijkl";
    // tokenId
    options.put("username", tokenId);
    // token hmac
    options.put("password", tokenHmac);
    // enable token authentication
    options.put(ScramLoginModule.TOKEN_AUTH_CONFIG, "true");
    jaasConfig.createOrUpdateEntry(TestJaasConfig.LOGIN_CONTEXT_CLIENT, ScramLoginModule.class.getName(), options);
    // ensure re-authentication based on token expiry rather than a default value
    saslServerConfigs.put(BrokerSecurityConfigs.CONNECTIONS_MAX_REAUTH_MS, Long.MAX_VALUE);
    /*
     * create a token cache that adjusts the token expiration dynamically so that
     * the first time the expiry is read during authentication we use it to define a
     * session expiration time that we can then sleep through; then the second time
     * the value is read (during re-authentication) it will be in the future.
     */
    Function<Integer, Long> tokenLifetime = callNum -> 10 * callNum * CONNECTIONS_MAX_REAUTH_MS_VALUE;
    DelegationTokenCache tokenCache = new DelegationTokenCache(ScramMechanism.mechanismNames()) {
        // number of token() lookups so far; each call yields a longer lifetime
        int callNum = 0;
        @Override
        public TokenInformation token(String tokenId) {
            // NOTE(review): this parameter shadows the enclosing local `tokenId`;
            // both carry the same value here, so the shadowing is benign.
            TokenInformation baseTokenInfo = super.token(tokenId);
            long thisLifetimeMs = System.currentTimeMillis() + tokenLifetime.apply(++callNum).longValue();
            // clone the cached token but with max-lifetime and expiry pushed out by
            // the per-call lifetime computed above
            TokenInformation retvalTokenInfo = new TokenInformation(baseTokenInfo.tokenId(), baseTokenInfo.owner(), baseTokenInfo.renewers(), baseTokenInfo.issueTimestamp(), thisLifetimeMs, thisLifetimeMs);
            return retvalTokenInfo;
        }
    };
    server = createEchoServer(ListenerName.forSecurityProtocol(securityProtocol), securityProtocol, tokenCache);
    // register the token (already expired timestamps; the cache override above
    // supplies the effective expiry) and its SCRAM credentials on the server
    KafkaPrincipal owner = SecurityUtils.parseKafkaPrincipal("User:Owner");
    KafkaPrincipal renewer = SecurityUtils.parseKafkaPrincipal("User:Renewer1");
    TokenInformation tokenInfo = new TokenInformation(tokenId, owner, Collections.singleton(renewer), System.currentTimeMillis(), System.currentTimeMillis(), System.currentTimeMillis());
    server.tokenCache().addToken(tokenId, tokenInfo);
    updateTokenCredentialCache(tokenId, tokenHmac);
    // initial authentication must succeed
    createClientConnection(securityProtocol, "0");
    checkClientConnection("0");
    // ensure metrics are as expected before trying to re-authenticate
    server.verifyAuthenticationMetrics(1, 0);
    server.verifyReauthenticationMetrics(0, 0);
    /*
     * Now re-authenticate and ensure it succeeds. We have to sleep long enough so
     * that the current delegation token will be expired when the next write occurs;
     * this will trigger a re-authentication. Then the second time the delegation
     * token is read and transmitted to the server it will again have an expiration
     * date in the future.
     */
    delay(tokenLifetime.apply(1));
    checkClientConnection("0");
    server.verifyReauthenticationMetrics(1, 0);
}
Aggregations