Search in sources :

Example 66 with SecurityProtocol

use of org.apache.kafka.common.security.auth.SecurityProtocol in project kafka by apache.

The following example is taken from the class UpdateMetadataRequest, method normalize().

/**
 * Fills in fields that older request versions omitted on the wire so that
 * callers can always rely on endpoints, listener names and per-partition
 * topic names being present, whatever version this request was built at.
 */
private void normalize() {
    // Before version 3 the listener name was not serialized; it can be
    // inferred from each endpoint's security protocol instead.
    if (version() < 3) {
        for (UpdateMetadataBroker liveBroker : data.liveBrokers()) {
            if (version() == 0 && liveBroker.endpoints().isEmpty()) {
                // Version 0 carried a single host/port pair rather than an
                // endpoint list; synthesize a PLAINTEXT endpoint from it so
                // callers can rely on endpoints always being present.
                SecurityProtocol protocol = SecurityProtocol.PLAINTEXT;
                UpdateMetadataEndpoint v0Endpoint = new UpdateMetadataEndpoint()
                    .setHost(liveBroker.v0Host())
                    .setPort(liveBroker.v0Port())
                    .setSecurityProtocol(protocol.id)
                    .setListener(ListenerName.forSecurityProtocol(protocol).value());
                liveBroker.setEndpoints(singletonList(v0Endpoint));
            } else {
                // Set listener so that callers can rely on it always being present
                for (UpdateMetadataEndpoint endpoint : liveBroker.endpoints()) {
                    if (endpoint.listener().isEmpty())
                        endpoint.setListener(listenerNameFromSecurityProtocol(endpoint));
                }
            }
        }
    }
    // From version 5 onward partition states are grouped by topic; copy the
    // topic name down into each partition state so callers always get the
    // ungrouped view.
    if (version() >= 5) {
        for (UpdateMetadataTopicState topicState : data.topicStates()) {
            for (UpdateMetadataPartitionState partitionState : topicState.partitionStates()) {
                partitionState.setTopicName(topicState.topicName());
            }
        }
    }
}
Also used : UpdateMetadataBroker(org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataBroker) SecurityProtocol(org.apache.kafka.common.security.auth.SecurityProtocol) UpdateMetadataTopicState(org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataTopicState) UpdateMetadataEndpoint(org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataEndpoint) UpdateMetadataPartitionState(org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState)

Example 67 with SecurityProtocol

use of org.apache.kafka.common.security.auth.SecurityProtocol in project kafka by apache.

The following example is taken from the class SslTransportLayerTest, method testServerKeystoreDynamicUpdate().

/**
 * Tests reconfiguration of server keystore. Verifies that existing connections continue
 * to work with the old keystore and new connections work with the new keystore, and that
 * invalid reconfiguration attempts are rejected without breaking the server.
 */
@ParameterizedTest
@ArgumentsSource(SslTransportLayerArgumentsProvider.class)
public void testServerKeystoreDynamicUpdate(Args args) throws Exception {
    SecurityProtocol securityProtocol = SecurityProtocol.SSL;
    TestSecurityConfig config = new TestSecurityConfig(args.sslServerConfigs);
    ListenerName listenerName = ListenerName.forSecurityProtocol(securityProtocol);
    ChannelBuilder serverChannelBuilder = ChannelBuilders.serverChannelBuilder(listenerName, false, securityProtocol, config, null, null, time, new LogContext(), defaultApiVersionsSupplier());
    server = new NioEchoServer(listenerName, securityProtocol, config, "localhost", serverChannelBuilder, null, time);
    server.start();
    InetSocketAddress addr = new InetSocketAddress("localhost", server.port());
    // Verify that client with matching truststore can authenticate, send and receive
    String oldNode = "0";
    Selector oldClientSelector = createSelector(args.sslClientConfigs);
    oldClientSelector.connect(oldNode, addr, BUFFER_SIZE, BUFFER_SIZE);
    // Fix: check the selector the old node was actually connected on. The previous
    // code passed the unrelated `selector` field, which this test never initializes.
    NetworkTestUtils.checkClientConnection(oldClientSelector, oldNode, 100, 10);
    CertStores newServerCertStores = certBuilder(true, "server", args.useInlinePem).addHostName("localhost").build();
    Map<String, Object> newKeystoreConfigs = newServerCertStores.keyStoreProps();
    assertTrue(serverChannelBuilder instanceof ListenerReconfigurable, "SslChannelBuilder not reconfigurable");
    ListenerReconfigurable reconfigurableBuilder = (ListenerReconfigurable) serverChannelBuilder;
    assertEquals(listenerName, reconfigurableBuilder.listenerName());
    reconfigurableBuilder.validateReconfiguration(newKeystoreConfigs);
    reconfigurableBuilder.reconfigure(newKeystoreConfigs);
    // Verify that new client with old truststore fails: it does not trust the new server keystore
    oldClientSelector.connect("1", addr, BUFFER_SIZE, BUFFER_SIZE);
    NetworkTestUtils.waitForChannelClose(oldClientSelector, "1", ChannelState.State.AUTHENTICATION_FAILED);
    // Verify that new client with new truststore can authenticate, send and receive
    args.sslClientConfigs = args.getTrustingConfig(args.clientCertStores, newServerCertStores);
    Selector newClientSelector = createSelector(args.sslClientConfigs);
    newClientSelector.connect("2", addr, BUFFER_SIZE, BUFFER_SIZE);
    NetworkTestUtils.checkClientConnection(newClientSelector, "2", 100, 10);
    // Verify that old client (established before reconfiguration) continues to work
    NetworkTestUtils.checkClientConnection(oldClientSelector, oldNode, 100, 10);
    // A keystore whose SubjectAltName does not match the listener host must be rejected
    CertStores invalidCertStores = certBuilder(true, "server", args.useInlinePem).addHostName("127.0.0.1").build();
    Map<String, Object> invalidConfigs = args.getTrustingConfig(invalidCertStores, args.clientCertStores);
    verifyInvalidReconfigure(reconfigurableBuilder, invalidConfigs, "keystore with different SubjectAltName");
    // A keystore path that does not exist must be rejected
    Map<String, Object> missingStoreConfigs = new HashMap<>();
    missingStoreConfigs.put(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG, "PKCS12");
    missingStoreConfigs.put(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, "some.keystore.path");
    missingStoreConfigs.put(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, new Password("some.keystore.password"));
    missingStoreConfigs.put(SslConfigs.SSL_KEY_PASSWORD_CONFIG, new Password("some.key.password"));
    verifyInvalidReconfigure(reconfigurableBuilder, missingStoreConfigs, "keystore not found");
    // Verify that new connections continue to work with the server with previously configured keystore after failed reconfiguration
    newClientSelector.connect("3", addr, BUFFER_SIZE, BUFFER_SIZE);
    NetworkTestUtils.checkClientConnection(newClientSelector, "3", 100, 10);
}
Also used : HashMap(java.util.HashMap) InetSocketAddress(java.net.InetSocketAddress) SecurityProtocol(org.apache.kafka.common.security.auth.SecurityProtocol) LogContext(org.apache.kafka.common.utils.LogContext) TestSecurityConfig(org.apache.kafka.common.security.TestSecurityConfig) Password(org.apache.kafka.common.config.types.Password) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) ArgumentsSource(org.junit.jupiter.params.provider.ArgumentsSource)

Example 68 with SecurityProtocol

use of org.apache.kafka.common.security.auth.SecurityProtocol in project kafka by apache.

The following example is taken from the class SslTransportLayerTest, method testServerTruststoreDynamicUpdate().

/**
 * Tests reconfiguration of server truststore. Verifies that existing connections continue
 * to work with the old truststore and new connections work with the new truststore, and that
 * invalid reconfiguration attempts are rejected without breaking the server.
 */
@ParameterizedTest
@ArgumentsSource(SslTransportLayerArgumentsProvider.class)
public void testServerTruststoreDynamicUpdate(Args args) throws Exception {
    SecurityProtocol securityProtocol = SecurityProtocol.SSL;
    // Mutual TLS so that the server truststore is actually exercised
    args.sslServerConfigs.put(BrokerSecurityConfigs.SSL_CLIENT_AUTH_CONFIG, "required");
    TestSecurityConfig config = new TestSecurityConfig(args.sslServerConfigs);
    ListenerName listenerName = ListenerName.forSecurityProtocol(securityProtocol);
    ChannelBuilder serverChannelBuilder = ChannelBuilders.serverChannelBuilder(listenerName, false, securityProtocol, config, null, null, time, new LogContext(), defaultApiVersionsSupplier());
    server = new NioEchoServer(listenerName, securityProtocol, config, "localhost", serverChannelBuilder, null, time);
    server.start();
    InetSocketAddress addr = new InetSocketAddress("localhost", server.port());
    // Verify that client with matching keystore can authenticate, send and receive
    String oldNode = "0";
    Selector oldClientSelector = createSelector(args.sslClientConfigs);
    oldClientSelector.connect(oldNode, addr, BUFFER_SIZE, BUFFER_SIZE);
    // Fix: check the selector the old node was actually connected on. The previous
    // code passed the unrelated `selector` field, which this test never initializes.
    NetworkTestUtils.checkClientConnection(oldClientSelector, oldNode, 100, 10);
    CertStores newClientCertStores = certBuilder(true, "client", args.useInlinePem).addHostName("localhost").build();
    args.sslClientConfigs = args.getTrustingConfig(newClientCertStores, args.serverCertStores);
    Map<String, Object> newTruststoreConfigs = newClientCertStores.trustStoreProps();
    assertTrue(serverChannelBuilder instanceof ListenerReconfigurable, "SslChannelBuilder not reconfigurable");
    ListenerReconfigurable reconfigurableBuilder = (ListenerReconfigurable) serverChannelBuilder;
    assertEquals(listenerName, reconfigurableBuilder.listenerName());
    reconfigurableBuilder.validateReconfiguration(newTruststoreConfigs);
    reconfigurableBuilder.reconfigure(newTruststoreConfigs);
    // Verify that a new connection with the old client keystore fails: the server's
    // new truststore no longer trusts it
    oldClientSelector.connect("1", addr, BUFFER_SIZE, BUFFER_SIZE);
    NetworkTestUtils.waitForChannelClose(oldClientSelector, "1", ChannelState.State.AUTHENTICATION_FAILED);
    // Verify that new client with the new client keystore can authenticate, send and receive
    Selector newClientSelector = createSelector(args.sslClientConfigs);
    newClientSelector.connect("2", addr, BUFFER_SIZE, BUFFER_SIZE);
    NetworkTestUtils.checkClientConnection(newClientSelector, "2", 100, 10);
    // Verify that old client (established before reconfiguration) continues to work
    NetworkTestUtils.checkClientConnection(oldClientSelector, oldNode, 100, 10);
    // An unparseable truststore type must be rejected
    Map<String, Object> invalidConfigs = new HashMap<>(newTruststoreConfigs);
    invalidConfigs.put(SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, "INVALID_TYPE");
    verifyInvalidReconfigure(reconfigurableBuilder, invalidConfigs, "invalid truststore type");
    // A truststore path that does not exist must be rejected
    Map<String, Object> missingStoreConfigs = new HashMap<>();
    missingStoreConfigs.put(SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, "PKCS12");
    missingStoreConfigs.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, "some.truststore.path");
    missingStoreConfigs.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, new Password("some.truststore.password"));
    verifyInvalidReconfigure(reconfigurableBuilder, missingStoreConfigs, "truststore not found");
    // Verify that new connections continue to work with the server with previously configured truststore after failed reconfiguration
    newClientSelector.connect("3", addr, BUFFER_SIZE, BUFFER_SIZE);
    NetworkTestUtils.checkClientConnection(newClientSelector, "3", 100, 10);
}
Also used : HashMap(java.util.HashMap) InetSocketAddress(java.net.InetSocketAddress) SecurityProtocol(org.apache.kafka.common.security.auth.SecurityProtocol) LogContext(org.apache.kafka.common.utils.LogContext) TestSecurityConfig(org.apache.kafka.common.security.TestSecurityConfig) Password(org.apache.kafka.common.config.types.Password) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) ArgumentsSource(org.junit.jupiter.params.provider.ArgumentsSource)

Example 69 with SecurityProtocol

use of org.apache.kafka.common.security.auth.SecurityProtocol in project kafka by apache.

The following example is taken from the class SslTransportLayerTest, method testInterBrokerSslConfigValidation().

/**
 * Verifies that an inter-broker listener, which validates the truststore against
 * the keystore, works with configs including mutual authentication and hostname
 * verification.
 */
@ParameterizedTest
@ArgumentsSource(SslTransportLayerArgumentsProvider.class)
public void testInterBrokerSslConfigValidation(Args args) throws Exception {
    SecurityProtocol securityProtocol = SecurityProtocol.SSL;
    ListenerName listenerName = ListenerName.forSecurityProtocol(securityProtocol);
    // Require client certificates and hostname verification on the server side
    args.sslServerConfigs.put(BrokerSecurityConfigs.SSL_CLIENT_AUTH_CONFIG, "required");
    args.sslServerConfigs.put(SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, "HTTPS");
    // Inter-broker: both sides use the server's certificate stores
    args.sslServerConfigs.putAll(args.serverCertStores.keyStoreProps());
    args.sslServerConfigs.putAll(args.serverCertStores.trustStoreProps());
    args.sslClientConfigs.putAll(args.serverCertStores.keyStoreProps());
    args.sslClientConfigs.putAll(args.serverCertStores.trustStoreProps());
    TestSecurityConfig config = new TestSecurityConfig(args.sslServerConfigs);
    // isInterBrokerListener=true enables keystore/truststore cross-validation
    ChannelBuilder serverChannelBuilder = ChannelBuilders.serverChannelBuilder(listenerName, true, securityProtocol, config, null, null, time, new LogContext(), defaultApiVersionsSupplier());
    server = new NioEchoServer(listenerName, securityProtocol, config, "localhost", serverChannelBuilder, null, time);
    server.start();
    this.selector = createSelector(args.sslClientConfigs, null, null, null);
    InetSocketAddress serverAddress = new InetSocketAddress("localhost", server.port());
    selector.connect("0", serverAddress, BUFFER_SIZE, BUFFER_SIZE);
    NetworkTestUtils.checkClientConnection(selector, "0", 100, 10);
}
Also used : InetSocketAddress(java.net.InetSocketAddress) SecurityProtocol(org.apache.kafka.common.security.auth.SecurityProtocol) TestSecurityConfig(org.apache.kafka.common.security.TestSecurityConfig) LogContext(org.apache.kafka.common.utils.LogContext) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) ArgumentsSource(org.junit.jupiter.params.provider.ArgumentsSource)

Example 70 with SecurityProtocol

use of org.apache.kafka.common.security.auth.SecurityProtocol in project kafka by apache.

The following example is taken from the class RequestResponseTest, method createUpdateMetadataRequest().

/**
 * Builds an UpdateMetadataRequest at the given version with a fixed set of
 * partition states and two live brokers, exercising the version-dependent
 * fields (endpoints from v1, listener names from v3, topic IDs from v7).
 */
private UpdateMetadataRequest createUpdateMetadataRequest(short version, String rack) {
    List<Integer> isr = asList(1, 2);
    List<Integer> replicas = asList(1, 2, 3, 4);
    List<Integer> offlineReplicas = emptyList();
    List<UpdateMetadataPartitionState> partitionStates = new ArrayList<>();
    partitionStates.add(new UpdateMetadataPartitionState()
        .setTopicName("topic5").setPartitionIndex(105).setControllerEpoch(0)
        .setLeader(2).setLeaderEpoch(1).setIsr(isr).setZkVersion(2)
        .setReplicas(replicas).setOfflineReplicas(offlineReplicas));
    partitionStates.add(new UpdateMetadataPartitionState()
        .setTopicName("topic5").setPartitionIndex(1).setControllerEpoch(1)
        .setLeader(1).setLeaderEpoch(1).setIsr(isr).setZkVersion(2)
        .setReplicas(replicas).setOfflineReplicas(offlineReplicas));
    partitionStates.add(new UpdateMetadataPartitionState()
        .setTopicName("topic20").setPartitionIndex(1).setControllerEpoch(1)
        .setLeader(0).setLeaderEpoch(1).setIsr(isr).setZkVersion(2)
        .setReplicas(replicas).setOfflineReplicas(offlineReplicas));
    // Topic IDs were added in version 7
    Map<String, Uuid> topicIds = new HashMap<>();
    if (version > 6) {
        topicIds.put("topic5", Uuid.randomUuid());
        topicIds.put("topic20", Uuid.randomUuid());
    }
    SecurityProtocol plaintext = SecurityProtocol.PLAINTEXT;
    List<UpdateMetadataEndpoint> endpoints1 = new ArrayList<>();
    endpoints1.add(new UpdateMetadataEndpoint()
        .setHost("host1").setPort(1223).setSecurityProtocol(plaintext.id)
        .setListener(ListenerName.forSecurityProtocol(plaintext).value()));
    List<UpdateMetadataEndpoint> endpoints2 = new ArrayList<>();
    endpoints2.add(new UpdateMetadataEndpoint()
        .setHost("host1").setPort(1244).setSecurityProtocol(plaintext.id)
        .setListener(ListenerName.forSecurityProtocol(plaintext).value()));
    // Multiple endpoints per broker are only representable from version 1
    if (version > 0) {
        SecurityProtocol ssl = SecurityProtocol.SSL;
        endpoints2.add(new UpdateMetadataEndpoint()
            .setHost("host2").setPort(1234).setSecurityProtocol(ssl.id)
            .setListener(ListenerName.forSecurityProtocol(ssl).value()));
        endpoints2.add(new UpdateMetadataEndpoint()
            .setHost("host2").setPort(1334).setSecurityProtocol(ssl.id));
        // Custom listener names are serializable from version 3
        if (version >= 3)
            endpoints2.get(1).setListener("CLIENT");
    }
    List<UpdateMetadataBroker> liveBrokers = asList(
        new UpdateMetadataBroker().setId(0).setEndpoints(endpoints1).setRack(rack),
        new UpdateMetadataBroker().setId(1).setEndpoints(endpoints2).setRack(rack));
    return new UpdateMetadataRequest.Builder(version, 1, 10, 0, partitionStates, liveBrokers, topicIds).build();
}
Also used : UpdateMetadataBroker(org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataBroker) LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) SecurityProtocol(org.apache.kafka.common.security.auth.SecurityProtocol) Uuid(org.apache.kafka.common.Uuid) UpdateMetadataEndpoint(org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataEndpoint) UpdateMetadataPartitionState(org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState)

Aggregations

SecurityProtocol (org.apache.kafka.common.security.auth.SecurityProtocol)106 Test (org.junit.jupiter.api.Test)50 Test (org.junit.Test)29 HashMap (java.util.HashMap)22 InetSocketAddress (java.net.InetSocketAddress)14 NetworkSend (org.apache.kafka.common.network.NetworkSend)11 RequestHeader (org.apache.kafka.common.requests.RequestHeader)11 IOException (java.io.IOException)10 PlainLoginModule (org.apache.kafka.common.security.plain.PlainLoginModule)10 TestSecurityConfig (org.apache.kafka.common.security.TestSecurityConfig)9 ScramLoginModule (org.apache.kafka.common.security.scram.ScramLoginModule)9 File (java.io.File)8 ByteBuffer (java.nio.ByteBuffer)8 Properties (java.util.Properties)8 ApiVersionsRequest (org.apache.kafka.common.requests.ApiVersionsRequest)7 ApiVersionsResponse (org.apache.kafka.common.requests.ApiVersionsResponse)7 LogContext (org.apache.kafka.common.utils.LogContext)6 Random (java.util.Random)5 Password (org.apache.kafka.common.config.types.Password)5 ListenerName (org.apache.kafka.common.network.ListenerName)5