Use of org.apache.kafka.common.security.auth.SecurityProtocol in project kafka by apache.
The class SaslAuthenticatorTest, method testPacketSizeTooBig.
/**
 * Tests that packets that are too big during the Kafka SASL handshake request flow
 * or the actual SASL authentication flow result in authentication failure
 * and do not cause any failures in the server.
 */
@Test
public void testPacketSizeTooBig() throws Exception {
    SecurityProtocol securityProtocol = SecurityProtocol.SASL_PLAINTEXT;
    configureMechanisms("PLAIN", Arrays.asList("PLAIN"));
    server = createEchoServer(securityProtocol);

    // Send a SASL packet with a large declared size after a valid handshake request
    String node1 = "invalid1";
    createClientConnection(SecurityProtocol.PLAINTEXT, node1);
    sendHandshakeRequestReceiveResponse(node1, (short) 1);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    buffer.putInt(Integer.MAX_VALUE);
    buffer.put(new byte[buffer.capacity() - 4]);
    buffer.rewind();
    selector.send(new NetworkSend(node1, ByteBufferSend.sizePrefixed(buffer)));
    NetworkTestUtils.waitForChannelClose(selector, node1, ChannelState.READY.state());
    selector.close();

    // Test that a good connection still works
    createAndCheckClientConnection(securityProtocol, "good1");

    // Send a packet with a large declared size before the handshake request
    String node2 = "invalid2";
    createClientConnection(SecurityProtocol.PLAINTEXT, node2);
    buffer.clear();
    buffer.putInt(Integer.MAX_VALUE);
    buffer.put(new byte[buffer.capacity() - 4]);
    buffer.rewind();
    selector.send(new NetworkSend(node2, ByteBufferSend.sizePrefixed(buffer)));
    NetworkTestUtils.waitForChannelClose(selector, node2, ChannelState.READY.state());
    selector.close();

    // Test that a good connection still works
    createAndCheckClientConnection(securityProtocol, "good2");
}
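The attack in this test relies on Kafka's length-prefixed wire format: every frame begins with a four-byte size field, and here the client declares Integer.MAX_VALUE bytes. Below is a minimal, self-contained sketch of the kind of bounds check a receiver must apply before trusting that prefix; it is not Kafka's actual implementation, and the cap value is illustrative.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;

final class FrameReader {
    // Illustrative cap; Kafka enforces a configurable maximum receive size.
    private static final int MAX_FRAME_BYTES = 100 * 1024 * 1024;

    static ByteBuffer readFrame(ReadableByteChannel channel) throws IOException {
        ByteBuffer sizeBuf = ByteBuffer.allocate(4);
        while (sizeBuf.hasRemaining()) {
            if (channel.read(sizeBuf) < 0)
                throw new IOException("Connection closed before size prefix was read");
        }
        sizeBuf.flip();
        int size = sizeBuf.getInt();
        // Reject the frame instead of allocating Integer.MAX_VALUE bytes,
        // which is what the test above sends.
        if (size < 0 || size > MAX_FRAME_BYTES)
            throw new IOException("Invalid frame size " + size);
        ByteBuffer payload = ByteBuffer.allocate(size);
        while (payload.hasRemaining()) {
            if (channel.read(payload) < 0)
                throw new IOException("Connection closed mid-frame");
        }
        payload.flip();
        return payload;
    }
}

A server with such a check closes the offending connection (the waitForChannelClose assertions above) without ever buffering the bogus payload.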
Use of org.apache.kafka.common.security.auth.SecurityProtocol in project kafka by apache.
The class ClientAuthenticationFailureTest, method setup.
@BeforeEach
public void setup() throws Exception {
    LoginManager.closeAll();
    SecurityProtocol securityProtocol = SecurityProtocol.SASL_PLAINTEXT;
    saslServerConfigs = new HashMap<>();
    saslServerConfigs.put(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG, Arrays.asList("PLAIN"));
    saslClientConfigs = new HashMap<>();
    saslClientConfigs.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
    saslClientConfigs.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
    // Configure the client-side JAAS login with a password the server will reject
    testJaasConfig = TestJaasConfig.createConfiguration("PLAIN", Arrays.asList("PLAIN"));
    testJaasConfig.setClientOptions("PLAIN", TestJaasConfig.USERNAME, "anotherpassword");
    server = createEchoServer(securityProtocol);
}
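A sketch of the kind of test this setup enables: any client built from saslClientConfigs should fail SASL authentication because of the deliberately wrong password. This is hedged, not the class's actual test code; in particular, server.port() and the exact exception unwrapping are assumptions.

@Test
public void testProducerAuthenticationFailure() {
    Map<String, Object> configs = new HashMap<>(saslClientConfigs);
    // server.port() is assumed to expose the echo server's listener port
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:" + server.port());
    configs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    try (KafkaProducer<String, String> producer = new KafkaProducer<>(configs)) {
        producer.send(new ProducerRecord<>("test-topic", "key", "value")).get(10, TimeUnit.SECONDS);
        fail("Expected authentication to fail");
    } catch (Exception e) {
        // The wrong password should surface as a SaslAuthenticationException,
        // possibly wrapped in an ExecutionException from Future.get()
        Throwable cause = e instanceof ExecutionException ? e.getCause() : e;
        assertTrue(cause instanceof SaslAuthenticationException);
    }
}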
Use of org.apache.kafka.common.security.auth.SecurityProtocol in project kafka by apache.
The class SaslAuthenticatorFailureDelayTest, method testDisabledSaslMechanism.
/**
 * Tests that clients using a disabled SASL mechanism fail authentication, and that
 * the connection close is delayed if a failure delay is configured.
 */
@Test
public void testDisabledSaslMechanism() throws Exception {
    String node = "0";
    SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL;
    TestJaasConfig jaasConfig = configureMechanisms("SCRAM-SHA-256", Collections.singletonList("SCRAM-SHA-256"));
    jaasConfig.setClientOptions("PLAIN", TestJaasConfig.USERNAME, "invalidpassword");
    server = createEchoServer(securityProtocol);
    createAndCheckClientAuthenticationFailure(securityProtocol, node, "SCRAM-SHA-256", null);
    server.verifyAuthenticationMetrics(0, 1);
}
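The connection close delay referenced in the Javadoc is the broker-side setting introduced by KIP-306. As a hedged illustration of the relevant broker override (the 500 ms value is arbitrary, and this is not the test harness's wiring):

import java.util.Properties;

final class FailureDelayConfigExample {
    static Properties brokerOverrides() {
        Properties props = new Properties();
        // Keep connections that failed authentication open for 500 ms before
        // closing them, so clients cannot retry bad credentials in a tight loop;
        // the value must stay below connections.max.idle.ms.
        props.setProperty("connection.failed.authentication.delay.ms", "500");
        return props;
    }
}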
Use of org.apache.kafka.common.security.auth.SecurityProtocol in project strimzi by strimzi.
The class HttpBridgeKafkaExternalListenersST, method testWeirdUsername.
@SuppressWarnings({ "checkstyle:MethodLength" })
private void testWeirdUsername(ExtensionContext extensionContext, String weirdUserName, KafkaListenerAuthentication auth,
                               KafkaBridgeSpec spec, SecurityProtocol securityProtocol) {
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3, 1)
        .editMetadata()
            .withNamespace(namespace)
        .endMetadata()
        .editSpec()
            .editKafka()
                .withListeners(
                    new GenericKafkaListenerBuilder()
                        .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                        .withPort(9093)
                        .withType(KafkaListenerType.INTERNAL)
                        .withTls(true)
                        .withAuth(auth)
                        .build(),
                    new GenericKafkaListenerBuilder()
                        .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                        .withPort(9094)
                        .withType(KafkaListenerType.NODEPORT)
                        .withTls(true)
                        .withAuth(auth)
                        .build())
            .endKafka()
        .endSpec()
        .build());

    BridgeClients kafkaBridgeClientJob = new BridgeClientsBuilder()
        .withProducerName(clusterName + "-" + producerName)
        .withConsumerName(clusterName + "-" + consumerName)
        .withBootstrapAddress(KafkaBridgeResources.serviceName(clusterName))
        .withTopicName(topicName)
        .withMessageCount(MESSAGE_COUNT)
        .withPort(Constants.HTTP_BRIDGE_DEFAULT_PORT)
        .withNamespaceName(namespace)
        .build();

    // Create topic
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName)
        .editMetadata()
            .withNamespace(namespace)
        .endMetadata()
        .build());

    // Create the user (TLS or SCRAM-SHA, depending on the listener authentication)
    if (auth.getType().equals(Constants.TLS_LISTENER_DEFAULT_NAME)) {
        resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(clusterName, weirdUserName)
            .editMetadata()
                .withNamespace(namespace)
            .endMetadata()
            .build());
    } else {
        resourceManager.createResource(extensionContext, KafkaUserTemplates.scramShaUser(clusterName, weirdUserName)
            .editMetadata()
                .withNamespace(namespace)
            .endMetadata()
            .build());
    }

    final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(namespace, true, kafkaClientsName).build());

    // Deploy the HTTP bridge
    resourceManager.createResource(extensionContext, KafkaBridgeTemplates.kafkaBridge(clusterName, KafkaResources.tlsBootstrapAddress(clusterName), 1)
        .editMetadata()
            .withNamespace(namespace)
        .endMetadata()
        .withNewSpecLike(spec)
            .withBootstrapServers(KafkaResources.tlsBootstrapAddress(clusterName))
            .withNewHttp(Constants.HTTP_BRIDGE_DEFAULT_PORT)
            .withNewConsumer()
                .addToConfig(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
            .endConsumer()
        .endSpec()
        .build());

    final Service service = KafkaBridgeUtils.createBridgeNodePortService(clusterName, namespace, BRIDGE_EXTERNAL_SERVICE);
    ServiceResource.createServiceResource(extensionContext, service, namespace);
    resourceManager.createResource(extensionContext, kafkaBridgeClientJob.consumerStrimziBridge());

    final String kafkaProducerExternalName = "kafka-producer-external" + new Random().nextInt(Integer.MAX_VALUE);
    final List<ListenerStatus> listenerStatusList = KafkaResource.kafkaClient()
        .inNamespace(namespace)
        .withName(clusterName)
        .get()
        .getStatus()
        .getListeners();
    final String externalBootstrapServers = listenerStatusList.stream()
        .filter(listener -> listener.getType().equals(Constants.EXTERNAL_LISTENER_DEFAULT_NAME))
        .findFirst()
        .orElseThrow(RuntimeException::new)
        .getBootstrapServers();

    final KafkaClients externalKafkaProducer = new KafkaClientsBuilder()
        .withProducerName(kafkaProducerExternalName)
        .withBootstrapAddress(externalBootstrapServers)
        .withNamespaceName(namespace)
        .withTopicName(topicName)
        .withMessageCount(100)
        .build();

    if (auth.getType().equals(Constants.TLS_LISTENER_DEFAULT_NAME)) {
        // TLS producer
        resourceManager.createResource(extensionContext, externalKafkaProducer.producerTlsStrimzi(clusterName, weirdUserName));
    } else {
        // SCRAM-SHA producer
        resourceManager.createResource(extensionContext, externalKafkaProducer.producerScramShaStrimzi(clusterName, weirdUserName));
    }

    ClientUtils.waitForClientSuccess(kafkaProducerExternalName, namespace, MESSAGE_COUNT);
    // Delete the external Kafka producer job
    JobUtils.deleteJobWithWait(namespace, kafkaProducerExternalName);
    ClientUtils.waitForClientSuccess(clusterName + "-" + consumerName, namespace, MESSAGE_COUNT);
}
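For context, a caller of this helper might look like the following sketch. The test name, username value, and the bridge-spec builder calls are assumptions based on Strimzi's fluent API conventions, not the project's actual test code.

@ParallelTest
void testTlsAuthWithWeirdUsername(ExtensionContext extensionContext) {
    // Illustrative username with characters that stress certificate CN handling
    final String weirdUserName = "example.user_with-edge.case-chars";
    final KafkaBridgeSpec spec = new KafkaBridgeSpecBuilder()
        .withAuthentication(
            new KafkaClientAuthenticationTlsBuilder()
                .withNewCertificateAndKey()
                    .withSecretName(weirdUserName)
                    .withCertificate("user.crt")
                    .withKey("user.key")
                .endCertificateAndKey()
                .build())
        .build();
    testWeirdUsername(extensionContext, weirdUserName, new KafkaListenerAuthenticationTls(), spec, SecurityProtocol.SSL);
}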
Use of org.apache.kafka.common.security.auth.SecurityProtocol in project kafka-rest by confluentinc.
The class ClusterTestHarness, method getBrokerProperties.
protected Properties getBrokerProperties(int i) {
    final Option<File> noFile = Option.apply(null);
    final Option<SecurityProtocol> interBrokerSecurityProtocol = Option.apply(getBrokerSecurityProtocol());
    Properties props = TestUtils.createBrokerConfig(
        i, zkConnect, false, false, TestUtils.RandomPort(),
        interBrokerSecurityProtocol, noFile, Option.<Properties>empty(),
        true, false, TestUtils.RandomPort(),
        false, TestUtils.RandomPort(),
        false, TestUtils.RandomPort(),
        Option.<String>empty(), 1, false, 1, (short) 1);
    props.setProperty("auto.create.topics.enable", "false");
    // We *must* override this to use the port we allocated (Kafka currently allocates
    // one port that it always uses for ZK).
    props.setProperty("zookeeper.connect", this.zkConnect);
    return props;
}
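The security protocol fed into createBrokerConfig comes from getBrokerSecurityProtocol(), so a harness subclass can switch the broker listener. A minimal sketch, assuming that hook is an overridable method of ClusterTestHarness (as its use above suggests); the subclass name is illustrative:

import org.apache.kafka.common.security.auth.SecurityProtocol;

// Illustrative subclass: brokers created by getBrokerProperties(int) will use
// SASL_PLAINTEXT instead of the harness default.
public class SaslPlaintextClusterTestHarness extends ClusterTestHarness {
    @Override
    protected SecurityProtocol getBrokerSecurityProtocol() {
        return SecurityProtocol.SASL_PLAINTEXT;
    }
}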