Example 21 with ApiVersion

Use of org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion in the apache/kafka project.

The class KafkaAdminClientTest, method testDeleteConsumerGroupsWithOlderBroker.

@Test
public void testDeleteConsumerGroupsWithOlderBroker() throws Exception {
    final List<String> groupIds = singletonList("groupId");
    ApiVersion findCoordinatorV3 = new ApiVersion().setApiKey(ApiKeys.FIND_COORDINATOR.id).setMinVersion((short) 0).setMaxVersion((short) 3);
    ApiVersion describeGroups = new ApiVersion().setApiKey(ApiKeys.DESCRIBE_GROUPS.id).setMinVersion((short) 0).setMaxVersion(ApiKeys.DELETE_GROUPS.latestVersion());
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(Arrays.asList(findCoordinatorV3, describeGroups)));
        // Retriable FindCoordinatorResponse errors should be retried
        env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode()));
        env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS, Node.noNode()));
        env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        final DeletableGroupResultCollection validResponse = new DeletableGroupResultCollection();
        validResponse.add(new DeletableGroupResult().setGroupId("groupId").setErrorCode(Errors.NONE.code()));
        env.kafkaClient().prepareResponse(new DeleteGroupsResponse(new DeleteGroupsResponseData().setResults(validResponse)));
        final DeleteConsumerGroupsResult result = env.adminClient().deleteConsumerGroups(groupIds);
        final KafkaFuture<Void> results = result.deletedGroups().get("groupId");
        assertNull(results.get());
        // should throw error for non-retriable errors
        env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.GROUP_AUTHORIZATION_FAILED, Node.noNode()));
        DeleteConsumerGroupsResult errorResult = env.adminClient().deleteConsumerGroups(groupIds);
        TestUtils.assertFutureError(errorResult.deletedGroups().get("groupId"), GroupAuthorizationException.class);
        // Retriable errors should be retried
        env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        final DeletableGroupResultCollection errorResponse = new DeletableGroupResultCollection();
        errorResponse.add(new DeletableGroupResult().setGroupId("groupId").setErrorCode(Errors.COORDINATOR_LOAD_IN_PROGRESS.code()));
        env.kafkaClient().prepareResponse(new DeleteGroupsResponse(new DeleteGroupsResponseData().setResults(errorResponse)));
        /*
         * We need to queue two responses here: the DeleteGroups call first receives a NOT_COORDINATOR
         * error because the coordinator has moved, which causes the whole operation to be retried,
         * so we must also answer the retried FindCoordinator request with a FindCoordinatorResponse.
         *
         * The same applies to the COORDINATOR_NOT_AVAILABLE error response that follows.
         */
        DeletableGroupResultCollection coordinatorMoved = new DeletableGroupResultCollection();
        coordinatorMoved.add(new DeletableGroupResult().setGroupId("groupId").setErrorCode(Errors.NOT_COORDINATOR.code()));
        env.kafkaClient().prepareResponse(new DeleteGroupsResponse(new DeleteGroupsResponseData().setResults(coordinatorMoved)));
        env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        coordinatorMoved = new DeletableGroupResultCollection();
        coordinatorMoved.add(new DeletableGroupResult().setGroupId("groupId").setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code()));
        env.kafkaClient().prepareResponse(new DeleteGroupsResponse(new DeleteGroupsResponseData().setResults(coordinatorMoved)));
        env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        env.kafkaClient().prepareResponse(new DeleteGroupsResponse(new DeleteGroupsResponseData().setResults(validResponse)));
        errorResult = env.adminClient().deleteConsumerGroups(groupIds);
        final KafkaFuture<Void> errorResults = errorResult.deletedGroups().get("groupId");
        assertNull(errorResults.get());
    }
}
Also used: ApiVersion(org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion), DeletableGroupResult(org.apache.kafka.common.message.DeleteGroupsResponseData.DeletableGroupResult), DeleteGroupsResponseData(org.apache.kafka.common.message.DeleteGroupsResponseData), DeleteGroupsResponse(org.apache.kafka.common.requests.DeleteGroupsResponse), DeletableGroupResultCollection(org.apache.kafka.common.message.DeleteGroupsResponseData.DeletableGroupResultCollection), ParameterizedTest(org.junit.jupiter.params.ParameterizedTest), Test(org.junit.jupiter.api.Test)

Example 22 with ApiVersion

Use of org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion in the apache/kafka project.

The class ApiVersionsResponseTest, method testIntersect.

@Test
public void testIntersect() {
    assertFalse(ApiVersionsResponse.intersect(null, null).isPresent());
    assertThrows(IllegalArgumentException.class, () -> ApiVersionsResponse.intersect(new ApiVersion().setApiKey((short) 10), new ApiVersion().setApiKey((short) 3)));
    short min = 0;
    short max = 10;
    ApiVersion thisVersion = new ApiVersion().setApiKey(ApiKeys.FETCH.id).setMinVersion(min).setMaxVersion(Short.MAX_VALUE);
    ApiVersion other = new ApiVersion().setApiKey(ApiKeys.FETCH.id).setMinVersion(Short.MIN_VALUE).setMaxVersion(max);
    ApiVersion expected = new ApiVersion().setApiKey(ApiKeys.FETCH.id).setMinVersion(min).setMaxVersion(max);
    assertFalse(ApiVersionsResponse.intersect(thisVersion, null).isPresent());
    assertFalse(ApiVersionsResponse.intersect(null, other).isPresent());
    assertEquals(expected, ApiVersionsResponse.intersect(thisVersion, other).get());
    // test for symmetric
    assertEquals(expected, ApiVersionsResponse.intersect(other, thisVersion).get());
}
Also used: ApiVersion(org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion), Test(org.junit.jupiter.api.Test), ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)
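For illustration, here is a minimal usage sketch of the intersect helper exercised above, not taken from the Kafka sources: it computes the Fetch version range that a client and a broker both support. The literal version numbers are assumptions chosen for the example, and java.util.Optional is assumed to be imported in addition to the types listed above.

// Illustrative sketch: find the common Fetch version range of two ApiVersion entries.
// The concrete min/max values are made up for this example.
ApiVersion clientRange = new ApiVersion().setApiKey(ApiKeys.FETCH.id).setMinVersion((short) 0).setMaxVersion((short) 13);
ApiVersion brokerRange = new ApiVersion().setApiKey(ApiKeys.FETCH.id).setMinVersion((short) 4).setMaxVersion((short) 12);
Optional<ApiVersion> usable = ApiVersionsResponse.intersect(clientRange, brokerRange);
// usable now holds FETCH with minVersion 4 and maxVersion 12; an empty Optional
// would mean the two ranges do not overlap (or that one argument was null).
usable.ifPresent(v -> System.out.println("usable Fetch versions: " + v.minVersion() + " to " + v.maxVersion()));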

Example 23 with ApiVersion

Use of org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion in the apache/kafka project.

The class TransactionManager, method handleCoordinatorReady.

void handleCoordinatorReady() {
    NodeApiVersions nodeApiVersions = transactionCoordinator != null ? apiVersions.get(transactionCoordinator.idString()) : null;
    ApiVersion initProducerIdVersion = nodeApiVersions != null ? nodeApiVersions.apiVersion(ApiKeys.INIT_PRODUCER_ID) : null;
    this.coordinatorSupportsBumpingEpoch = initProducerIdVersion != null && initProducerIdVersion.maxVersion() >= 3;
}
Also used: ApiVersion(org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion), NodeApiVersions(org.apache.kafka.clients.NodeApiVersions)
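As a self-contained variant of the check above, here is a minimal sketch, not taken from the Kafka sources, that builds a NodeApiVersions for a hypothetical coordinator and applies the same maxVersion() >= 3 gate used in handleCoordinatorReady. The helper name and the advertised version range (0 to 4) are assumptions made for the example; java.util.Arrays and the types listed above are assumed to be imported.

// Hypothetical helper (not part of TransactionManager): gate epoch bumping on the
// coordinator's advertised InitProducerId version range. The range 0-4 is assumed.
static boolean coordinatorSupportsBumpingEpoch() {
    NodeApiVersions nodeApiVersions = NodeApiVersions.create(Arrays.asList(
        new ApiVersion().setApiKey(ApiKeys.INIT_PRODUCER_ID.id).setMinVersion((short) 0).setMaxVersion((short) 4)));
    ApiVersion initProducerIdVersion = nodeApiVersions.apiVersion(ApiKeys.INIT_PRODUCER_ID);
    // Mirrors the maxVersion() >= 3 check in handleCoordinatorReady above.
    return initProducerIdVersion != null && initProducerIdVersion.maxVersion() >= 3;
}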

Example 24 with ApiVersion

Use of org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion in the apache/kafka project.

The class SaslAuthenticatorTest, method startServerApiVersionsUnsupportedByClient.

private NioEchoServer startServerApiVersionsUnsupportedByClient(final SecurityProtocol securityProtocol, String saslMechanism) throws Exception {
    final ListenerName listenerName = ListenerName.forSecurityProtocol(securityProtocol);
    final Map<String, ?> configs = Collections.emptyMap();
    final JaasContext jaasContext = JaasContext.loadServerContext(listenerName, saslMechanism, configs);
    final Map<String, JaasContext> jaasContexts = Collections.singletonMap(saslMechanism, jaasContext);
    boolean isScram = ScramMechanism.isScram(saslMechanism);
    if (isScram)
        ScramCredentialUtils.createCache(credentialCache, Arrays.asList(saslMechanism));
    Supplier<ApiVersionsResponse> apiVersionSupplier = () -> {
        ApiVersionCollection versionCollection = new ApiVersionCollection(2);
        versionCollection.add(new ApiVersion().setApiKey(ApiKeys.SASL_HANDSHAKE.id).setMinVersion((short) 0).setMaxVersion((short) 100));
        versionCollection.add(new ApiVersion().setApiKey(ApiKeys.SASL_AUTHENTICATE.id).setMinVersion((short) 0).setMaxVersion((short) 100));
        return new ApiVersionsResponse(new ApiVersionsResponseData().setApiKeys(versionCollection));
    };
    SaslChannelBuilder serverChannelBuilder = new SaslChannelBuilder(Mode.SERVER, jaasContexts, securityProtocol, listenerName, false, saslMechanism, true, credentialCache, null, null, time, new LogContext(), apiVersionSupplier);
    serverChannelBuilder.configure(saslServerConfigs);
    server = new NioEchoServer(listenerName, securityProtocol, new TestSecurityConfig(saslServerConfigs), "localhost", serverChannelBuilder, credentialCache, time);
    server.start();
    return server;
}
Also used: ApiVersionCollection(org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersionCollection), ApiVersionsResponse(org.apache.kafka.common.requests.ApiVersionsResponse), ApiVersion(org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion), LogContext(org.apache.kafka.common.utils.LogContext), ListenerName(org.apache.kafka.common.network.ListenerName), JaasContext(org.apache.kafka.common.security.JaasContext), NioEchoServer(org.apache.kafka.common.network.NioEchoServer), TestSecurityConfig(org.apache.kafka.common.security.TestSecurityConfig), SaslChannelBuilder(org.apache.kafka.common.network.SaslChannelBuilder), ApiVersionsResponseData(org.apache.kafka.common.message.ApiVersionsResponseData)

Example 25 with ApiVersion

Use of org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion in the apache/kafka project.

The class SaslAuthenticatorTest, method startServerWithoutSaslAuthenticateHeader.

private NioEchoServer startServerWithoutSaslAuthenticateHeader(final SecurityProtocol securityProtocol, String saslMechanism) throws Exception {
    final ListenerName listenerName = ListenerName.forSecurityProtocol(securityProtocol);
    final Map<String, ?> configs = Collections.emptyMap();
    final JaasContext jaasContext = JaasContext.loadServerContext(listenerName, saslMechanism, configs);
    final Map<String, JaasContext> jaasContexts = Collections.singletonMap(saslMechanism, jaasContext);
    boolean isScram = ScramMechanism.isScram(saslMechanism);
    if (isScram)
        ScramCredentialUtils.createCache(credentialCache, Arrays.asList(saslMechanism));
    Supplier<ApiVersionsResponse> apiVersionSupplier = () -> {
        ApiVersionsResponse defaultApiVersionResponse = ApiVersionsResponse.defaultApiVersionsResponse(ApiMessageType.ListenerType.ZK_BROKER);
        ApiVersionCollection apiVersions = new ApiVersionCollection();
        for (ApiVersion apiVersion : defaultApiVersionResponse.data().apiKeys()) {
            if (apiVersion.apiKey() != ApiKeys.SASL_AUTHENTICATE.id) {
                // ApiVersion can NOT be reused in second ApiVersionCollection
                // due to the internal pointers it contains.
                apiVersions.add(apiVersion.duplicate());
            }
        }
        ApiVersionsResponseData data = new ApiVersionsResponseData().setErrorCode(Errors.NONE.code()).setThrottleTimeMs(0).setApiKeys(apiVersions);
        return new ApiVersionsResponse(data);
    };
    SaslChannelBuilder serverChannelBuilder = new SaslChannelBuilder(Mode.SERVER, jaasContexts, securityProtocol, listenerName, false, saslMechanism, true, credentialCache, null, null, time, new LogContext(), apiVersionSupplier) {

        @Override
        protected SaslServerAuthenticator buildServerAuthenticator(Map<String, ?> configs, Map<String, AuthenticateCallbackHandler> callbackHandlers, String id, TransportLayer transportLayer, Map<String, Subject> subjects, Map<String, Long> connectionsMaxReauthMsByMechanism, ChannelMetadataRegistry metadataRegistry) {
            return new SaslServerAuthenticator(configs, callbackHandlers, id, subjects, null, listenerName, securityProtocol, transportLayer, connectionsMaxReauthMsByMechanism, metadataRegistry, time, apiVersionSupplier) {

                @Override
                protected void enableKafkaSaslAuthenticateHeaders(boolean flag) {
                // Don't enable Kafka SASL_AUTHENTICATE headers
                }
            };
        }
    };
    serverChannelBuilder.configure(saslServerConfigs);
    server = new NioEchoServer(listenerName, securityProtocol, new TestSecurityConfig(saslServerConfigs), "localhost", serverChannelBuilder, credentialCache, time);
    server.start();
    return server;
}
Also used: ApiVersionCollection(org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersionCollection), ApiVersionsResponse(org.apache.kafka.common.requests.ApiVersionsResponse), ApiVersion(org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion), ChannelMetadataRegistry(org.apache.kafka.common.network.ChannelMetadataRegistry), LogContext(org.apache.kafka.common.utils.LogContext), ListenerName(org.apache.kafka.common.network.ListenerName), TransportLayer(org.apache.kafka.common.network.TransportLayer), JaasContext(org.apache.kafka.common.security.JaasContext), NioEchoServer(org.apache.kafka.common.network.NioEchoServer), TestSecurityConfig(org.apache.kafka.common.security.TestSecurityConfig), SaslChannelBuilder(org.apache.kafka.common.network.SaslChannelBuilder), Map(java.util.Map), HashMap(java.util.HashMap), ApiVersionsResponseData(org.apache.kafka.common.message.ApiVersionsResponseData)
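The duplicate() call in the loop above is the key detail: the same ApiVersion element cannot simply be added to a second ApiVersionCollection because it keeps internal linkage to the collection it already belongs to. Below is a minimal sketch of just that copy pattern, reusing the calls from the example above; the choice of ZK_BROKER as the listener type is carried over from it.

// Copy every ApiVersion from the default response into a fresh collection.
// duplicate() creates a detached copy so the element can be added to the new collection.
ApiVersionsResponse defaultResponse = ApiVersionsResponse.defaultApiVersionsResponse(ApiMessageType.ListenerType.ZK_BROKER);
ApiVersionCollection copied = new ApiVersionCollection();
for (ApiVersion apiVersion : defaultResponse.data().apiKeys()) {
    copied.add(apiVersion.duplicate());
}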

Aggregations

ApiVersion (org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion): 28
Test (org.junit.jupiter.api.Test): 12
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 9
ApiKeys (org.apache.kafka.common.protocol.ApiKeys): 7
ApiVersionCollection (org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersionCollection): 6
ApiVersionsResponse (org.apache.kafka.common.requests.ApiVersionsResponse): 6
NodeApiVersions (org.apache.kafka.clients.NodeApiVersions): 5
ApiVersionsResponseData (org.apache.kafka.common.message.ApiVersionsResponseData): 4
RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata): 3
EnumSource (org.junit.jupiter.params.provider.EnumSource): 3
ByteBuffer (java.nio.ByteBuffer): 2
Node (org.apache.kafka.common.Node): 2
DeleteGroupsResponseData (org.apache.kafka.common.message.DeleteGroupsResponseData): 2
DeletableGroupResult (org.apache.kafka.common.message.DeleteGroupsResponseData.DeletableGroupResult): 2
DeletableGroupResultCollection (org.apache.kafka.common.message.DeleteGroupsResponseData.DeletableGroupResultCollection): 2
ListenerName (org.apache.kafka.common.network.ListenerName): 2
NioEchoServer (org.apache.kafka.common.network.NioEchoServer): 2
SaslChannelBuilder (org.apache.kafka.common.network.SaslChannelBuilder): 2
ApiVersionsRequest (org.apache.kafka.common.requests.ApiVersionsRequest): 2
DeleteGroupsResponse (org.apache.kafka.common.requests.DeleteGroupsResponse): 2