Use of org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion in project kafka by apache.
From the class KafkaAdminClientTest, the method testDeleteConsumerGroupsWithOlderBroker:
@Test
public void testDeleteConsumerGroupsWithOlderBroker() throws Exception {
    final List<String> groupIds = singletonList("groupId");
    ApiVersion findCoordinatorV3 = new ApiVersion()
            .setApiKey(ApiKeys.FIND_COORDINATOR.id)
            .setMinVersion((short) 0)
            .setMaxVersion((short) 3);
    ApiVersion describeGroups = new ApiVersion()
            .setApiKey(ApiKeys.DESCRIBE_GROUPS.id)
            .setMinVersion((short) 0)
            .setMaxVersion(ApiKeys.DELETE_GROUPS.latestVersion());
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(Arrays.asList(findCoordinatorV3, describeGroups)));

        // Retriable FindCoordinatorResponse errors should be retried
        env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode()));
        env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS, Node.noNode()));
        env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));

        final DeletableGroupResultCollection validResponse = new DeletableGroupResultCollection();
        validResponse.add(new DeletableGroupResult()
                .setGroupId("groupId")
                .setErrorCode(Errors.NONE.code()));
        env.kafkaClient().prepareResponse(new DeleteGroupsResponse(new DeleteGroupsResponseData().setResults(validResponse)));

        final DeleteConsumerGroupsResult result = env.adminClient().deleteConsumerGroups(groupIds);
        final KafkaFuture<Void> results = result.deletedGroups().get("groupId");
        assertNull(results.get());

        // Non-retriable errors should fail the future
        env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.GROUP_AUTHORIZATION_FAILED, Node.noNode()));
        DeleteConsumerGroupsResult errorResult = env.adminClient().deleteConsumerGroups(groupIds);
        TestUtils.assertFutureError(errorResult.deletedGroups().get("groupId"), GroupAuthorizationException.class);

        // Retriable errors should be retried
        env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        final DeletableGroupResultCollection errorResponse = new DeletableGroupResultCollection();
        errorResponse.add(new DeletableGroupResult()
                .setGroupId("groupId")
                .setErrorCode(Errors.COORDINATOR_LOAD_IN_PROGRESS.code()));
        env.kafkaClient().prepareResponse(new DeleteGroupsResponse(new DeleteGroupsResponseData().setResults(errorResponse)));

        /*
         * A NOT_COORDINATOR error means the coordinator has moved, which makes the admin
         * client rediscover the coordinator and retry the whole operation. Each such retry
         * therefore needs a fresh FindCoordinatorResponse queued up. The same applies to
         * the COORDINATOR_NOT_AVAILABLE error response below.
         */
        DeletableGroupResultCollection coordinatorMoved = new DeletableGroupResultCollection();
        coordinatorMoved.add(new DeletableGroupResult()
                .setGroupId("groupId")
                .setErrorCode(Errors.NOT_COORDINATOR.code()));
        env.kafkaClient().prepareResponse(new DeleteGroupsResponse(new DeleteGroupsResponseData().setResults(coordinatorMoved)));
        env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));

        coordinatorMoved = new DeletableGroupResultCollection();
        coordinatorMoved.add(new DeletableGroupResult()
                .setGroupId("groupId")
                .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code()));
        env.kafkaClient().prepareResponse(new DeleteGroupsResponse(new DeleteGroupsResponseData().setResults(coordinatorMoved)));
        env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        env.kafkaClient().prepareResponse(new DeleteGroupsResponse(new DeleteGroupsResponseData().setResults(validResponse)));

        errorResult = env.adminClient().deleteConsumerGroups(groupIds);
        final KafkaFuture<Void> errorResults = errorResult.deletedGroups().get("groupId");
        assertNull(errorResults.get());
    }
}
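The prepareOldFindCoordinatorResponse helper referenced above is defined elsewhere in KafkaAdminClientTest and is not shown on this page. A minimal sketch of what such a helper could look like, assuming the pre-v4 FindCoordinator schema in which the coordinator is a single top-level field (the body below is an illustration, not the actual helper):

private static FindCoordinatorResponse prepareOldFindCoordinatorResponse(Errors error, Node node) {
    // Versions before FindCoordinator v4 return one coordinator at the top level
    // rather than the batched "coordinators" array.
    return new FindCoordinatorResponse(new FindCoordinatorResponseData()
            .setErrorCode(error.code())
            .setNodeId(node.id())
            .setHost(node.host())
            .setPort(node.port()));
}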
Use of org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion in project kafka by apache.
From the class ApiVersionsResponseTest, the method testIntersect:
@Test
public void testIntersect() {
    assertFalse(ApiVersionsResponse.intersect(null, null).isPresent());
    assertThrows(IllegalArgumentException.class,
        () -> ApiVersionsResponse.intersect(new ApiVersion().setApiKey((short) 10), new ApiVersion().setApiKey((short) 3)));

    short min = 0;
    short max = 10;
    ApiVersion thisVersion = new ApiVersion()
            .setApiKey(ApiKeys.FETCH.id)
            .setMinVersion(min)
            .setMaxVersion(Short.MAX_VALUE);
    ApiVersion other = new ApiVersion()
            .setApiKey(ApiKeys.FETCH.id)
            .setMinVersion(Short.MIN_VALUE)
            .setMaxVersion(max);
    ApiVersion expected = new ApiVersion()
            .setApiKey(ApiKeys.FETCH.id)
            .setMinVersion(min)
            .setMaxVersion(max);

    assertFalse(ApiVersionsResponse.intersect(thisVersion, null).isPresent());
    assertFalse(ApiVersionsResponse.intersect(null, other).isPresent());
    assertEquals(expected, ApiVersionsResponse.intersect(thisVersion, other).get());
    // Intersection should be symmetric
    assertEquals(expected, ApiVersionsResponse.intersect(other, thisVersion).get());
}
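The assertions above pin down the expected semantics of intersect: null operands yield an empty Optional, mismatched API keys are rejected, and otherwise the result is the overlap of the two version ranges. A sketch of an implementation consistent with those assertions (an assumption for illustration, not necessarily the code in ApiVersionsResponse):

static Optional<ApiVersion> intersect(ApiVersion thisVersion, ApiVersion other) {
    if (thisVersion == null || other == null)
        return Optional.empty();
    if (thisVersion.apiKey() != other.apiKey())
        throw new IllegalArgumentException("API keys must match: " + thisVersion.apiKey() + " vs " + other.apiKey());
    // The overlap is [max of mins, min of maxes]; an empty range means no common version.
    short minVersion = (short) Math.max(thisVersion.minVersion(), other.minVersion());
    short maxVersion = (short) Math.min(thisVersion.maxVersion(), other.maxVersion());
    return minVersion > maxVersion
            ? Optional.empty()
            : Optional.of(new ApiVersion()
                    .setApiKey(thisVersion.apiKey())
                    .setMinVersion(minVersion)
                    .setMaxVersion(maxVersion));
}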
Use of org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion in project kafka by apache.
From the class TransactionManager, the method handleCoordinatorReady:
void handleCoordinatorReady() {
    NodeApiVersions nodeApiVersions = transactionCoordinator != null
            ? apiVersions.get(transactionCoordinator.idString())
            : null;
    ApiVersion initProducerIdVersion = nodeApiVersions != null
            ? nodeApiVersions.apiVersion(ApiKeys.INIT_PRODUCER_ID)
            : null;
    // Producer epoch bumping requires InitProducerId v3 or later on the coordinator.
    this.coordinatorSupportsBumpingEpoch = initProducerIdVersion != null
            && initProducerIdVersion.maxVersion() >= 3;
}
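A short sketch of how that check behaves, using the NodeApiVersions.create(apiKey, minVersion, maxVersion) test helper from the Kafka clients module (the standalone setup below is hypothetical):

NodeApiVersions versions = NodeApiVersions.create(ApiKeys.INIT_PRODUCER_ID.id, (short) 0, (short) 3);
ApiVersion initProducerId = versions.apiVersion(ApiKeys.INIT_PRODUCER_ID);
// InitProducerId v3+ is what allows the coordinator to bump the producer epoch (KIP-360),
// so a broker advertising max version 3 would be treated as supporting epoch bumping.
boolean supportsBumping = initProducerId != null && initProducerId.maxVersion() >= 3;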
Use of org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion in project kafka by apache.
From the class SaslAuthenticatorTest, the method startServerApiVersionsUnsupportedByClient:
private NioEchoServer startServerApiVersionsUnsupportedByClient(final SecurityProtocol securityProtocol, String saslMechanism) throws Exception {
    final ListenerName listenerName = ListenerName.forSecurityProtocol(securityProtocol);
    final Map<String, ?> configs = Collections.emptyMap();
    final JaasContext jaasContext = JaasContext.loadServerContext(listenerName, saslMechanism, configs);
    final Map<String, JaasContext> jaasContexts = Collections.singletonMap(saslMechanism, jaasContext);

    boolean isScram = ScramMechanism.isScram(saslMechanism);
    if (isScram)
        ScramCredentialUtils.createCache(credentialCache, Arrays.asList(saslMechanism));

    Supplier<ApiVersionsResponse> apiVersionSupplier = () -> {
        ApiVersionCollection versionCollection = new ApiVersionCollection(2);
        // Advertise SASL API version ranges reaching far beyond anything the client implements.
        versionCollection.add(new ApiVersion().setApiKey(ApiKeys.SASL_HANDSHAKE.id).setMinVersion((short) 0).setMaxVersion((short) 100));
        versionCollection.add(new ApiVersion().setApiKey(ApiKeys.SASL_AUTHENTICATE.id).setMinVersion((short) 0).setMaxVersion((short) 100));
        return new ApiVersionsResponse(new ApiVersionsResponseData().setApiKeys(versionCollection));
    };

    SaslChannelBuilder serverChannelBuilder = new SaslChannelBuilder(Mode.SERVER, jaasContexts, securityProtocol,
            listenerName, false, saslMechanism, true, credentialCache, null, null, time, new LogContext(), apiVersionSupplier);
    serverChannelBuilder.configure(saslServerConfigs);
    server = new NioEchoServer(listenerName, securityProtocol, new TestSecurityConfig(saslServerConfigs),
            "localhost", serverChannelBuilder, credentialCache, time);
    server.start();
    return server;
}
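A hypothetical caller for this helper (the test name and assertion are assumptions written in the style of neighboring SaslAuthenticatorTest methods; the actual consuming test is not shown on this page). As the helper's name suggests, a client is expected to fail to authenticate against these advertised version ranges:

@Test
public void testServerApiVersionsUnsupportedByClient() throws Exception {
    SecurityProtocol securityProtocol = SecurityProtocol.SASL_PLAINTEXT;
    configureMechanisms("PLAIN", Collections.singletonList("PLAIN"));
    server = startServerApiVersionsUnsupportedByClient(securityProtocol, "PLAIN");
    // The client cannot negotiate a workable SASL version, so the connection
    // should fail during authentication rather than complete the handshake.
    createAndCheckClientConnectionFailure(securityProtocol, "node0");
}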
Use of org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion in project kafka by apache.
From the class SaslAuthenticatorTest, the method startServerWithoutSaslAuthenticateHeader:
private NioEchoServer startServerWithoutSaslAuthenticateHeader(final SecurityProtocol securityProtocol, String saslMechanism) throws Exception {
    final ListenerName listenerName = ListenerName.forSecurityProtocol(securityProtocol);
    final Map<String, ?> configs = Collections.emptyMap();
    final JaasContext jaasContext = JaasContext.loadServerContext(listenerName, saslMechanism, configs);
    final Map<String, JaasContext> jaasContexts = Collections.singletonMap(saslMechanism, jaasContext);

    boolean isScram = ScramMechanism.isScram(saslMechanism);
    if (isScram)
        ScramCredentialUtils.createCache(credentialCache, Arrays.asList(saslMechanism));

    Supplier<ApiVersionsResponse> apiVersionSupplier = () -> {
        ApiVersionsResponse defaultApiVersionResponse = ApiVersionsResponse.defaultApiVersionsResponse(ApiMessageType.ListenerType.ZK_BROKER);
        ApiVersionCollection apiVersions = new ApiVersionCollection();
        for (ApiVersion apiVersion : defaultApiVersionResponse.data().apiKeys()) {
            if (apiVersion.apiKey() != ApiKeys.SASL_AUTHENTICATE.id) {
                // An ApiVersion cannot be reused in a second ApiVersionCollection
                // because of the internal linked-list pointers it contains.
                apiVersions.add(apiVersion.duplicate());
            }
        }
        ApiVersionsResponseData data = new ApiVersionsResponseData()
                .setErrorCode(Errors.NONE.code())
                .setThrottleTimeMs(0)
                .setApiKeys(apiVersions);
        return new ApiVersionsResponse(data);
    };

    SaslChannelBuilder serverChannelBuilder = new SaslChannelBuilder(Mode.SERVER, jaasContexts, securityProtocol,
            listenerName, false, saslMechanism, true, credentialCache, null, null, time, new LogContext(), apiVersionSupplier) {
        @Override
        protected SaslServerAuthenticator buildServerAuthenticator(Map<String, ?> configs,
                                                                   Map<String, AuthenticateCallbackHandler> callbackHandlers,
                                                                   String id,
                                                                   TransportLayer transportLayer,
                                                                   Map<String, Subject> subjects,
                                                                   Map<String, Long> connectionsMaxReauthMsByMechanism,
                                                                   ChannelMetadataRegistry metadataRegistry) {
            return new SaslServerAuthenticator(configs, callbackHandlers, id, subjects, null, listenerName,
                    securityProtocol, transportLayer, connectionsMaxReauthMsByMechanism, metadataRegistry, time, apiVersionSupplier) {
                @Override
                protected void enableKafkaSaslAuthenticateHeaders(boolean flag) {
                    // Don't enable Kafka SASL_AUTHENTICATE headers
                }
            };
        }
    };
    serverChannelBuilder.configure(saslServerConfigs);
    server = new NioEchoServer(listenerName, securityProtocol, new TestSecurityConfig(saslServerConfigs),
            "localhost", serverChannelBuilder, credentialCache, time);
    server.start();
    return server;
}
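The duplicate() call above matters because generated message collections such as ApiVersionCollection are intrusive linked hashes: each element stores its own prev/next link pointers, so a single ApiVersion instance cannot belong to two collections at once. A standalone illustration (an assumption for this page, not code from SaslAuthenticatorTest):

ApiVersion fetch = new ApiVersion()
        .setApiKey(ApiKeys.FETCH.id)
        .setMinVersion((short) 0)
        .setMaxVersion((short) 13);

ApiVersionCollection first = new ApiVersionCollection();
first.add(fetch);

ApiVersionCollection second = new ApiVersionCollection();
// second.add(fetch);          // unsafe: fetch is still linked into `first`
second.add(fetch.duplicate()); // safe: a deep copy with fresh link pointers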