Use of org.apache.kafka.common.requests.DeleteGroupsResponse in project kafka by apache: class KafkaAdminClientTest, method testDeleteConsumerGroupsNumRetries.
@Test
public void testDeleteConsumerGroupsNumRetries() throws Exception {
    final Cluster cluster = mockCluster(3, 0);
    final Time time = new MockTime();
    final List<String> groupIds = singletonList("groupId");
    // With retries set to 0, a single retriable failure exhausts the retry budget immediately
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, AdminClientConfig.RETRIES_CONFIG, "0")) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        final DeletableGroupResultCollection validResponse = new DeletableGroupResultCollection();
        validResponse.add(new DeletableGroupResult().setGroupId("groupId").setErrorCode(Errors.NOT_COORDINATOR.code()));
        // NOT_COORDINATOR is retriable, but with retries=0 the operation cannot be retried
        env.kafkaClient().prepareResponse(new DeleteGroupsResponse(new DeleteGroupsResponseData().setResults(validResponse)));
        env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        final DeleteConsumerGroupsResult result = env.adminClient().deleteConsumerGroups(groupIds);
        TestUtils.assertFutureError(result.all(), TimeoutException.class);
    }
}
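The test hinges on NOT_COORDINATOR being a retriable error: with RETRIES_CONFIG set to "0" the admin client cannot retry, so the combined future fails with a TimeoutException. The same knob is available to callers of a real Admin client; the following is a minimal sketch, where the bootstrap address and group name are placeholder assumptions, not values taken from the test.

import java.util.Properties;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DeleteConsumerGroupsResult;

import static java.util.Collections.singletonList;

public class DeleteGroupNoRetriesSketch {
    public static void main(String[] args) throws InterruptedException {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        props.put(AdminClientConfig.RETRIES_CONFIG, "0");                        // fail fast, as in the test
        try (Admin admin = Admin.create(props)) {
            DeleteConsumerGroupsResult result = admin.deleteConsumerGroups(singletonList("groupId"));
            try {
                result.all().get(); // with retries=0 a retriable failure is not retried and the future fails
            } catch (ExecutionException e) {
                System.err.println("Delete failed: " + e.getCause());
            }
        }
    }
}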
Use of org.apache.kafka.common.requests.DeleteGroupsResponse in project kafka by apache: class KafkaAdminClientTest, method testDeleteMultipleConsumerGroupsWithOlderBroker.
@Test
public void testDeleteMultipleConsumerGroupsWithOlderBroker() throws Exception {
    final List<String> groupIds = asList("group1", "group2");
    ApiVersion findCoordinatorV3 = new ApiVersion()
        .setApiKey(ApiKeys.FIND_COORDINATOR.id)
        .setMinVersion((short) 0)
        .setMaxVersion((short) 3);
    ApiVersion describeGroups = new ApiVersion()
        .setApiKey(ApiKeys.DESCRIBE_GROUPS.id)
        .setMinVersion((short) 0)
        .setMaxVersion(ApiKeys.DELETE_GROUPS.latestVersion());
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(Arrays.asList(findCoordinatorV3, describeGroups)));
        // Dummy response for MockClient to handle the UnsupportedVersionException correctly to switch from batched to un-batched
        env.kafkaClient().prepareResponse(null);
        // Retriable FindCoordinatorResponse errors should be retried
        for (int i = 0; i < groupIds.size(); i++) {
            env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode()));
        }
        for (int i = 0; i < groupIds.size(); i++) {
            env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        }
        final DeletableGroupResultCollection validResponse = new DeletableGroupResultCollection();
        validResponse.add(new DeletableGroupResult().setGroupId("group1").setErrorCode(Errors.NONE.code()));
        validResponse.add(new DeletableGroupResult().setGroupId("group2").setErrorCode(Errors.NONE.code()));
        env.kafkaClient().prepareResponse(new DeleteGroupsResponse(new DeleteGroupsResponseData().setResults(validResponse)));
        final DeleteConsumerGroupsResult result = env.adminClient().deleteConsumerGroups(groupIds);
        final KafkaFuture<Void> results = result.deletedGroups().get("group1");
        assertNull(results.get(5, TimeUnit.SECONDS));
    }
}
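Only the future for "group1" is checked above; deletedGroups() returns one KafkaFuture per requested group, so a caller can report each group's outcome individually instead of relying on the combined all() future. A minimal sketch of that pattern follows, as a hypothetical helper that is not part of the test class (imports listed for completeness).

import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.clients.admin.DeleteConsumerGroupsResult;
import org.apache.kafka.common.KafkaFuture;

// Hypothetical helper: report each group's deletion outcome from a DeleteConsumerGroupsResult,
// e.g. the result of admin.deleteConsumerGroups(asList("group1", "group2")).
static void reportDeletions(DeleteConsumerGroupsResult result) throws InterruptedException {
    for (Map.Entry<String, KafkaFuture<Void>> entry : result.deletedGroups().entrySet()) {
        try {
            entry.getValue().get(5, TimeUnit.SECONDS); // same per-group wait the test uses
            System.out.println("Deleted group " + entry.getKey());
        } catch (ExecutionException | java.util.concurrent.TimeoutException e) {
            System.err.println("Failed to delete " + entry.getKey() + ": " + e);
        }
    }
}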
Use of org.apache.kafka.common.requests.DeleteGroupsResponse in project kafka by apache: class KafkaAdminClientTest, method testDeleteConsumerGroupsWithOlderBroker.
@Test
public void testDeleteConsumerGroupsWithOlderBroker() throws Exception {
    final List<String> groupIds = singletonList("groupId");
    ApiVersion findCoordinatorV3 = new ApiVersion()
        .setApiKey(ApiKeys.FIND_COORDINATOR.id)
        .setMinVersion((short) 0)
        .setMaxVersion((short) 3);
    ApiVersion describeGroups = new ApiVersion()
        .setApiKey(ApiKeys.DESCRIBE_GROUPS.id)
        .setMinVersion((short) 0)
        .setMaxVersion(ApiKeys.DELETE_GROUPS.latestVersion());
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(Arrays.asList(findCoordinatorV3, describeGroups)));
        // Retriable FindCoordinatorResponse errors should be retried
        env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode()));
        env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS, Node.noNode()));
        env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        final DeletableGroupResultCollection validResponse = new DeletableGroupResultCollection();
        validResponse.add(new DeletableGroupResult().setGroupId("groupId").setErrorCode(Errors.NONE.code()));
        env.kafkaClient().prepareResponse(new DeleteGroupsResponse(new DeleteGroupsResponseData().setResults(validResponse)));
        final DeleteConsumerGroupsResult result = env.adminClient().deleteConsumerGroups(groupIds);
        final KafkaFuture<Void> results = result.deletedGroups().get("groupId");
        assertNull(results.get());
        // Should throw an error for non-retriable errors
        env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.GROUP_AUTHORIZATION_FAILED, Node.noNode()));
        DeleteConsumerGroupsResult errorResult = env.adminClient().deleteConsumerGroups(groupIds);
        TestUtils.assertFutureError(errorResult.deletedGroups().get("groupId"), GroupAuthorizationException.class);
        // Retriable errors should be retried
        env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        final DeletableGroupResultCollection errorResponse = new DeletableGroupResultCollection();
        errorResponse.add(new DeletableGroupResult().setGroupId("groupId").setErrorCode(Errors.COORDINATOR_LOAD_IN_PROGRESS.code()));
        env.kafkaClient().prepareResponse(new DeleteGroupsResponse(new DeleteGroupsResponseData().setResults(errorResponse)));
        /*
         * Two responses are needed here. The first DeleteGroups call returns NOT_COORDINATOR because the
         * coordinator has moved, which causes the whole operation to be retried, so another
         * FindCoordinatorResponse must be queued up.
         *
         * The same applies to the COORDINATOR_NOT_AVAILABLE error response that follows.
         */
        DeletableGroupResultCollection coordinatorMoved = new DeletableGroupResultCollection();
        coordinatorMoved.add(new DeletableGroupResult().setGroupId("groupId").setErrorCode(Errors.NOT_COORDINATOR.code()));
        env.kafkaClient().prepareResponse(new DeleteGroupsResponse(new DeleteGroupsResponseData().setResults(coordinatorMoved)));
        env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        coordinatorMoved = new DeletableGroupResultCollection();
        coordinatorMoved.add(new DeletableGroupResult().setGroupId("groupId").setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code()));
        env.kafkaClient().prepareResponse(new DeleteGroupsResponse(new DeleteGroupsResponseData().setResults(coordinatorMoved)));
        env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        env.kafkaClient().prepareResponse(new DeleteGroupsResponse(new DeleteGroupsResponseData().setResults(validResponse)));
        errorResult = env.adminClient().deleteConsumerGroups(groupIds);
        final KafkaFuture<Void> errorResults = errorResult.deletedGroups().get("groupId");
        assertNull(errorResults.get());
    }
}
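When the DeleteGroups response itself carries NOT_COORDINATOR or COORDINATOR_NOT_AVAILABLE, the admin client rediscovers the coordinator and retries the operation, which is why each of those prepared responses above is followed by another FindCoordinatorResponse. The sketch below shows how such a per-group error can be read back out of a DeleteGroupsResponse; it assumes the generated accessors data(), results(), and errorCode() that the response and its DeleteGroupsResponseData expose in current Kafka versions.

// Sketch: inspect the per-group error carried by a DeleteGroupsResponse and decide
// whether coordinator rediscovery is needed, mirroring the branch exercised above.
DeletableGroupResultCollection results = new DeletableGroupResultCollection();
results.add(new DeletableGroupResult().setGroupId("groupId").setErrorCode(Errors.NOT_COORDINATOR.code()));
DeleteGroupsResponse response = new DeleteGroupsResponse(new DeleteGroupsResponseData().setResults(results));

for (DeletableGroupResult result : response.data().results()) {
    Errors error = Errors.forCode(result.errorCode());
    if (error == Errors.NOT_COORDINATOR || error == Errors.COORDINATOR_NOT_AVAILABLE) {
        // coordinator moved or unavailable: look it up again and retry the whole delete call
    } else if (error != Errors.NONE) {
        // non-retriable: complete this group's future exceptionally
    }
}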
Use of org.apache.kafka.common.requests.DeleteGroupsResponse in project kafka by apache: class KafkaAdminClientTest, method testDeleteConsumerGroupsRetryBackoff.
@Test
public void testDeleteConsumerGroupsRetryBackoff() throws Exception {
    MockTime time = new MockTime();
    int retryBackoff = 100;
    final List<String> groupIds = singletonList(GROUP_ID);
    try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, mockCluster(3, 0),
            newStrMap(AdminClientConfig.RETRY_BACKOFF_MS_CONFIG, "" + retryBackoff))) {
        MockClient mockClient = env.kafkaClient();
        mockClient.setNodeApiVersions(NodeApiVersions.create());
        AtomicLong firstAttemptTime = new AtomicLong(0);
        AtomicLong secondAttemptTime = new AtomicLong(0);
        mockClient.prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        DeletableGroupResultCollection validResponse = new DeletableGroupResultCollection();
        validResponse.add(new DeletableGroupResult().setGroupId(GROUP_ID).setErrorCode(Errors.NOT_COORDINATOR.code()));
        mockClient.prepareResponse(body -> {
            firstAttemptTime.set(time.milliseconds());
            return true;
        }, new DeleteGroupsResponse(new DeleteGroupsResponseData().setResults(validResponse)));
        mockClient.prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        validResponse = new DeletableGroupResultCollection();
        validResponse.add(new DeletableGroupResult().setGroupId(GROUP_ID).setErrorCode(Errors.NONE.code()));
        mockClient.prepareResponse(body -> {
            secondAttemptTime.set(time.milliseconds());
            return true;
        }, new DeleteGroupsResponse(new DeleteGroupsResponseData().setResults(validResponse)));
        final KafkaFuture<Void> future = env.adminClient().deleteConsumerGroups(groupIds).all();
        TestUtils.waitForCondition(() -> mockClient.numAwaitingResponses() == 1, "Failed awaiting DeleteConsumerGroups first request failure");
        TestUtils.waitForCondition(() -> ((KafkaAdminClient) env.adminClient()).numPendingCalls() == 1, "Failed to add retry DeleteConsumerGroups call on first failure");
        time.sleep(retryBackoff);
        future.get();
        long actualRetryBackoff = secondAttemptTime.get() - firstAttemptTime.get();
        assertEquals(retryBackoff, actualRetryBackoff, "DeleteConsumerGroups retry did not await expected backoff!");
    }
}
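The final assertion is deterministic because MockTime only advances when the test advances it, so the gap between the two captured attempt timestamps is exactly the amount slept. A small self-contained illustration of that property:

import org.apache.kafka.common.utils.MockTime;

public class MockTimeSketch {
    public static void main(String[] args) {
        MockTime time = new MockTime();
        long firstAttempt = time.milliseconds();
        time.sleep(100);                                  // the configured retry.backoff.ms in the test
        long secondAttempt = time.milliseconds();
        System.out.println(secondAttempt - firstAttempt); // prints exactly 100, never more
    }
}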
Use of org.apache.kafka.common.requests.DeleteGroupsResponse in project kafka by apache: class DeleteConsumerGroupsHandlerTest, method handleWithError.
private AdminApiHandler.ApiResult<CoordinatorKey, Void> handleWithError(Errors error) {
    DeleteConsumerGroupsHandler handler = new DeleteConsumerGroupsHandler(logContext);
    DeleteGroupsResponse response = buildResponse(error);
    return handler.handleResponse(new Node(1, "host", 1234), singleton(CoordinatorKey.byGroupId(groupId1)), response);
}
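buildResponse is not shown in this excerpt. A plausible shape, under the assumption that it wraps a single DeletableGroupResult for groupId1 with the supplied error, is sketched below as an illustration, not the verbatim helper from DeleteConsumerGroupsHandlerTest.

// Assumed shape of the omitted buildResponse helper: one result for groupId1
// carrying the supplied error code.
private DeleteGroupsResponse buildResponse(Errors error) {
    DeletableGroupResultCollection results = new DeletableGroupResultCollection();
    results.add(new DeletableGroupResult()
        .setGroupId(groupId1)
        .setErrorCode(error.code()));
    return new DeleteGroupsResponse(new DeleteGroupsResponseData().setResults(results));
}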