Use of org.apache.kafka.common.utils.MockTime in project kafka by apache.
From class KafkaAdminClientTest, method testDescribeProducersTimeout.
@ParameterizedTest
@ValueSource(booleans = { true, false })
public void testDescribeProducersTimeout(boolean timeoutInMetadataLookup) throws Exception {
    MockTime time = new MockTime();
    try (AdminClientUnitTestEnv env = mockClientEnv(time)) {
        TopicPartition topicPartition = new TopicPartition("foo", 0);
        int requestTimeoutMs = 15000;
        if (!timeoutInMetadataLookup) {
            Node leader = env.cluster().nodes().iterator().next();
            expectMetadataRequest(env, topicPartition, leader);
        }

        DescribeProducersOptions options = new DescribeProducersOptions().timeoutMs(requestTimeoutMs);
        DescribeProducersResult result = env.adminClient().describeProducers(singleton(topicPartition), options);
        assertFalse(result.all().isDone());

        // Advance the mock clock by the API timeout so the pending call can expire
        time.sleep(requestTimeoutMs);
        TestUtils.waitForCondition(() -> result.all().isDone(),
            "Future failed to timeout after expiration of timeout");
        assertTrue(result.all().isCompletedExceptionally());
        TestUtils.assertFutureThrows(result.all(), TimeoutException.class);
        assertFalse(env.kafkaClient().hasInFlightRequests());
    }
}
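In this test, MockTime stands in for the wall clock: the admin client's timeout only elapses when the test explicitly calls time.sleep(), which makes expiry deterministic and instantaneous. As a minimal, self-contained sketch of that behavior (the class name MockTimeSketch is made up for illustration and is not part of KafkaAdminClientTest):

import org.apache.kafka.common.utils.MockTime;

public class MockTimeSketch {
    public static void main(String[] args) {
        MockTime time = new MockTime();
        long start = time.milliseconds();

        // The mock clock never advances on its own; sleep() moves it forward explicitly
        time.sleep(15000);

        // The elapsed mock time is exactly the amount slept, with no real waiting
        if (time.milliseconds() - start != 15000) {
            throw new AssertionError("mock clock did not advance as expected");
        }
    }
}

Because no real time passes, the test can simulate a 15-second request timeout instantly.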
Use of org.apache.kafka.common.utils.MockTime in project kafka by apache.
From class KafkaAdminClientTest, method testDeleteConsumerGroupsNumRetries.
@Test
public void testDeleteConsumerGroupsNumRetries() throws Exception {
    final Cluster cluster = mockCluster(3, 0);
    final Time time = new MockTime();
    final List<String> groupIds = singletonList("groupId");

    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster,
            AdminClientConfig.RETRIES_CONFIG, "0")) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));

        final DeletableGroupResultCollection validResponse = new DeletableGroupResultCollection();
        validResponse.add(new DeletableGroupResult()
            .setGroupId("groupId")
            .setErrorCode(Errors.NOT_COORDINATOR.code()));
        env.kafkaClient().prepareResponse(new DeleteGroupsResponse(new DeleteGroupsResponseData().setResults(validResponse)));
        env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));

        final DeleteConsumerGroupsResult result = env.adminClient().deleteConsumerGroups(groupIds);

        // With retries set to 0, the NOT_COORDINATOR error exhausts the retry budget and the call fails
        TestUtils.assertFutureError(result.all(), TimeoutException.class);
    }
}
Use of org.apache.kafka.common.utils.MockTime in project kafka by apache.
From class KafkaAdminClientTest, method testRetryDescribeTransactionsAfterNotCoordinatorError.
@Test
public void testRetryDescribeTransactionsAfterNotCoordinatorError() throws Exception {
    MockTime time = new MockTime();
    int retryBackoffMs = 100;
    Cluster cluster = mockCluster(3, 0);
    Map<String, Object> configOverride = newStrMap(AdminClientConfig.RETRY_BACKOFF_MS_CONFIG, "" + retryBackoffMs);

    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, configOverride)) {
        String transactionalId = "foo";
        Iterator<Node> nodeIterator = env.cluster().nodes().iterator();
        Node coordinator1 = nodeIterator.next();
        Node coordinator2 = nodeIterator.next();

        env.kafkaClient().prepareResponse(
            request -> request instanceof FindCoordinatorRequest,
            new FindCoordinatorResponse(new FindCoordinatorResponseData().setCoordinators(Arrays.asList(
                new FindCoordinatorResponseData.Coordinator()
                    .setKey(transactionalId)
                    .setErrorCode(Errors.NONE.code())
                    .setNodeId(coordinator1.id())
                    .setHost(coordinator1.host())
                    .setPort(coordinator1.port())))));

        env.kafkaClient().prepareResponseFrom(request -> {
            if (!(request instanceof DescribeTransactionsRequest)) {
                return false;
            } else {
                // Backoff needed here for the retry of FindCoordinator
                time.sleep(retryBackoffMs);
                return true;
            }
        }, new DescribeTransactionsResponse(new DescribeTransactionsResponseData().setTransactionStates(singletonList(
            new DescribeTransactionsResponseData.TransactionState()
                .setErrorCode(Errors.NOT_COORDINATOR.code())
                .setTransactionalId(transactionalId)))),
            coordinator1);

        env.kafkaClient().prepareResponse(
            request -> request instanceof FindCoordinatorRequest,
            new FindCoordinatorResponse(new FindCoordinatorResponseData().setCoordinators(Arrays.asList(
                new FindCoordinatorResponseData.Coordinator()
                    .setKey(transactionalId)
                    .setErrorCode(Errors.NONE.code())
                    .setNodeId(coordinator2.id())
                    .setHost(coordinator2.host())
                    .setPort(coordinator2.port())))));

        TransactionDescription expected = new TransactionDescription(
            coordinator2.id(), TransactionState.COMPLETE_COMMIT, 12345L, 15, 10000L, OptionalLong.empty(), emptySet());

        env.kafkaClient().prepareResponseFrom(
            request -> request instanceof DescribeTransactionsRequest,
            new DescribeTransactionsResponse(new DescribeTransactionsResponseData().setTransactionStates(singletonList(
                new DescribeTransactionsResponseData.TransactionState()
                    .setErrorCode(Errors.NONE.code())
                    .setProducerEpoch((short) expected.producerEpoch())
                    .setProducerId(expected.producerId())
                    .setTransactionalId(transactionalId)
                    .setTransactionTimeoutMs(10000)
                    .setTransactionStartTimeMs(-1)
                    .setTransactionState(expected.state().toString())))),
            coordinator2);

        DescribeTransactionsResult result = env.adminClient().describeTransactions(singleton(transactionalId));
        KafkaFuture<TransactionDescription> future = result.description(transactionalId);
        assertEquals(expected, future.get());
    }
}
Use of org.apache.kafka.common.utils.MockTime in project kafka by apache.
From class KafkaAdminClientTest, method testSuccessfulRetryAfterRequestTimeout.
@Test
public void testSuccessfulRetryAfterRequestTimeout() throws Exception {
    HashMap<Integer, Node> nodes = new HashMap<>();
    MockTime time = new MockTime();
    Node node0 = new Node(0, "localhost", 8121);
    nodes.put(0, node0);
    Cluster cluster = new Cluster("mockClusterId", nodes.values(),
        Arrays.asList(new PartitionInfo("foo", 0, node0, new Node[] { node0 }, new Node[] { node0 })),
        Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), nodes.get(0));

    final int requestTimeoutMs = 1000;
    final int retryBackoffMs = 100;
    final int apiTimeoutMs = 3000;

    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster,
            AdminClientConfig.RETRY_BACKOFF_MS_CONFIG, String.valueOf(retryBackoffMs),
            AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, String.valueOf(requestTimeoutMs))) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());

        final ListTopicsResult result = env.adminClient().listTopics(new ListTopicsOptions().timeoutMs(apiTimeoutMs));

        // Wait until the first attempt has been sent, then advance the time
        TestUtils.waitForCondition(() -> env.kafkaClient().hasInFlightRequests(),
            "Timed out waiting for Metadata request to be sent");
        time.sleep(requestTimeoutMs + 1);

        // Wait for the request to be timed out before backing off
        TestUtils.waitForCondition(() -> !env.kafkaClient().hasInFlightRequests(),
            "Timed out waiting for inFlightRequests to be timed out");
        time.sleep(retryBackoffMs);

        // Since api timeout bound is not hit, AdminClient should retry
        TestUtils.waitForCondition(() -> env.kafkaClient().hasInFlightRequests(),
            "Failed to retry Metadata request");
        env.kafkaClient().respond(prepareMetadataResponse(cluster, Errors.NONE));

        assertEquals(1, result.listings().get().size());
        assertEquals("foo", result.listings().get().iterator().next().name());
    }
}
Use of org.apache.kafka.common.utils.MockTime in project kafka by apache.
From class KafkaAdminClientTest, method testDescribeConsumerGroupRetryBackoff.
@Test
public void testDescribeConsumerGroupRetryBackoff() throws Exception {
    MockTime time = new MockTime();
    int retryBackoff = 100;

    try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, mockCluster(3, 0),
            newStrMap(AdminClientConfig.RETRY_BACKOFF_MS_CONFIG, "" + retryBackoff))) {
        MockClient mockClient = env.kafkaClient();
        mockClient.setNodeApiVersions(NodeApiVersions.create());

        AtomicLong firstAttemptTime = new AtomicLong(0);
        AtomicLong secondAttemptTime = new AtomicLong(0);

        mockClient.prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));

        DescribeGroupsResponseData data = new DescribeGroupsResponseData();
        data.groups().add(DescribeGroupsResponse.groupMetadata(GROUP_ID, Errors.NOT_COORDINATOR, "", "", "",
            Collections.emptyList(), Collections.emptySet()));
        // Record the mock time at which the first (failing) attempt is sent
        mockClient.prepareResponse(body -> {
            firstAttemptTime.set(time.milliseconds());
            return true;
        }, new DescribeGroupsResponse(data));

        mockClient.prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));

        data = new DescribeGroupsResponseData();
        data.groups().add(DescribeGroupsResponse.groupMetadata(GROUP_ID, Errors.NONE, "", ConsumerProtocol.PROTOCOL_TYPE, "",
            Collections.emptyList(), Collections.emptySet()));
        // Record the mock time at which the retried attempt is sent
        mockClient.prepareResponse(body -> {
            secondAttemptTime.set(time.milliseconds());
            return true;
        }, new DescribeGroupsResponse(data));

        final KafkaFuture<Map<String, ConsumerGroupDescription>> future =
            env.adminClient().describeConsumerGroups(singletonList(GROUP_ID)).all();

        TestUtils.waitForCondition(() -> mockClient.numAwaitingResponses() == 1,
            "Failed awaiting DescribeConsumerGroup first request failure");
        TestUtils.waitForCondition(() -> ((KafkaAdminClient) env.adminClient()).numPendingCalls() == 1,
            "Failed to add retry DescribeConsumerGroup call on first failure");

        // Advance the mock clock by exactly the configured backoff so the retry can be sent
        time.sleep(retryBackoff);
        future.get();

        long actualRetryBackoff = secondAttemptTime.get() - firstAttemptTime.get();
        assertEquals(retryBackoff, actualRetryBackoff, "DescribeConsumerGroup retry did not await expected backoff!");
    }
}
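The backoff assertion above works because time.milliseconds() reads the same virtual clock that time.sleep() advances, so the difference between the two recorded timestamps is exactly the backoff the test slept through, free of wall-clock jitter. A stripped-down sketch of that measurement idiom (RetryBackoffSketch and its local variables are illustrative names, not taken from the Kafka test):

import java.util.concurrent.atomic.AtomicLong;
import org.apache.kafka.common.utils.MockTime;

public class RetryBackoffSketch {
    public static void main(String[] args) {
        MockTime time = new MockTime();
        long retryBackoffMs = 100;

        // Timestamp of the first (failed) attempt, read from the mock clock
        AtomicLong firstAttemptTime = new AtomicLong(time.milliseconds());

        // The test drives the backoff by advancing the mock clock
        time.sleep(retryBackoffMs);

        // Timestamp of the retried attempt
        AtomicLong secondAttemptTime = new AtomicLong(time.milliseconds());

        long actualRetryBackoff = secondAttemptTime.get() - firstAttemptTime.get();
        if (actualRetryBackoff != retryBackoffMs) {
            throw new AssertionError("unexpected backoff: " + actualRetryBackoff);
        }
    }
}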