Example usage of org.apache.kafka.common.utils.MockTime in the Apache Kafka project, from the class OAuthBearerUnsecuredLoginCallbackHandlerTest, method validOptionsWithExplicitOptionValues.
@SuppressWarnings("unchecked")
@Test
public void validOptionsWithExplicitOptionValues() throws IOException, UnsupportedCallbackException {
    // Verifies that an unsecured JWS token built from explicit login options carries the
    // expected principal, scope, numeric, and list claims, under both the default scope
    // claim name ("scope") and an explicitly configured one.
    String explicitScope1 = "scope1";
    String explicitScope2 = "scope2";
    String explicitScopeClaimName = "putScopeInHere";
    String principalClaimName = "principal";
    // null => exercise the default scope claim name; non-null => exercise the explicit one.
    final String[] scopeClaimNameOptionValues = { null, explicitScopeClaimName };
    for (String scopeClaimNameOptionValue : scopeClaimNameOptionValues) {
        Map<String, String> options = new HashMap<>();
        String user = "user";
        options.put("unsecuredLoginStringClaim_" + principalClaimName, user);
        // List values use their first character as the delimiter; trailing empty segments
        // are expected to be preserved by the parser (asserted below).
        options.put("unsecuredLoginListClaim_" + "list", ",1,2,");
        options.put("unsecuredLoginListClaim_" + "emptyList1", "");
        options.put("unsecuredLoginListClaim_" + "emptyList2", ",");
        options.put("unsecuredLoginNumberClaim_" + "number", "1");
        long lifetimeSeconds = 10000; // fixed typo: was "lifetmeSeconds"
        options.put("unsecuredLoginLifetimeSeconds", String.valueOf(lifetimeSeconds));
        options.put("unsecuredLoginPrincipalClaimName", principalClaimName);
        if (scopeClaimNameOptionValue != null)
            options.put("unsecuredLoginScopeClaimName", scopeClaimNameOptionValue);
        String actualScopeClaimName = scopeClaimNameOptionValue == null ? "scope" : explicitScopeClaimName;
        options.put("unsecuredLoginListClaim_" + actualScopeClaimName, String.format("|%s|%s", explicitScope1, explicitScope2));
        MockTime mockTime = new MockTime();
        OAuthBearerUnsecuredLoginCallbackHandler callbackHandler = createCallbackHandler(options, mockTime);
        OAuthBearerTokenCallback callback = new OAuthBearerTokenCallback();
        callbackHandler.handle(new Callback[] { callback });
        OAuthBearerUnsecuredJws jws = (OAuthBearerUnsecuredJws) callback.token();
        assertNotNull(jws, "create token failed");
        long startMs = mockTime.milliseconds();
        confirmCorrectValues(jws, user, startMs, lifetimeSeconds * 1000);
        Map<String, Object> claims = jws.claims();
        // Exactly the configured claims plus the standard iat/exp timestamps must be present.
        assertEquals(new HashSet<>(Arrays.asList(actualScopeClaimName, principalClaimName, "iat", "exp", "number", "list", "emptyList1", "emptyList2")), claims.keySet());
        assertEquals(new HashSet<>(Arrays.asList(explicitScope1, explicitScope2)), new HashSet<>((List<String>) claims.get(actualScopeClaimName)));
        assertEquals(new HashSet<>(Arrays.asList(explicitScope1, explicitScope2)), jws.scope());
        assertEquals(1.0, jws.claim("number", Number.class));
        // ",1,2," parses to ["1", "2", ""]: the leading empty segment is dropped,
        // the trailing one retained.
        assertEquals(Arrays.asList("1", "2", ""), jws.claim("list", List.class));
        assertEquals(Collections.emptyList(), jws.claim("emptyList1", List.class));
        assertEquals(Collections.emptyList(), jws.claim("emptyList2", List.class));
    }
}
Example usage of org.apache.kafka.common.utils.MockTime in the Apache Kafka project, from the class KafkaAdminClientTest, method testListConsumerGroupOffsetsRetryBackoff.
@Test
public void testListConsumerGroupOffsetsRetryBackoff() throws Exception {
    // Verifies that after a NOT_COORDINATOR failure, the retried OffsetFetch request
    // waits exactly the configured retry backoff before being sent again.
    MockTime time = new MockTime();
    int retryBackoff = 100;
    try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, mockCluster(3, 0),
            newStrMap(AdminClientConfig.RETRY_BACKOFF_MS_CONFIG, "" + retryBackoff))) {
        MockClient client = env.kafkaClient();
        client.setNodeApiVersions(NodeApiVersions.create());

        // Capture the mock-clock timestamp at which each OffsetFetch attempt is matched.
        AtomicLong firstAttemptTimeMs = new AtomicLong(0);
        AtomicLong secondAttemptTimeMs = new AtomicLong(0);

        // First round: coordinator lookup succeeds, then the fetch fails with NOT_COORDINATOR.
        client.prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        client.prepareResponse(request -> {
            firstAttemptTimeMs.set(time.milliseconds());
            return true;
        }, new OffsetFetchResponse(Errors.NOT_COORDINATOR, Collections.emptyMap()));

        // Second round: coordinator re-discovery succeeds and the retried fetch completes.
        client.prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        client.prepareResponse(request -> {
            secondAttemptTimeMs.set(time.milliseconds());
            return true;
        }, new OffsetFetchResponse(Errors.NONE, Collections.emptyMap()));

        final KafkaFuture<Map<TopicPartition, OffsetAndMetadata>> future =
                env.adminClient().listConsumerGroupOffsets("group-0").partitionsToOffsetAndMetadata();

        // Wait until the first failure has been observed and the retry call is queued,
        // then advance the mock clock by exactly the backoff so the retry can proceed.
        TestUtils.waitForCondition(() -> client.numAwaitingResponses() == 1, "Failed awaiting ListConsumerGroupOffsets first request failure");
        TestUtils.waitForCondition(() -> ((KafkaAdminClient) env.adminClient()).numPendingCalls() == 1, "Failed to add retry ListConsumerGroupOffsets call on first failure");
        time.sleep(retryBackoff);

        future.get();

        long observedBackoff = secondAttemptTimeMs.get() - firstAttemptTimeMs.get();
        assertEquals(retryBackoff, observedBackoff, "ListConsumerGroupOffsets retry did not await expected backoff!");
    }
}
Example usage of org.apache.kafka.common.utils.MockTime in the Apache Kafka project, from the class KafkaAdminClientTest, method testListConsumerGroupsMetadataFailure.
@Test
public void testListConsumerGroupsMetadataFailure() throws Exception {
    // Verifies that listConsumerGroups fails with a KafkaException when the metadata
    // response contains no brokers (there is nowhere to send ListGroups requests).
    final Cluster cluster = mockCluster(3, 0);
    final Time time = new MockTime();
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, AdminClientConfig.RETRIES_CONFIG, "0")) {
        MockClient client = env.kafkaClient();
        client.setNodeApiVersions(NodeApiVersions.create());

        // Empty metadata causes the request to fail since we have no list of brokers
        // to send the ListGroups requests to
        client.prepareResponse(RequestTestUtils.metadataResponse(
                Collections.emptyList(),
                env.cluster().clusterResource().clusterId(),
                -1,
                Collections.emptyList()));

        final ListConsumerGroupsResult result = env.adminClient().listConsumerGroups();
        TestUtils.assertFutureError(result.all(), KafkaException.class);
    }
}
Example usage of org.apache.kafka.common.utils.MockTime in the Apache Kafka project, from the class KafkaAdminClientTest, method testDeleteConsumerGroupOffsetsRetryBackoff.
@Test
public void testDeleteConsumerGroupOffsetsRetryBackoff() throws Exception {
    // Verifies that after a NOT_COORDINATOR failure, the retried OffsetDelete request
    // waits exactly the configured retry backoff before being sent again.
    MockTime time = new MockTime();
    int retryBackoff = 100;
    try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, mockCluster(3, 0),
            newStrMap(AdminClientConfig.RETRY_BACKOFF_MS_CONFIG, "" + retryBackoff))) {
        MockClient client = env.kafkaClient();
        client.setNodeApiVersions(NodeApiVersions.create());

        // Capture the mock-clock timestamp at which each OffsetDelete attempt is matched.
        AtomicLong firstAttemptTimeMs = new AtomicLong(0);
        AtomicLong secondAttemptTimeMs = new AtomicLong(0);

        final TopicPartition tp1 = new TopicPartition("foo", 0);

        // First round: coordinator lookup succeeds, then the delete fails with NOT_COORDINATOR.
        client.prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        client.prepareResponse(request -> {
            firstAttemptTimeMs.set(time.milliseconds());
            return true;
        }, prepareOffsetDeleteResponse(Errors.NOT_COORDINATOR));

        // Second round: coordinator re-discovery succeeds and the retried delete completes.
        client.prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        client.prepareResponse(request -> {
            secondAttemptTimeMs.set(time.milliseconds());
            return true;
        }, prepareOffsetDeleteResponse("foo", 0, Errors.NONE));

        final KafkaFuture<Void> future = env.adminClient()
                .deleteConsumerGroupOffsets(GROUP_ID, Stream.of(tp1).collect(Collectors.toSet()))
                .all();

        // Wait until the first failure has been observed and the retry call is queued,
        // then advance the mock clock by exactly the backoff so the retry can proceed.
        TestUtils.waitForCondition(() -> client.numAwaitingResponses() == 1, "Failed awaiting DeleteConsumerGroupOffsets first request failure");
        TestUtils.waitForCondition(() -> ((KafkaAdminClient) env.adminClient()).numPendingCalls() == 1, "Failed to add retry DeleteConsumerGroupOffsets call on first failure");
        time.sleep(retryBackoff);

        future.get();

        long observedBackoff = secondAttemptTimeMs.get() - firstAttemptTimeMs.get();
        assertEquals(retryBackoff, observedBackoff, "DeleteConsumerGroupOffsets retry did not await expected backoff!");
    }
}
Example usage of org.apache.kafka.common.utils.MockTime in the Apache Kafka project, from the class KafkaAdminClientTest, method testDeleteConsumerGroupOffsetsNumRetries.
@Test
public void testDeleteConsumerGroupOffsetsNumRetries() throws Exception {
    // Verifies that with retries configured to 0, a single NOT_COORDINATOR failure
    // surfaces to the caller as a TimeoutException instead of being retried.
    final Cluster cluster = mockCluster(3, 0);
    final Time time = new MockTime();
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, AdminClientConfig.RETRIES_CONFIG, "0")) {
        final TopicPartition tp1 = new TopicPartition("foo", 0);
        MockClient client = env.kafkaClient();
        client.setNodeApiVersions(NodeApiVersions.create());

        // Coordinator lookup succeeds, the delete fails, and a second coordinator
        // response is queued — but with zero retries it must never be consumed.
        client.prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        client.prepareResponse(prepareOffsetDeleteResponse(Errors.NOT_COORDINATOR));
        client.prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));

        final DeleteConsumerGroupOffsetsResult result = env.adminClient()
                .deleteConsumerGroupOffsets(GROUP_ID, Stream.of(tp1).collect(Collectors.toSet()));
        TestUtils.assertFutureError(result.all(), TimeoutException.class);
    }
}
Aggregations