
Example 1 with RequestSpec

Use of org.apache.kafka.clients.admin.internals.AdminApiDriver.RequestSpec in the Apache Kafka project.

From the class AdminApiDriverTest, method testCoalescedStaticAndDynamicFulfillment.

@Test
public void testCoalescedStaticAndDynamicFulfillment() {
    Map<String, String> dynamicMapping = map("foo", "c1");
    Map<String, Integer> staticMapping = map("bar", 1);
    TestContext ctx = new TestContext(staticMapping, dynamicMapping);
    // Initially we expect a lookup for the dynamic key and a
    // fulfillment request for the static key
    LookupResult<String> lookupResult = mapped("foo", 1);
    ctx.lookupStrategy().expectLookup(mkSet("foo"), lookupResult);
    ctx.handler.expectRequest(mkSet("bar"), completed("bar", 10L));
    List<RequestSpec<String>> requestSpecs = ctx.driver.poll();
    assertEquals(2, requestSpecs.size());
    RequestSpec<String> lookupSpec = requestSpecs.get(0);
    assertEquals(mkSet("foo"), lookupSpec.keys);
    ctx.assertLookupResponse(lookupSpec, lookupResult);
    // Receive a disconnect from the fulfillment request so that
    // we have an opportunity to coalesce the keys.
    RequestSpec<String> fulfillmentSpec = requestSpecs.get(1);
    assertEquals(mkSet("bar"), fulfillmentSpec.keys);
    ctx.driver.onFailure(ctx.time.milliseconds(), fulfillmentSpec, new DisconnectException());
    // Now we should get a single coalesced fulfillment request that
    // combines the dynamic and static keys, since both are now mapped
    // to broker 1.
    ctx.handler.reset();
    ctx.handler.expectRequest(mkSet("foo", "bar"), completed("foo", 15L, "bar", 30L));
    List<RequestSpec<String>> coalescedSpecs = ctx.driver.poll();
    assertEquals(1, coalescedSpecs.size());
    RequestSpec<String> coalescedSpec = coalescedSpecs.get(0);
    assertEquals(mkSet("foo", "bar"), coalescedSpec.keys);
    // Disconnect in order to ensure that only the dynamic key is unmapped.
    // Then complete the remaining requests.
    ctx.driver.onFailure(ctx.time.milliseconds(), coalescedSpec, new DisconnectException());
    Map<Set<String>, LookupResult<String>> fooLookupRetry = map(mkSet("foo"), mapped("foo", 3));
    Map<Set<String>, ApiResult<String, Long>> barFulfillmentRetry = map(mkSet("bar"), completed("bar", 30L));
    ctx.poll(fooLookupRetry, barFulfillmentRetry);
    Map<Set<String>, ApiResult<String, Long>> fooFulfillmentRetry = map(mkSet("foo"), completed("foo", 15L));
    ctx.poll(emptyMap(), fooFulfillmentRetry);
    ctx.poll(emptyMap(), emptyMap());
}
Also used : HashSet(java.util.HashSet) Utils.mkSet(org.apache.kafka.common.utils.Utils.mkSet) Set(java.util.Set) DisconnectException(org.apache.kafka.common.errors.DisconnectException) ApiResult(org.apache.kafka.clients.admin.internals.AdminApiHandler.ApiResult) LookupResult(org.apache.kafka.clients.admin.internals.AdminApiLookupStrategy.LookupResult) RequestSpec(org.apache.kafka.clients.admin.internals.AdminApiDriver.RequestSpec) Test(org.junit.jupiter.api.Test)

Example 2 with RequestSpec

Use of org.apache.kafka.clients.admin.internals.AdminApiDriver.RequestSpec in the Apache Kafka project.

From the class AdminApiDriverTest, method testRetryLookupAfterDisconnect.

@Test
public void testRetryLookupAfterDisconnect() {
    TestContext ctx = TestContext.dynamicMapped(map("foo", "c1"));
    int initialLeaderId = 1;
    Map<Set<String>, LookupResult<String>> initialLookup = map(mkSet("foo"), mapped("foo", initialLeaderId));
    ctx.poll(initialLookup, emptyMap());
    assertMappedKey(ctx, "foo", initialLeaderId);
    ctx.handler.expectRequest(mkSet("foo"), completed("foo", 15L));
    List<RequestSpec<String>> requestSpecs = ctx.driver.poll();
    assertEquals(1, requestSpecs.size());
    RequestSpec<String> requestSpec = requestSpecs.get(0);
    assertEquals(OptionalInt.of(initialLeaderId), requestSpec.scope.destinationBrokerId());
    ctx.driver.onFailure(ctx.time.milliseconds(), requestSpec, new DisconnectException());
    assertUnmappedKey(ctx, "foo");
    int retryLeaderId = 2;
    ctx.lookupStrategy().expectLookup(mkSet("foo"), mapped("foo", retryLeaderId));
    List<RequestSpec<String>> retryLookupSpecs = ctx.driver.poll();
    assertEquals(1, retryLookupSpecs.size());
    RequestSpec<String> retryLookupSpec = retryLookupSpecs.get(0);
    assertEquals(ctx.time.milliseconds(), retryLookupSpec.nextAllowedTryMs);
    assertEquals(1, retryLookupSpec.tries);
}
Also used : HashSet(java.util.HashSet) Utils.mkSet(org.apache.kafka.common.utils.Utils.mkSet) Set(java.util.Set) LookupResult(org.apache.kafka.clients.admin.internals.AdminApiLookupStrategy.LookupResult) RequestSpec(org.apache.kafka.clients.admin.internals.AdminApiDriver.RequestSpec) DisconnectException(org.apache.kafka.common.errors.DisconnectException) Test(org.junit.jupiter.api.Test)

Example 3 with RequestSpec

Use of org.apache.kafka.clients.admin.internals.AdminApiDriver.RequestSpec in the Apache Kafka project.

From the class AdminApiDriverTest, method testRetryLookupAndDisableBatchAfterNoBatchedFindCoordinatorsException.

@Test
public void testRetryLookupAndDisableBatchAfterNoBatchedFindCoordinatorsException() {
    MockTime time = new MockTime();
    LogContext lc = new LogContext();
    Set<String> groupIds = new HashSet<>(Arrays.asList("g1", "g2"));
    DeleteConsumerGroupsHandler handler = new DeleteConsumerGroupsHandler(lc);
    AdminApiFuture<CoordinatorKey, Void> future = AdminApiFuture.forKeys(groupIds.stream().map(g -> CoordinatorKey.byGroupId(g)).collect(Collectors.toSet()));
    AdminApiDriver<CoordinatorKey, Void> driver = new AdminApiDriver<>(handler, future, time.milliseconds() + API_TIMEOUT_MS, RETRY_BACKOFF_MS, new LogContext());
    assertTrue(((CoordinatorStrategy) handler.lookupStrategy()).batch);
    List<RequestSpec<CoordinatorKey>> requestSpecs = driver.poll();
    // Expect CoordinatorStrategy to try resolving all coordinators in a single request
    assertEquals(1, requestSpecs.size());
    RequestSpec<CoordinatorKey> requestSpec = requestSpecs.get(0);
    driver.onFailure(time.milliseconds(), requestSpec, new NoBatchedFindCoordinatorsException("message"));
    assertFalse(((CoordinatorStrategy) handler.lookupStrategy()).batch);
    // Batching is now disabled, so we expect one lookup request per groupId
    List<RequestSpec<CoordinatorKey>> retryLookupSpecs = driver.poll();
    assertEquals(groupIds.size(), retryLookupSpecs.size());
    // These new requests are treated as fresh requests, not retries
    for (RequestSpec<CoordinatorKey> retryLookupSpec : retryLookupSpecs) {
        assertEquals(0, retryLookupSpec.nextAllowedTryMs);
        assertEquals(0, retryLookupSpec.tries);
    }
}
Also used: NoBatchedFindCoordinatorsException (org.apache.kafka.common.requests.FindCoordinatorRequest.NoBatchedFindCoordinatorsException), LogContext (org.apache.kafka.common.utils.LogContext), RequestSpec (org.apache.kafka.clients.admin.internals.AdminApiDriver.RequestSpec), MockTime (org.apache.kafka.common.utils.MockTime), HashSet (java.util.HashSet), Test (org.junit.jupiter.api.Test)
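
The test exercises a batch-then-fallback pattern: the lookup strategy first tries to resolve all coordinators in one batched FindCoordinator request, and when an older broker rejects that with NoBatchedFindCoordinatorsException it disables batching and issues one lookup per key, counted as fresh attempts (tries == 0). The sketch below illustrates that pattern in isolation; it is an assumption-laden illustration, not the actual CoordinatorStrategy implementation.

// Illustrative fallback logic, not Kafka's CoordinatorStrategy
class BatchingLookup {
    private boolean batch = true; // start optimistic: one request for all keys

    List<Set<CoordinatorKey>> nextLookups(Set<CoordinatorKey> pending) {
        if (batch) {
            // One batched lookup covering every pending key
            return Collections.singletonList(pending);
        }
        // Batching disabled: one lookup request per key
        return pending.stream()
            .map(Collections::singleton)
            .collect(Collectors.toList());
    }

    void onFailure(Throwable t) {
        if (t instanceof NoBatchedFindCoordinatorsException) {
            // The broker is too old for batched FindCoordinator requests;
            // fall back to per-key lookups and start them from scratch
            batch = false;
        }
    }
}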

Aggregations

HashSet (java.util.HashSet): 3
RequestSpec (org.apache.kafka.clients.admin.internals.AdminApiDriver.RequestSpec): 3
Test (org.junit.jupiter.api.Test): 3
Set (java.util.Set): 2
LookupResult (org.apache.kafka.clients.admin.internals.AdminApiLookupStrategy.LookupResult): 2
DisconnectException (org.apache.kafka.common.errors.DisconnectException): 2
Utils.mkSet (org.apache.kafka.common.utils.Utils.mkSet): 2
ApiResult (org.apache.kafka.clients.admin.internals.AdminApiHandler.ApiResult): 1
NoBatchedFindCoordinatorsException (org.apache.kafka.common.requests.FindCoordinatorRequest.NoBatchedFindCoordinatorsException): 1
LogContext (org.apache.kafka.common.utils.LogContext): 1
MockTime (org.apache.kafka.common.utils.MockTime): 1