Search in sources :

Example 86 with Uuid

use of org.apache.kafka.common.Uuid in project kafka by apache.

The following example shows the write method of the class AclsImage.

/**
 * Emits every ACL in this image as a single batch of metadata records.
 *
 * @param out consumer that receives the batch of serialized ACL records
 */
public void write(Consumer<List<ApiMessageAndVersion>> out) {
    List<ApiMessageAndVersion> records = new ArrayList<>();
    // Each (id, acl) pair becomes one StandardAclWithId record at version 0.
    acls.forEach((id, acl) ->
        records.add(new ApiMessageAndVersion(new StandardAclWithId(id, acl).toRecord(), (short) 0)));
    out.accept(records);
}
Also used : Uuid(org.apache.kafka.common.Uuid) StandardAclWithId(org.apache.kafka.metadata.authorizer.StandardAclWithId) ApiMessageAndVersion(org.apache.kafka.server.common.ApiMessageAndVersion) ArrayList(java.util.ArrayList) StandardAcl(org.apache.kafka.metadata.authorizer.StandardAcl)

Example 87 with Uuid

use of org.apache.kafka.common.Uuid in project kafka by apache.

The following example shows the deleteTopics method of the class MockController.

/**
 * Mock topic deletion: removes each requested topic ID from the in-memory maps
 * and reports a per-topic result.
 *
 * @param deadlineNs ignored by this mock
 * @param topicIds   the topic IDs to delete
 * @return a completed future mapping each topic ID to its deletion outcome;
 *         fails exceptionally when this mock is not the active controller
 */
@Override
public synchronized CompletableFuture<Map<Uuid, ApiError>> deleteTopics(long deadlineNs, Collection<Uuid> topicIds) {
    if (!active) {
        // An inactive controller rejects the whole call rather than per topic.
        CompletableFuture<Map<Uuid, ApiError>> failed = new CompletableFuture<>();
        failed.completeExceptionally(NOT_CONTROLLER_EXCEPTION);
        return failed;
    }
    Map<Uuid, ApiError> outcome = new HashMap<>();
    topicIds.forEach(id -> {
        MockTopic removed = topics.remove(id);
        if (removed != null) {
            // Keep the name-to-id index consistent with the topics map.
            topicNameToId.remove(removed.name);
            outcome.put(id, ApiError.NONE);
        } else {
            outcome.put(id, new ApiError(Errors.UNKNOWN_TOPIC_ID));
        }
    });
    return CompletableFuture.completedFuture(outcome);
}
Also used : CompletableFuture(java.util.concurrent.CompletableFuture) Uuid(org.apache.kafka.common.Uuid) HashMap(java.util.HashMap) ApiError(org.apache.kafka.common.requests.ApiError) HashMap(java.util.HashMap) Map(java.util.Map)

Example 88 with Uuid

use of org.apache.kafka.common.Uuid in project kafka by apache.

The following example shows the testSessionEpochWhenMixedUsageOfTopicIDs method of the class FetchSessionHandlerTest.

/**
 * Verifies that mixing topic-ID usage within one fetch session disables topic IDs
 * and bumps the epoch so the broker will close the session.
 */
@ParameterizedTest
@ValueSource(booleans = { true, false })
public void testSessionEpochWhenMixedUsageOfTopicIDs(boolean startsWithTopicIds) {
    Uuid idForFoo = startsWithTopicIds ? Uuid.randomUuid() : Uuid.ZERO_UUID;
    Uuid idForBar = startsWithTopicIds ? Uuid.ZERO_UUID : Uuid.randomUuid();
    short fetchVersion = startsWithTopicIds ? ApiKeys.FETCH.latestVersion() : 12;
    TopicPartition fooPartition = new TopicPartition("foo", 0);
    TopicPartition barPartition = new TopicPartition("bar", 1);
    FetchSessionHandler sessionHandler = new FetchSessionHandler(LOG_CONTEXT, 1);
    FetchSessionHandler.Builder initialBuilder = sessionHandler.newBuilder();
    initialBuilder.add(fooPartition, new FetchRequest.PartitionData(idForFoo, 0, 100, 200, Optional.empty()));
    FetchSessionHandler.FetchRequestData initialData = initialBuilder.build();
    assertMapsEqual(reqMap(new ReqEntry("foo", idForFoo, 0, 0, 100, 200)), initialData.toSend(), initialData.sessionPartitions());
    assertTrue(initialData.metadata().isFull());
    assertEquals(startsWithTopicIds, initialData.canUseTopicIds());
    FetchResponse firstResponse = FetchResponse.of(Errors.NONE, 0, 123, respMap(new RespEntry("foo", 0, idForFoo, 10, 20)));
    sessionHandler.handleResponse(firstResponse, fetchVersion);
    // Re-add the first partition, then add one with the opposite topic-ID usage.
    FetchSessionHandler.Builder mixedBuilder = sessionHandler.newBuilder();
    mixedBuilder.add(fooPartition, new FetchRequest.PartitionData(idForFoo, 10, 110, 210, Optional.empty()));
    mixedBuilder.add(barPartition, new FetchRequest.PartitionData(idForBar, 0, 100, 200, Optional.empty()));
    FetchSessionHandler.FetchRequestData mixedData = mixedBuilder.build();
    // Same session ID with the next epoch, and topic IDs no longer usable.
    // The receiving broker is responsible for closing the session.
    assertEquals(123, mixedData.metadata().sessionId(), "Did not use same session");
    assertEquals(1, mixedData.metadata().epoch(), "Did not have final epoch");
    assertFalse(mixedData.canUseTopicIds());
}
Also used : Uuid(org.apache.kafka.common.Uuid) TopicPartition(org.apache.kafka.common.TopicPartition) FetchRequest(org.apache.kafka.common.requests.FetchRequest) FetchResponse(org.apache.kafka.common.requests.FetchResponse) ValueSource(org.junit.jupiter.params.provider.ValueSource) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)

Example 89 with Uuid

use of org.apache.kafka.common.Uuid in project kafka by apache.

The following example shows the testTopLevelErrorResetsMetadata method of the class FetchSessionHandlerTest.

/**
 * Verifies that a top-level fetch error resets the session metadata: the session
 * ID is kept but the epoch returns to INITIAL_EPOCH, closing the session on the
 * next request.
 */
@Test
public void testTopLevelErrorResetsMetadata() {
    Map<String, Uuid> idsByTopic = new HashMap<>();
    Map<Uuid, String> topicsById = new HashMap<>();
    FetchSessionHandler sessionHandler = new FetchSessionHandler(LOG_CONTEXT, 1);
    FetchSessionHandler.Builder fullBuilder = sessionHandler.newBuilder();
    addTopicId(idsByTopic, topicsById, "foo", ApiKeys.FETCH.latestVersion());
    Uuid idForFoo = idsByTopic.getOrDefault("foo", Uuid.ZERO_UUID);
    fullBuilder.add(new TopicPartition("foo", 0), new FetchRequest.PartitionData(idForFoo, 0, 100, 200, Optional.empty()));
    fullBuilder.add(new TopicPartition("foo", 1), new FetchRequest.PartitionData(idForFoo, 10, 110, 210, Optional.empty()));
    FetchSessionHandler.FetchRequestData fullData = fullBuilder.build();
    assertEquals(INVALID_SESSION_ID, fullData.metadata().sessionId());
    assertEquals(INITIAL_EPOCH, fullData.metadata().epoch());
    FetchResponse firstResponse = FetchResponse.of(Errors.NONE, 0, 123, respMap(new RespEntry("foo", 0, idsByTopic.get("foo"), 10, 20), new RespEntry("foo", 1, idsByTopic.get("foo"), 10, 20)));
    sessionHandler.handleResponse(firstResponse, ApiKeys.FETCH.latestVersion());
    // Incremental fetch request that introduces a topic ID the broker has never seen.
    FetchSessionHandler.Builder incrementalBuilder = sessionHandler.newBuilder();
    addTopicId(idsByTopic, topicsById, "unknown", ApiKeys.FETCH.latestVersion());
    incrementalBuilder.add(new TopicPartition("unknown", 0), new FetchRequest.PartitionData(idsByTopic.getOrDefault("unknown", Uuid.ZERO_UUID), 0, 100, 200, Optional.empty()));
    FetchSessionHandler.FetchRequestData incrementalData = incrementalBuilder.build();
    assertFalse(incrementalData.metadata().isFull());
    assertEquals(123, incrementalData.metadata().sessionId());
    assertEquals(FetchMetadata.nextEpoch(INITIAL_EPOCH), incrementalData.metadata().epoch());
    // Handle a response that carries a top-level error.
    FetchResponse errorResponse = FetchResponse.of(Errors.UNKNOWN_TOPIC_ID, 0, 123, respMap(new RespEntry("unknown", 0, Uuid.randomUuid(), Errors.UNKNOWN_TOPIC_ID)));
    assertFalse(sessionHandler.handleResponse(errorResponse, ApiKeys.FETCH.latestVersion()));
    // The next request restarts at the initial epoch, which closes the session.
    FetchSessionHandler.Builder closingBuilder = sessionHandler.newBuilder();
    FetchSessionHandler.FetchRequestData closingData = closingBuilder.build();
    assertEquals(123, closingData.metadata().sessionId());
    assertEquals(INITIAL_EPOCH, closingData.metadata().epoch());
}
Also used : HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) FetchResponse(org.apache.kafka.common.requests.FetchResponse) Uuid(org.apache.kafka.common.Uuid) TopicPartition(org.apache.kafka.common.TopicPartition) FetchRequest(org.apache.kafka.common.requests.FetchRequest) Test(org.junit.jupiter.api.Test) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)

Example 90 with Uuid

use of org.apache.kafka.common.Uuid in project kafka by apache.

The following example shows the testIncrementals method of the class FetchSessionHandlerTest.

/**
 * Exercises the incremental fetch session lifecycle: a full request, an
 * incremental request that adds and modifies partitions, and recovery from an
 * INVALID_FETCH_SESSION_EPOCH error via a full request that closes the session.
 */
@Test
public void testIncrementals() {
    Map<String, Uuid> idsByTopic = new HashMap<>();
    Map<Uuid, String> topicsById = new HashMap<>();
    // Cover both an older version without topic IDs and the latest version with them.
    List<Short> fetchVersions = Arrays.asList((short) 12, ApiKeys.FETCH.latestVersion());
    fetchVersions.forEach(fetchVersion -> {
        FetchSessionHandler sessionHandler = new FetchSessionHandler(LOG_CONTEXT, 1);
        FetchSessionHandler.Builder fullBuilder = sessionHandler.newBuilder();
        addTopicId(idsByTopic, topicsById, "foo", fetchVersion);
        Uuid idForFoo = idsByTopic.getOrDefault("foo", Uuid.ZERO_UUID);
        TopicPartition fooPartition0 = new TopicPartition("foo", 0);
        TopicPartition fooPartition1 = new TopicPartition("foo", 1);
        fullBuilder.add(fooPartition0, new FetchRequest.PartitionData(idForFoo, 0, 100, 200, Optional.empty()));
        fullBuilder.add(fooPartition1, new FetchRequest.PartitionData(idForFoo, 10, 110, 210, Optional.empty()));
        FetchSessionHandler.FetchRequestData fullData = fullBuilder.build();
        assertMapsEqual(reqMap(new ReqEntry("foo", idForFoo, 0, 0, 100, 200), new ReqEntry("foo", idForFoo, 1, 10, 110, 210)), fullData.toSend(), fullData.sessionPartitions());
        assertEquals(INVALID_SESSION_ID, fullData.metadata().sessionId());
        assertEquals(INITIAL_EPOCH, fullData.metadata().epoch());
        FetchResponse firstResponse = FetchResponse.of(Errors.NONE, 0, 123, respMap(new RespEntry("foo", 0, idForFoo, 10, 20), new RespEntry("foo", 1, idForFoo, 10, 20)));
        sessionHandler.handleResponse(firstResponse, fetchVersion);
        // Incremental fetch request that adds one partition and modifies another.
        FetchSessionHandler.Builder incrementalBuilder = sessionHandler.newBuilder();
        addTopicId(idsByTopic, topicsById, "bar", fetchVersion);
        Uuid idForBar = idsByTopic.getOrDefault("bar", Uuid.ZERO_UUID);
        TopicPartition barPartition0 = new TopicPartition("bar", 0);
        incrementalBuilder.add(fooPartition0, new FetchRequest.PartitionData(idForFoo, 0, 100, 200, Optional.empty()));
        incrementalBuilder.add(fooPartition1, new FetchRequest.PartitionData(idForFoo, 10, 120, 210, Optional.empty()));
        incrementalBuilder.add(barPartition0, new FetchRequest.PartitionData(idForBar, 20, 200, 200, Optional.empty()));
        FetchSessionHandler.FetchRequestData incrementalData = incrementalBuilder.build();
        assertFalse(incrementalData.metadata().isFull());
        assertMapEquals(reqMap(new ReqEntry("foo", idForFoo, 0, 0, 100, 200), new ReqEntry("foo", idForFoo, 1, 10, 120, 210), new ReqEntry("bar", idForBar, 0, 20, 200, 200)), incrementalData.sessionPartitions());
        assertMapEquals(reqMap(new ReqEntry("bar", idForBar, 0, 20, 200, 200), new ReqEntry("foo", idForFoo, 1, 10, 120, 210)), incrementalData.toSend());
        FetchResponse secondResponse = FetchResponse.of(Errors.NONE, 0, 123, respMap(new RespEntry("foo", 1, idForFoo, 20, 20)));
        sessionHandler.handleResponse(secondResponse, fetchVersion);
        // Without building another request, handle an invalid fetch session epoch
        // error; the next request must then close the session.
        FetchResponse epochErrorResponse = FetchResponse.of(Errors.INVALID_FETCH_SESSION_EPOCH, 0, INVALID_SESSION_ID, respMap());
        sessionHandler.handleResponse(epochErrorResponse, fetchVersion);
        FetchSessionHandler.Builder closingBuilder = sessionHandler.newBuilder();
        closingBuilder.add(fooPartition0, new FetchRequest.PartitionData(idForFoo, 0, 100, 200, Optional.empty()));
        closingBuilder.add(fooPartition1, new FetchRequest.PartitionData(idForFoo, 10, 120, 210, Optional.empty()));
        closingBuilder.add(barPartition0, new FetchRequest.PartitionData(idForBar, 20, 200, 200, Optional.empty()));
        FetchSessionHandler.FetchRequestData closingData = closingBuilder.build();
        assertTrue(closingData.metadata().isFull());
        assertEquals(incrementalData.metadata().sessionId(), closingData.metadata().sessionId());
        assertEquals(INITIAL_EPOCH, closingData.metadata().epoch());
        assertMapsEqual(reqMap(new ReqEntry("foo", idForFoo, 0, 0, 100, 200), new ReqEntry("foo", idForFoo, 1, 10, 120, 210), new ReqEntry("bar", idForBar, 0, 20, 200, 200)), closingData.sessionPartitions(), closingData.toSend());
    });
}
Also used : HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) FetchResponse(org.apache.kafka.common.requests.FetchResponse) Uuid(org.apache.kafka.common.Uuid) TopicPartition(org.apache.kafka.common.TopicPartition) FetchRequest(org.apache.kafka.common.requests.FetchRequest) Test(org.junit.jupiter.api.Test) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)

Aggregations

Uuid (org.apache.kafka.common.Uuid)95 Test (org.junit.jupiter.api.Test)55 HashMap (java.util.HashMap)42 TopicPartition (org.apache.kafka.common.TopicPartition)40 ParameterizedTest (org.junit.jupiter.params.ParameterizedTest)30 ArrayList (java.util.ArrayList)29 Map (java.util.Map)21 ApiMessageAndVersion (org.apache.kafka.server.common.ApiMessageAndVersion)21 LinkedHashMap (java.util.LinkedHashMap)18 List (java.util.List)15 FetchRequest (org.apache.kafka.common.requests.FetchRequest)14 TopicIdPartition (org.apache.kafka.common.TopicIdPartition)13 Errors (org.apache.kafka.common.protocol.Errors)12 FetchResponse (org.apache.kafka.common.requests.FetchResponse)12 Collections (java.util.Collections)11 ByteBuffer (java.nio.ByteBuffer)10 Node (org.apache.kafka.common.Node)10 CreateTopicsResponseData (org.apache.kafka.common.message.CreateTopicsResponseData)10 MetadataResponse (org.apache.kafka.common.requests.MetadataResponse)10 PartitionRegistration (org.apache.kafka.metadata.PartitionRegistration)10