Usage example of org.apache.kafka.common.Uuid in the Apache Kafka project:
class AclsImage, method write.
/**
 * Serializes every ACL held in this image into a single batch of records and
 * hands the batch to the supplied consumer.
 *
 * @param out consumer that receives the list of ACL records, each paired with
 *            record schema version 0
 */
public void write(Consumer<List<ApiMessageAndVersion>> out) {
    List<ApiMessageAndVersion> records = new ArrayList<>();
    // Each map entry becomes one StandardAclWithId record at version 0.
    acls.forEach((id, acl) ->
        records.add(new ApiMessageAndVersion(new StandardAclWithId(id, acl).toRecord(), (short) 0)));
    out.accept(records);
}
Usage example of org.apache.kafka.common.Uuid in the Apache Kafka project:
class MockController, method deleteTopics.
/**
 * Removes the given topics from this mock controller's bookkeeping.
 *
 * <p>If the mock is not the active controller, the returned future fails with
 * {@code NOT_CONTROLLER_EXCEPTION}. Otherwise each topic ID maps to
 * {@code ApiError.NONE} on success or {@code UNKNOWN_TOPIC_ID} if the topic
 * was not present.
 *
 * @param deadlineNs deadline in nanoseconds (ignored by this mock)
 * @param topicIds   IDs of the topics to delete
 * @return a completed future mapping each requested topic ID to its result
 */
@Override
public synchronized CompletableFuture<Map<Uuid, ApiError>> deleteTopics(long deadlineNs, Collection<Uuid> topicIds) {
    if (!active) {
        // Simulate a controller that has lost leadership: fail the whole call.
        CompletableFuture<Map<Uuid, ApiError>> failed = new CompletableFuture<>();
        failed.completeExceptionally(NOT_CONTROLLER_EXCEPTION);
        return failed;
    }
    Map<Uuid, ApiError> perTopicResult = new HashMap<>();
    for (Uuid id : topicIds) {
        MockTopic removed = topics.remove(id);
        if (removed == null) {
            perTopicResult.put(id, new ApiError(Errors.UNKNOWN_TOPIC_ID));
        } else {
            // Keep the name-to-ID index in sync with the topic map.
            topicNameToId.remove(removed.name);
            perTopicResult.put(id, ApiError.NONE);
        }
    }
    return CompletableFuture.completedFuture(perTopicResult);
}
Usage example of org.apache.kafka.common.Uuid in the Apache Kafka project:
class FetchSessionHandlerTest, method testSessionEpochWhenMixedUsageOfTopicIDs.
/**
 * Verifies session epoch behavior when a fetch session mixes partitions that
 * have real topic IDs with partitions that do not: the session ID is kept, the
 * epoch advances, and topic IDs can no longer be used, leaving the broker to
 * close the session.
 */
@ParameterizedTest
@ValueSource(booleans = { true, false })
public void testSessionEpochWhenMixedUsageOfTopicIDs(boolean startsWithTopicIds) {
    // Exactly one of the two topics carries a real ID, depending on the parameter.
    Uuid idForFoo = startsWithTopicIds ? Uuid.randomUuid() : Uuid.ZERO_UUID;
    Uuid idForBar = startsWithTopicIds ? Uuid.ZERO_UUID : Uuid.randomUuid();
    // Version 12 is the last fetch version without topic-ID support.
    short responseVersion = startsWithTopicIds ? ApiKeys.FETCH.latestVersion() : 12;
    TopicPartition fooPartition = new TopicPartition("foo", 0);
    TopicPartition barPartition = new TopicPartition("bar", 1);
    FetchSessionHandler handler = new FetchSessionHandler(LOG_CONTEXT, 1);
    FetchSessionHandler.Builder firstBuilder = handler.newBuilder();
    firstBuilder.add(fooPartition, new FetchRequest.PartitionData(idForFoo, 0, 100, 200, Optional.empty()));
    FetchSessionHandler.FetchRequestData firstData = firstBuilder.build();
    assertMapsEqual(reqMap(new ReqEntry("foo", idForFoo, 0, 0, 100, 200)), firstData.toSend(), firstData.sessionPartitions());
    assertTrue(firstData.metadata().isFull());
    assertEquals(startsWithTopicIds, firstData.canUseTopicIds());
    FetchResponse firstResponse = FetchResponse.of(Errors.NONE, 0, 123, respMap(new RespEntry("foo", 0, idForFoo, 10, 20)));
    handler.handleResponse(firstResponse, responseVersion);
    // Re-add the first partition. Then add a partition with opposite ID usage.
    FetchSessionHandler.Builder secondBuilder = handler.newBuilder();
    secondBuilder.add(fooPartition, new FetchRequest.PartitionData(idForFoo, 10, 110, 210, Optional.empty()));
    secondBuilder.add(barPartition, new FetchRequest.PartitionData(idForBar, 0, 100, 200, Optional.empty()));
    FetchSessionHandler.FetchRequestData secondData = secondBuilder.build();
    // Should have the same session ID, and the next epoch and can not use topic IDs.
    // The receiving broker will handle closing the session.
    assertEquals(123, secondData.metadata().sessionId(), "Did not use same session");
    assertEquals(1, secondData.metadata().epoch(), "Did not have final epoch");
    assertFalse(secondData.canUseTopicIds());
}
Usage example of org.apache.kafka.common.Uuid in the Apache Kafka project:
class FetchSessionHandlerTest, method testTopLevelErrorResetsMetadata.
/**
 * Verifies that a top-level UNKNOWN_TOPIC_ID error in a fetch response resets
 * the session metadata to the initial epoch while keeping the session ID, so
 * that the next request closes the session on the broker.
 */
@Test
public void testTopLevelErrorResetsMetadata() {
    Map<String, Uuid> topicIds = new HashMap<>();
    Map<Uuid, String> topicNames = new HashMap<>();
    FetchSessionHandler handler = new FetchSessionHandler(LOG_CONTEXT, 1);
    FetchSessionHandler.Builder fullFetchBuilder = handler.newBuilder();
    addTopicId(topicIds, topicNames, "foo", ApiKeys.FETCH.latestVersion());
    Uuid fooId = topicIds.getOrDefault("foo", Uuid.ZERO_UUID);
    fullFetchBuilder.add(new TopicPartition("foo", 0), new FetchRequest.PartitionData(fooId, 0, 100, 200, Optional.empty()));
    fullFetchBuilder.add(new TopicPartition("foo", 1), new FetchRequest.PartitionData(fooId, 10, 110, 210, Optional.empty()));
    FetchSessionHandler.FetchRequestData fullFetchData = fullFetchBuilder.build();
    assertEquals(INVALID_SESSION_ID, fullFetchData.metadata().sessionId());
    assertEquals(INITIAL_EPOCH, fullFetchData.metadata().epoch());
    FetchResponse initialResponse = FetchResponse.of(Errors.NONE, 0, 123, respMap(new RespEntry("foo", 0, topicIds.get("foo"), 10, 20), new RespEntry("foo", 1, topicIds.get("foo"), 10, 20)));
    handler.handleResponse(initialResponse, ApiKeys.FETCH.latestVersion());
    // Test an incremental fetch request which adds an ID unknown to the broker.
    FetchSessionHandler.Builder incrementalBuilder = handler.newBuilder();
    addTopicId(topicIds, topicNames, "unknown", ApiKeys.FETCH.latestVersion());
    incrementalBuilder.add(new TopicPartition("unknown", 0), new FetchRequest.PartitionData(topicIds.getOrDefault("unknown", Uuid.ZERO_UUID), 0, 100, 200, Optional.empty()));
    FetchSessionHandler.FetchRequestData incrementalData = incrementalBuilder.build();
    assertFalse(incrementalData.metadata().isFull());
    assertEquals(123, incrementalData.metadata().sessionId());
    assertEquals(FetchMetadata.nextEpoch(INITIAL_EPOCH), incrementalData.metadata().epoch());
    // Return and handle a response with a top level error
    FetchResponse errorResponse = FetchResponse.of(Errors.UNKNOWN_TOPIC_ID, 0, 123, respMap(new RespEntry("unknown", 0, Uuid.randomUuid(), Errors.UNKNOWN_TOPIC_ID)));
    assertFalse(handler.handleResponse(errorResponse, ApiKeys.FETCH.latestVersion()));
    // Ensure we start with a new epoch. This will close the session in the next request.
    FetchSessionHandler.Builder followupBuilder = handler.newBuilder();
    FetchSessionHandler.FetchRequestData followupData = followupBuilder.build();
    assertEquals(123, followupData.metadata().sessionId());
    assertEquals(INITIAL_EPOCH, followupData.metadata().epoch());
}
Usage example of org.apache.kafka.common.Uuid in the Apache Kafka project:
class FetchSessionHandlerTest, method testIncrementals.
/**
 * Exercises an incremental fetch session end to end — initial full fetch,
 * incremental update, and recovery from an INVALID_FETCH_SESSION_EPOCH error —
 * on both an older protocol version without topic IDs and the latest version
 * with them.
 */
@Test
public void testIncrementals() {
    Map<String, Uuid> topicIds = new HashMap<>();
    Map<Uuid, String> topicNames = new HashMap<>();
    // We want to test both on older versions that do not use topic IDs and on newer versions that do.
    List<Short> versions = Arrays.asList((short) 12, ApiKeys.FETCH.latestVersion());
    for (short version : versions) {
        FetchSessionHandler handler = new FetchSessionHandler(LOG_CONTEXT, 1);
        FetchSessionHandler.Builder initialBuilder = handler.newBuilder();
        addTopicId(topicIds, topicNames, "foo", version);
        Uuid fooId = topicIds.getOrDefault("foo", Uuid.ZERO_UUID);
        TopicPartition foo0 = new TopicPartition("foo", 0);
        TopicPartition foo1 = new TopicPartition("foo", 1);
        initialBuilder.add(foo0, new FetchRequest.PartitionData(fooId, 0, 100, 200, Optional.empty()));
        initialBuilder.add(foo1, new FetchRequest.PartitionData(fooId, 10, 110, 210, Optional.empty()));
        FetchSessionHandler.FetchRequestData initialData = initialBuilder.build();
        assertMapsEqual(reqMap(new ReqEntry("foo", fooId, 0, 0, 100, 200), new ReqEntry("foo", fooId, 1, 10, 110, 210)), initialData.toSend(), initialData.sessionPartitions());
        assertEquals(INVALID_SESSION_ID, initialData.metadata().sessionId());
        assertEquals(INITIAL_EPOCH, initialData.metadata().epoch());
        FetchResponse fullResponse = FetchResponse.of(Errors.NONE, 0, 123, respMap(new RespEntry("foo", 0, fooId, 10, 20), new RespEntry("foo", 1, fooId, 10, 20)));
        handler.handleResponse(fullResponse, version);
        // Test an incremental fetch request which adds one partition and modifies another.
        FetchSessionHandler.Builder incrementalBuilder = handler.newBuilder();
        addTopicId(topicIds, topicNames, "bar", version);
        Uuid barId = topicIds.getOrDefault("bar", Uuid.ZERO_UUID);
        TopicPartition bar0 = new TopicPartition("bar", 0);
        incrementalBuilder.add(foo0, new FetchRequest.PartitionData(fooId, 0, 100, 200, Optional.empty()));
        incrementalBuilder.add(foo1, new FetchRequest.PartitionData(fooId, 10, 120, 210, Optional.empty()));
        incrementalBuilder.add(bar0, new FetchRequest.PartitionData(barId, 20, 200, 200, Optional.empty()));
        FetchSessionHandler.FetchRequestData incrementalData = incrementalBuilder.build();
        assertFalse(incrementalData.metadata().isFull());
        // Session tracks all three partitions, but only the changed/new ones are sent.
        assertMapEquals(reqMap(new ReqEntry("foo", fooId, 0, 0, 100, 200), new ReqEntry("foo", fooId, 1, 10, 120, 210), new ReqEntry("bar", barId, 0, 20, 200, 200)), incrementalData.sessionPartitions());
        assertMapEquals(reqMap(new ReqEntry("bar", barId, 0, 20, 200, 200), new ReqEntry("foo", fooId, 1, 10, 120, 210)), incrementalData.toSend());
        FetchResponse incrementalResponse = FetchResponse.of(Errors.NONE, 0, 123, respMap(new RespEntry("foo", 1, fooId, 20, 20)));
        handler.handleResponse(incrementalResponse, version);
        // Skip building a new request. Test that handling an invalid fetch session epoch response results
        // in a request which closes the session.
        FetchResponse errorResponse = FetchResponse.of(Errors.INVALID_FETCH_SESSION_EPOCH, 0, INVALID_SESSION_ID, respMap());
        handler.handleResponse(errorResponse, version);
        FetchSessionHandler.Builder retryBuilder = handler.newBuilder();
        retryBuilder.add(foo0, new FetchRequest.PartitionData(fooId, 0, 100, 200, Optional.empty()));
        retryBuilder.add(foo1, new FetchRequest.PartitionData(fooId, 10, 120, 210, Optional.empty()));
        retryBuilder.add(bar0, new FetchRequest.PartitionData(barId, 20, 200, 200, Optional.empty()));
        FetchSessionHandler.FetchRequestData retryData = retryBuilder.build();
        assertTrue(retryData.metadata().isFull());
        assertEquals(incrementalData.metadata().sessionId(), retryData.metadata().sessionId());
        assertEquals(INITIAL_EPOCH, retryData.metadata().epoch());
        assertMapsEqual(reqMap(new ReqEntry("foo", fooId, 0, 0, 100, 200), new ReqEntry("foo", fooId, 1, 10, 120, 210), new ReqEntry("bar", barId, 0, 20, 200, 200)), retryData.sessionPartitions(), retryData.toSend());
    }
}
Aggregations