Use of org.apache.kafka.common.Uuid in project kafka by apache.
From the class Fetcher, method prepareFetchRequests.
/**
* Create fetch requests for all nodes for which we have assigned partitions
* that have no existing requests in flight.
*/
private Map<Node, FetchSessionHandler.FetchRequestData> prepareFetchRequests() {
    Map<Node, FetchSessionHandler.Builder> fetchable = new LinkedHashMap<>();
    validatePositionsOnMetadataChange();
    long currentTimeMs = time.milliseconds();
    Map<String, Uuid> topicIds = metadata.topicIds();
    for (TopicPartition partition : fetchablePartitions()) {
        FetchPosition position = this.subscriptions.position(partition);
        if (position == null) {
            throw new IllegalStateException("Missing position for fetchable partition " + partition);
        }
        Optional<Node> leaderOpt = position.currentLeader.leader;
        if (!leaderOpt.isPresent()) {
            log.debug("Requesting metadata update for partition {} since the position {} is missing the current leader node", partition, position);
            metadata.requestUpdate();
            continue;
        }
        // Use the preferred read replica if set, otherwise the position's leader
        Node node = selectReadReplica(partition, leaderOpt.get(), currentTimeMs);
        if (client.isUnavailable(node)) {
            client.maybeThrowAuthFailure(node);
            // If we try to send during the reconnect backoff window, then the request is just
            // going to be failed anyway before being sent, so skip the send for now
            log.trace("Skipping fetch for partition {} because node {} is awaiting reconnect backoff", partition, node);
        } else if (this.nodesWithPendingFetchRequests.contains(node.id())) {
            log.trace("Skipping fetch for partition {} because previous request to {} has not been processed", partition, node);
        } else {
            // If there is a leader and no in-flight requests, issue a new fetch
            FetchSessionHandler.Builder builder = fetchable.get(node);
            if (builder == null) {
                int id = node.id();
                FetchSessionHandler handler = sessionHandler(id);
                if (handler == null) {
                    handler = new FetchSessionHandler(logContext, id);
                    sessionHandlers.put(id, handler);
                }
                builder = handler.newBuilder();
                fetchable.put(node, builder);
            }
            builder.add(partition, new FetchRequest.PartitionData(
                topicIds.getOrDefault(partition.topic(), Uuid.ZERO_UUID),
                position.offset,
                FetchRequest.INVALID_LOG_START_OFFSET,
                this.fetchSize,
                position.currentLeader.epoch,
                Optional.empty()));
            log.debug("Added {} fetch request for partition {} at position {} to node {}", isolationLevel, partition, position, node);
        }
    }
    Map<Node, FetchSessionHandler.FetchRequestData> reqs = new LinkedHashMap<>();
    for (Map.Entry<Node, FetchSessionHandler.Builder> entry : fetchable.entrySet()) {
        reqs.put(entry.getKey(), entry.getValue().build());
    }
    return reqs;
}
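A note on the topicIds.getOrDefault(partition.topic(), Uuid.ZERO_UUID) call above: Uuid.ZERO_UUID is the all-zero sentinel the fetcher falls back to when the local metadata has not yet learned a topic's ID. A minimal standalone sketch of that lookup pattern, using only the public org.apache.kafka.common.Uuid API (the topic names and map contents here are hypothetical, not taken from the method above):

    import java.util.Collections;
    import java.util.Map;
    import org.apache.kafka.common.Uuid;

    public class TopicIdLookup {
        public static void main(String[] args) {
            // Hypothetical metadata snapshot: only "orders" has a known topic ID.
            Map<String, Uuid> topicIds = Collections.singletonMap("orders", Uuid.randomUuid());

            // Known topic: resolves to its real, randomly assigned ID.
            Uuid ordersId = topicIds.getOrDefault("orders", Uuid.ZERO_UUID);
            // Unknown topic: falls back to the all-zero sentinel.
            Uuid unknownId = topicIds.getOrDefault("payments", Uuid.ZERO_UUID);

            System.out.println(ordersId.equals(Uuid.ZERO_UUID));  // false
            System.out.println(unknownId.equals(Uuid.ZERO_UUID)); // true
        }
    }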
Use of org.apache.kafka.common.Uuid in project kafka by apache.
From the class RequestResponseTest, method fetchResponseVersionTest.
@Test
public void fetchResponseVersionTest() {
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> responseData = new LinkedHashMap<>();
    Uuid id = Uuid.randomUuid();
    Map<Uuid, String> topicNames = Collections.singletonMap(id, "test");
    TopicPartition tp = new TopicPartition("test", 0);
    MemoryRecords records = MemoryRecords.readableRecords(ByteBuffer.allocate(10));
    FetchResponseData.PartitionData partitionData = new FetchResponseData.PartitionData()
        .setPartitionIndex(0)
        .setHighWatermark(1000000)
        .setLogStartOffset(-1)
        .setRecords(records);
    // Use zero UUID since we are comparing with old request versions
    responseData.put(new TopicIdPartition(Uuid.ZERO_UUID, tp), partitionData);
    LinkedHashMap<TopicPartition, FetchResponseData.PartitionData> tpResponseData = new LinkedHashMap<>();
    tpResponseData.put(tp, partitionData);
    FetchResponse v0Response = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, responseData);
    FetchResponse v1Response = FetchResponse.of(Errors.NONE, 10, INVALID_SESSION_ID, responseData);
    FetchResponse v0Deserialized = FetchResponse.parse(v0Response.serialize((short) 0), (short) 0);
    FetchResponse v1Deserialized = FetchResponse.parse(v1Response.serialize((short) 1), (short) 1);
    assertEquals(0, v0Deserialized.throttleTimeMs(), "Throttle time must be zero");
    assertEquals(10, v1Deserialized.throttleTimeMs(), "Throttle time must be 10");
    assertEquals(tpResponseData, v0Deserialized.responseData(topicNames, (short) 0), "Response data does not match");
    assertEquals(tpResponseData, v1Deserialized.responseData(topicNames, (short) 1), "Response data does not match");
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> idResponseData = new LinkedHashMap<>();
    idResponseData.put(new TopicIdPartition(id, new TopicPartition("test", 0)),
        new FetchResponseData.PartitionData()
            .setPartitionIndex(0)
            .setHighWatermark(1000000)
            .setLogStartOffset(-1)
            .setRecords(records));
    FetchResponse idTestResponse = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, idResponseData);
    FetchResponse v12Deserialized = FetchResponse.parse(idTestResponse.serialize((short) 12), (short) 12);
    FetchResponse newestDeserialized = FetchResponse.parse(idTestResponse.serialize(FETCH.latestVersion()), FETCH.latestVersion());
    assertTrue(v12Deserialized.topicIds().isEmpty());
    assertEquals(1, newestDeserialized.topicIds().size());
    assertTrue(newestDeserialized.topicIds().contains(id));
}
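The old-version comparisons above work because TopicIdPartition keeps the topic name alongside the (possibly zero) topic ID, so a response keyed by ID can still be resolved back to plain TopicPartition keys. A small standalone sketch of that behavior, assuming only the public TopicIdPartition API:

    import org.apache.kafka.common.TopicIdPartition;
    import org.apache.kafka.common.TopicPartition;
    import org.apache.kafka.common.Uuid;

    public class TopicIdPartitionSketch {
        public static void main(String[] args) {
            TopicPartition tp = new TopicPartition("test", 0);
            TopicIdPartition withoutId = new TopicIdPartition(Uuid.ZERO_UUID, tp);
            TopicIdPartition withId = new TopicIdPartition(Uuid.randomUuid(), tp);

            // The name/partition pair survives even when the ID is the zero sentinel...
            System.out.println(withoutId.topicPartition().equals(withId.topicPartition())); // true
            // ...but equality on TopicIdPartition itself takes the topic ID into account.
            System.out.println(withoutId.equals(withId)); // false
        }
    }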
Use of org.apache.kafka.common.Uuid in project kafka by apache.
From the class RequestResponseTest, method createUpdateMetadataRequest.
private UpdateMetadataRequest createUpdateMetadataRequest(short version, String rack) {
    List<UpdateMetadataPartitionState> partitionStates = new ArrayList<>();
    List<Integer> isr = asList(1, 2);
    List<Integer> replicas = asList(1, 2, 3, 4);
    List<Integer> offlineReplicas = emptyList();
    partitionStates.add(new UpdateMetadataPartitionState()
        .setTopicName("topic5")
        .setPartitionIndex(105)
        .setControllerEpoch(0)
        .setLeader(2)
        .setLeaderEpoch(1)
        .setIsr(isr)
        .setZkVersion(2)
        .setReplicas(replicas)
        .setOfflineReplicas(offlineReplicas));
    partitionStates.add(new UpdateMetadataPartitionState()
        .setTopicName("topic5")
        .setPartitionIndex(1)
        .setControllerEpoch(1)
        .setLeader(1)
        .setLeaderEpoch(1)
        .setIsr(isr)
        .setZkVersion(2)
        .setReplicas(replicas)
        .setOfflineReplicas(offlineReplicas));
    partitionStates.add(new UpdateMetadataPartitionState()
        .setTopicName("topic20")
        .setPartitionIndex(1)
        .setControllerEpoch(1)
        .setLeader(0)
        .setLeaderEpoch(1)
        .setIsr(isr)
        .setZkVersion(2)
        .setReplicas(replicas)
        .setOfflineReplicas(offlineReplicas));
    Map<String, Uuid> topicIds = new HashMap<>();
    if (version > 6) {
        topicIds.put("topic5", Uuid.randomUuid());
        topicIds.put("topic20", Uuid.randomUuid());
    }
    SecurityProtocol plaintext = SecurityProtocol.PLAINTEXT;
    List<UpdateMetadataEndpoint> endpoints1 = new ArrayList<>();
    endpoints1.add(new UpdateMetadataEndpoint()
        .setHost("host1")
        .setPort(1223)
        .setSecurityProtocol(plaintext.id)
        .setListener(ListenerName.forSecurityProtocol(plaintext).value()));
    List<UpdateMetadataEndpoint> endpoints2 = new ArrayList<>();
    endpoints2.add(new UpdateMetadataEndpoint()
        .setHost("host1")
        .setPort(1244)
        .setSecurityProtocol(plaintext.id)
        .setListener(ListenerName.forSecurityProtocol(plaintext).value()));
    if (version > 0) {
        SecurityProtocol ssl = SecurityProtocol.SSL;
        endpoints2.add(new UpdateMetadataEndpoint()
            .setHost("host2")
            .setPort(1234)
            .setSecurityProtocol(ssl.id)
            .setListener(ListenerName.forSecurityProtocol(ssl).value()));
        endpoints2.add(new UpdateMetadataEndpoint()
            .setHost("host2")
            .setPort(1334)
            .setSecurityProtocol(ssl.id));
        if (version >= 3)
            endpoints2.get(1).setListener("CLIENT");
    }
    List<UpdateMetadataBroker> liveBrokers = asList(
        new UpdateMetadataBroker().setId(0).setEndpoints(endpoints1).setRack(rack),
        new UpdateMetadataBroker().setId(1).setEndpoints(endpoints2).setRack(rack));
    return new UpdateMetadataRequest.Builder(version, 1, 10, 0, partitionStates, liveBrokers, topicIds).build();
}
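The version > 6 guard reflects the wire format: topic IDs only exist in UpdateMetadata from version 7 onward, so older builders get an empty map. A hypothetical helper that captures the same gate (the class name and TOPIC_IDS_MIN_VERSION constant are illustrative, not from the Kafka codebase):

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.kafka.common.Uuid;

    final class UpdateMetadataTopicIds {
        // Illustrative constant: topic IDs appear in UpdateMetadata from version 7.
        private static final short TOPIC_IDS_MIN_VERSION = 7;

        // Assign a fresh random ID per topic when the target version supports IDs;
        // otherwise return an empty map, matching the version gate above.
        static Map<String, Uuid> topicIdsFor(short version, String... topics) {
            if (version < TOPIC_IDS_MIN_VERSION)
                return Collections.emptyMap();
            Map<String, Uuid> ids = new HashMap<>();
            for (String topic : topics)
                ids.put(topic, Uuid.randomUuid());
            return ids;
        }
    }

With such a helper, the two topicIds.put calls in the method above would collapse to topicIdsFor(version, "topic5", "topic20").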
Use of org.apache.kafka.common.Uuid in project kafka by apache.
From the class LeaderAndIsrRequestTest, method testVersionLogic.
/**
* Verifies the logic we have in LeaderAndIsrRequest to present a unified interface across the various versions
* works correctly. For example, `LeaderAndIsrPartitionState.topicName` is not serialized/deserialized in
* recent versions, but we set it manually so that we can always present the ungrouped partition states
* independently of the version.
*/
@Test
public void testVersionLogic() {
    for (short version : LEADER_AND_ISR.allVersions()) {
        List<LeaderAndIsrPartitionState> partitionStates = asList(
            new LeaderAndIsrPartitionState()
                .setTopicName("topic0")
                .setPartitionIndex(0)
                .setControllerEpoch(2)
                .setLeader(0)
                .setLeaderEpoch(10)
                .setIsr(asList(0, 1))
                .setZkVersion(10)
                .setReplicas(asList(0, 1, 2))
                .setAddingReplicas(asList(3))
                .setRemovingReplicas(asList(2)),
            new LeaderAndIsrPartitionState()
                .setTopicName("topic0")
                .setPartitionIndex(1)
                .setControllerEpoch(2)
                .setLeader(1)
                .setLeaderEpoch(11)
                .setIsr(asList(1, 2, 3))
                .setZkVersion(11)
                .setReplicas(asList(1, 2, 3))
                .setAddingReplicas(emptyList())
                .setRemovingReplicas(emptyList()),
            new LeaderAndIsrPartitionState()
                .setTopicName("topic1")
                .setPartitionIndex(0)
                .setControllerEpoch(2)
                .setLeader(2)
                .setLeaderEpoch(11)
                .setIsr(asList(2, 3, 4))
                .setZkVersion(11)
                .setReplicas(asList(2, 3, 4))
                .setAddingReplicas(emptyList())
                .setRemovingReplicas(emptyList()));
        List<Node> liveNodes = asList(new Node(0, "host0", 9090), new Node(1, "host1", 9091));
        Map<String, Uuid> topicIds = new HashMap<>();
        topicIds.put("topic0", Uuid.randomUuid());
        topicIds.put("topic1", Uuid.randomUuid());
        LeaderAndIsrRequest request = new LeaderAndIsrRequest.Builder(version, 1, 2, 3, partitionStates, topicIds, liveNodes).build();
        List<LeaderAndIsrLiveLeader> liveLeaders = liveNodes.stream()
            .map(n -> new LeaderAndIsrLiveLeader()
                .setBrokerId(n.id())
                .setHostName(n.host())
                .setPort(n.port()))
            .collect(Collectors.toList());
        assertEquals(new HashSet<>(partitionStates), iterableToSet(request.partitionStates()));
        assertEquals(liveLeaders, request.liveLeaders());
        assertEquals(1, request.controllerId());
        assertEquals(2, request.controllerEpoch());
        assertEquals(3, request.brokerEpoch());
        ByteBuffer byteBuffer = request.serialize();
        LeaderAndIsrRequest deserializedRequest = new LeaderAndIsrRequest(new LeaderAndIsrRequestData(new ByteBufferAccessor(byteBuffer), version), version);
        // Adding/removing replicas is only supported from version 3, so the deserialized request won't have
        // them for earlier versions.
        if (version < 3) {
            partitionStates.get(0).setAddingReplicas(emptyList()).setRemovingReplicas(emptyList());
        }
        // Prior to version 2, there were no TopicStates, so the expected TopicStates is an empty map.
        if (version < 2) {
            topicIds = new HashMap<>();
        }
        // In versions 2-4, TopicStates carry no topic ID field, so deserialized requests have
        // Zero Uuids in place.
        if (version > 1 && version < 5) {
            topicIds.put("topic0", Uuid.ZERO_UUID);
            topicIds.put("topic1", Uuid.ZERO_UUID);
        }
        assertEquals(new HashSet<>(partitionStates), iterableToSet(deserializedRequest.partitionStates()));
        assertEquals(topicIds, deserializedRequest.topicIds());
        assertEquals(liveLeaders, deserializedRequest.liveLeaders());
        assertEquals(1, request.controllerId());
        assertEquals(2, request.controllerEpoch());
        assertEquals(3, request.brokerEpoch());
    }
}
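The three version gates above can be read as a single function from the version and the IDs handed to the builder to the topic IDs a deserialized request should report. A hypothetical restatement of that expectation (class and method names are illustrative, not from the test):

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.kafka.common.Uuid;

    final class LeaderAndIsrTopicIdExpectations {
        static Map<String, Uuid> expectedTopicIds(short version, Map<String, Uuid> sent) {
            // Before version 2 there are no TopicStates, so no topic IDs at all.
            if (version < 2)
                return Collections.emptyMap();
            // Versions 2-4 have TopicStates but no ID field, so IDs come back zeroed.
            if (version < 5) {
                Map<String, Uuid> zeroed = new HashMap<>();
                for (String topic : sent.keySet())
                    zeroed.put(topic, Uuid.ZERO_UUID);
                return zeroed;
            }
            // From version 5 onward, real topic IDs round-trip unchanged.
            return sent;
        }
    }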
Use of org.apache.kafka.common.Uuid in project kafka by apache.
From the class LeaderAndIsrResponseTest, method testToString.
@Test
public void testToString() {
    for (short version : LEADER_AND_ISR.allVersions()) {
        LeaderAndIsrResponse response;
        if (version < 5) {
            List<LeaderAndIsrPartitionError> partitions = createPartitions("foo", asList(Errors.NONE, Errors.CLUSTER_AUTHORIZATION_FAILED));
            response = new LeaderAndIsrResponse(new LeaderAndIsrResponseData()
                .setErrorCode(Errors.NONE.code())
                .setPartitionErrors(partitions), version);
            String responseStr = response.toString();
            assertTrue(responseStr.contains(LeaderAndIsrResponse.class.getSimpleName()));
            assertTrue(responseStr.contains(partitions.toString()));
            assertTrue(responseStr.contains("errorCode=" + Errors.NONE.code()));
        } else {
            Uuid id = Uuid.randomUuid();
            LeaderAndIsrTopicErrorCollection topics = createTopic(id, asList(Errors.NONE, Errors.CLUSTER_AUTHORIZATION_FAILED));
            response = new LeaderAndIsrResponse(new LeaderAndIsrResponseData()
                .setErrorCode(Errors.NONE.code())
                .setTopics(topics), version);
            String responseStr = response.toString();
            assertTrue(responseStr.contains(LeaderAndIsrResponse.class.getSimpleName()));
            assertTrue(responseStr.contains(topics.toString()));
            assertTrue(responseStr.contains(id.toString()));
            assertTrue(responseStr.contains("errorCode=" + Errors.NONE.code()));
        }
    }
}
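The responseStr.contains(id.toString()) assertion works because Uuid.toString() renders the ID as a compact URL-safe base64 string, and Uuid.fromString reverses it. A quick standalone round-trip sketch:

    import org.apache.kafka.common.Uuid;

    public class UuidRoundTrip {
        public static void main(String[] args) {
            Uuid id = Uuid.randomUuid();
            String encoded = id.toString(); // URL-safe base64, e.g. "vZKYrsw1QBypN0TZ51Qi6w"
            Uuid decoded = Uuid.fromString(encoded);
            System.out.println(id.equals(decoded)); // true
        }
    }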