Usage example of org.apache.kafka.common.TopicPartition in the Apache Kafka project: the toStruct method of the UpdateMetadataRequest class.
/**
 * Serializes this UpdateMetadata request into a {@link Struct} laid out
 * according to the UPDATE_METADATA request schema for this request's version.
 */
@Override
protected Struct toStruct() {
    short requestVersion = version();
    Struct struct = new Struct(ApiKeys.UPDATE_METADATA_KEY.requestSchema(requestVersion));
    struct.set(CONTROLLER_ID_KEY_NAME, controllerId);
    struct.set(CONTROLLER_EPOCH_KEY_NAME, controllerEpoch);

    // One sub-struct per partition, carrying its full replication state.
    List<Struct> stateStructs = new ArrayList<>(partitionStates.size());
    for (Map.Entry<TopicPartition, PartitionState> entry : partitionStates.entrySet()) {
        TopicPartition tp = entry.getKey();
        PartitionState state = entry.getValue();
        Struct stateStruct = struct.instance(PARTITION_STATES_KEY_NAME)
                .set(TOPIC_KEY_NAME, tp.topic())
                .set(PARTITION_KEY_NAME, tp.partition())
                .set(CONTROLLER_EPOCH_KEY_NAME, state.controllerEpoch)
                .set(LEADER_KEY_NAME, state.leader)
                .set(LEADER_EPOCH_KEY_NAME, state.leaderEpoch)
                .set(ISR_KEY_NAME, state.isr.toArray())
                .set(ZK_VERSION_KEY_NAME, state.zkVersion)
                .set(REPLICAS_KEY_NAME, state.replicas.toArray());
        stateStructs.add(stateStruct);
    }
    struct.set(PARTITION_STATES_KEY_NAME, stateStructs.toArray());

    // One sub-struct per live broker; endpoint layout depends on the version.
    List<Struct> brokerStructs = new ArrayList<>(liveBrokers.size());
    for (Broker broker : liveBrokers) {
        Struct brokerStruct = struct.instance(LIVE_BROKERS_KEY_NAME);
        brokerStruct.set(BROKER_ID_KEY_NAME, broker.id);
        if (requestVersion == 0) {
            // Version 0 encodes a single endpoint's host/port inline on the
            // broker entry (assumes endPoints is non-empty for v0 brokers).
            EndPoint single = broker.endPoints.get(0);
            brokerStruct.set(HOST_KEY_NAME, single.host);
            brokerStruct.set(PORT_KEY_NAME, single.port);
        } else {
            List<Struct> endPointStructs = new ArrayList<>(broker.endPoints.size());
            for (EndPoint endPoint : broker.endPoints) {
                Struct endPointStruct = brokerStruct.instance(ENDPOINTS_KEY_NAME)
                        .set(PORT_KEY_NAME, endPoint.port)
                        .set(HOST_KEY_NAME, endPoint.host)
                        .set(SECURITY_PROTOCOL_TYPE_KEY_NAME, endPoint.securityProtocol.id);
                if (requestVersion >= 3)
                    endPointStruct.set(LISTENER_NAME_KEY_NAME, endPoint.listenerName.value());
                endPointStructs.add(endPointStruct);
            }
            brokerStruct.set(ENDPOINTS_KEY_NAME, endPointStructs.toArray());
            if (requestVersion >= 2)
                brokerStruct.set(RACK_KEY_NAME, broker.rack);
        }
        brokerStructs.add(brokerStruct);
    }
    struct.set(LIVE_BROKERS_KEY_NAME, brokerStructs.toArray());
    return struct;
}
Usage example of org.apache.kafka.common.TopicPartition in the Apache Kafka project: the toStruct method of the ControlledShutdownResponse class.
/**
 * Serializes this ControlledShutdown response into a {@link Struct} matching
 * the response schema for the given version.
 *
 * @param version the protocol version whose schema should be used
 */
@Override
protected Struct toStruct(short version) {
    Struct struct = new Struct(ApiKeys.CONTROLLED_SHUTDOWN_KEY.responseSchema(version));
    struct.set(ERROR_CODE_KEY_NAME, error.code());
    // One (topic, partition) sub-struct per partition still hosted by the broker.
    List<Struct> remainingStructs = new ArrayList<>(partitionsRemaining.size());
    for (TopicPartition tp : partitionsRemaining) {
        remainingStructs.add(struct.instance(PARTITIONS_REMAINING_KEY_NAME)
                .set(TOPIC_KEY_NAME, tp.topic())
                .set(PARTITION_KEY_NAME, tp.partition()));
    }
    struct.set(PARTITIONS_REMAINING_KEY_NAME, remainingStructs.toArray());
    return struct;
}
Usage example of org.apache.kafka.common.TopicPartition in the Apache Kafka project: the toStruct method of the ProduceRequest class.
/**
 * Serializes this Produce request into a {@link Struct} matching the PRODUCE
 * request schema for this request's version, grouping partition records by topic.
 *
 * <p>Visible for testing.
 */
@Override
public Struct toStruct() {
    // Store it in a local variable to protect against concurrent updates
    Map<TopicPartition, MemoryRecords> partitionRecords = partitionRecordsOrFail();
    Struct struct = new Struct(ApiKeys.PRODUCE.requestSchema(version()));
    Map<String, Map<Integer, MemoryRecords>> recordsByTopic = CollectionUtils.groupDataByTopic(partitionRecords);
    struct.set(ACKS_KEY_NAME, acks);
    struct.set(TIMEOUT_KEY_NAME, timeout);
    List<Struct> topicDatas = new ArrayList<>(recordsByTopic.size());
    for (Map.Entry<String, Map<Integer, MemoryRecords>> entry : recordsByTopic.entrySet()) {
        Struct topicData = struct.instance(TOPIC_DATA_KEY_NAME);
        topicData.set(TOPIC_KEY_NAME, entry.getKey());
        // Presize from the known partition count (consistent with the other
        // collection allocations in this file; avoids resizing churn).
        List<Struct> partitionArray = new ArrayList<>(entry.getValue().size());
        for (Map.Entry<Integer, MemoryRecords> partitionEntry : entry.getValue().entrySet()) {
            MemoryRecords records = partitionEntry.getValue();
            Struct part = topicData.instance(PARTITION_DATA_KEY_NAME)
                    .set(PARTITION_KEY_NAME, partitionEntry.getKey())
                    .set(RECORD_SET_KEY_NAME, records);
            partitionArray.add(part);
        }
        topicData.set(PARTITION_DATA_KEY_NAME, partitionArray.toArray());
        topicDatas.add(topicData);
    }
    struct.set(TOPIC_DATA_KEY_NAME, topicDatas.toArray());
    return struct;
}
Usage example of org.apache.kafka.common.TopicPartition in the Apache Kafka project: the toStruct method of the StopReplicaRequest class.
/**
 * Serializes this StopReplica request into a {@link Struct} matching the
 * STOP_REPLICA request schema for this request's version.
 */
@Override
protected Struct toStruct() {
    Struct struct = new Struct(ApiKeys.STOP_REPLICA.requestSchema(version()));
    struct.set(CONTROLLER_ID_KEY_NAME, controllerId);
    struct.set(CONTROLLER_EPOCH_KEY_NAME, controllerEpoch);
    struct.set(DELETE_PARTITIONS_KEY_NAME, deletePartitions);
    // One (topic, partition) sub-struct per affected partition.
    List<Struct> partitionStructs = new ArrayList<>(partitions.size());
    for (TopicPartition tp : partitions) {
        partitionStructs.add(struct.instance(PARTITIONS_KEY_NAME)
                .set(TOPIC_KEY_NAME, tp.topic())
                .set(PARTITION_KEY_NAME, tp.partition()));
    }
    struct.set(PARTITIONS_KEY_NAME, partitionStructs.toArray());
    return struct;
}
Usage example of org.apache.kafka.common.TopicPartition in the Apache Kafka project: the toStruct method of the LeaderAndIsrRequest class.
/**
 * Serializes this LeaderAndIsr request into a {@link Struct} matching the
 * LEADER_AND_ISR request schema for this request's version.
 */
@Override
protected Struct toStruct() {
    short requestVersion = version();
    Struct struct = new Struct(ApiKeys.LEADER_AND_ISR.requestSchema(requestVersion));
    struct.set(CONTROLLER_ID_KEY_NAME, controllerId);
    struct.set(CONTROLLER_EPOCH_KEY_NAME, controllerEpoch);

    // One sub-struct per partition, carrying its full replication state.
    List<Struct> stateStructs = new ArrayList<>(partitionStates.size());
    for (Map.Entry<TopicPartition, PartitionState> entry : partitionStates.entrySet()) {
        TopicPartition tp = entry.getKey();
        PartitionState state = entry.getValue();
        Struct stateStruct = struct.instance(PARTITION_STATES_KEY_NAME)
                .set(TOPIC_KEY_NAME, tp.topic())
                .set(PARTITION_KEY_NAME, tp.partition())
                .set(CONTROLLER_EPOCH_KEY_NAME, state.controllerEpoch)
                .set(LEADER_KEY_NAME, state.leader)
                .set(LEADER_EPOCH_KEY_NAME, state.leaderEpoch)
                .set(ISR_KEY_NAME, state.isr.toArray())
                .set(ZK_VERSION_KEY_NAME, state.zkVersion)
                .set(REPLICAS_KEY_NAME, state.replicas.toArray());
        stateStructs.add(stateStruct);
    }
    struct.set(PARTITION_STATES_KEY_NAME, stateStructs.toArray());

    // One (id, host, port) sub-struct per live leader node.
    List<Struct> leaderStructs = new ArrayList<>(liveLeaders.size());
    for (Node leader : liveLeaders) {
        leaderStructs.add(struct.instance(LIVE_LEADERS_KEY_NAME)
                .set(END_POINT_ID_KEY_NAME, leader.id())
                .set(HOST_KEY_NAME, leader.host())
                .set(PORT_KEY_NAME, leader.port()));
    }
    struct.set(LIVE_LEADERS_KEY_NAME, leaderStructs.toArray());
    return struct;
}
End of aggregated usage examples.