Use of org.apache.kafka.common.TopicPartition in project brave by openzipkin.
From the class TracingConsumer, method poll:
/**
 * Polls the delegate and, when tracing is active, creates consumer spans for the returned records:
 * records without an incoming trace context share one span per topic, while records that carry
 * request-scoped data each get their own span. The span context is injected back into the record headers.
 */
@Override
public ConsumerRecords<K, V> poll(long timeout) {
ConsumerRecords<K, V> records = delegate.poll(timeout);
if (records.isEmpty() || tracing.isNoop())
return records;
Map<String, Span> consumerSpansForTopic = new LinkedHashMap<>();
for (TopicPartition partition : records.partitions()) {
String topic = partition.topic();
List<ConsumerRecord<K, V>> recordsInPartition = records.records(partition);
for (int i = 0, length = recordsInPartition.size(); i < length; i++) {
ConsumerRecord<K, V> record = recordsInPartition.get(i);
TraceContextOrSamplingFlags extracted = extractor.extract(record.headers());
// make or reuse a span for this topic
if (extracted.samplingFlags() != null && extracted.extra().isEmpty()) {
Span consumerSpanForTopic = consumerSpansForTopic.get(topic);
if (consumerSpanForTopic == null) {
consumerSpansForTopic.put(topic, consumerSpanForTopic = tracing.tracer().nextSpan(extracted)
    .name("poll")
    .kind(Span.Kind.CONSUMER)
    .tag(KafkaTags.KAFKA_TOPIC_TAG, topic)
    .start());
}
// no need to remove propagation headers as we failed to extract anything
injector.inject(consumerSpanForTopic.context(), record.headers());
} else {
// we extracted request-scoped data, so cannot share a consumer span.
Span span = tracing.tracer().nextSpan(extracted);
if (!span.isNoop()) {
span.name("poll").kind(Span.Kind.CONSUMER).tag(KafkaTags.KAFKA_TOPIC_TAG, topic);
if (remoteServiceName != null) {
span.remoteEndpoint(Endpoint.newBuilder().serviceName(remoteServiceName).build());
}
// span won't be shared by other records
span.start().finish();
}
// remove prior propagation headers from the record
tracing.propagation().keys().forEach(key -> record.headers().remove(key));
injector.inject(span.context(), record.headers());
}
}
}
consumerSpansForTopic.values().forEach(span -> {
if (remoteServiceName != null) {
span.remoteEndpoint(Endpoint.newBuilder().serviceName(remoteServiceName).build());
}
span.finish();
});
return records;
}
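For context, a minimal sketch of how a consumer with this poll() behavior is typically obtained. This is a hypothetical wiring example based on Brave's kafka-clients module; KafkaTracing.create(...) and consumer(...) are assumed to match the Brave version in use, and consumerProps is a placeholder for real broker and group settings.

import brave.Tracing;
import brave.kafka.clients.KafkaTracing;
import java.util.Properties;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.KafkaConsumer;

Consumer<String, String> buildTracedConsumer(Tracing tracing, Properties consumerProps) {
    // Wrapping the delegate means each poll() runs the per-topic span logic shown above.
    KafkaTracing kafkaTracing = KafkaTracing.create(tracing);
    return kafkaTracing.consumer(new KafkaConsumer<>(consumerProps));
}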
Use of org.apache.kafka.common.TopicPartition in project brave by openzipkin.
From the class TracingCallbackTest, method createRecordMetadata:
RecordMetadata createRecordMetadata() {
TopicPartition tp = new TopicPartition("foo", 0);
long timestamp = 2340234L;
int keySize = 3;
int valueSize = 5;
Long checksum = 908923L;
return new RecordMetadata(tp, -1L, -1L, timestamp, checksum, keySize, valueSize);
}
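A short sketch of how this helper might be exercised in a test; the AssertJ assertions are an assumption about the test classpath, and only fields set explicitly above are checked (the -1L arguments are sentinel offsets).

import static org.assertj.core.api.Assertions.assertThat;
import org.apache.kafka.clients.producer.RecordMetadata;

RecordMetadata metadata = createRecordMetadata();
// The fixed topic, partition, timestamp, and serialized sizes come straight from the helper.
assertThat(metadata.topic()).isEqualTo("foo");
assertThat(metadata.partition()).isEqualTo(0);
assertThat(metadata.timestamp()).isEqualTo(2340234L);
assertThat(metadata.serializedKeySize()).isEqualTo(3);
assertThat(metadata.serializedValueSize()).isEqualTo(5);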
Use of org.apache.kafka.common.TopicPartition in project cruise-control by linkedin.
From the class AnalyzerUtils, method getDiff:
/**
* Get the diff represented by the set of balancing proposals to move from initial to final distribution.
*
* @param initialReplicaDistribution Initial distribution of replicas over the cluster.
* @param initialLeaderDistribution Initial distribution of the leaders.
* @param optimizedClusterModel The optimized cluster model.
* @return The diff represented by the set of balancing proposals to move from initial to final distribution.
*/
public static Set<ExecutionProposal> getDiff(Map<TopicPartition, List<Integer>> initialReplicaDistribution,
                                             Map<TopicPartition, Integer> initialLeaderDistribution,
                                             ClusterModel optimizedClusterModel) {
Map<TopicPartition, List<Integer>> finalDistribution = optimizedClusterModel.getReplicaDistribution();
// Sanity check to make sure that given distributions contain the same replicas.
if (!initialReplicaDistribution.keySet().equals(finalDistribution.keySet())) {
throw new IllegalArgumentException("Attempt to diff distributions with different partitions.");
}
for (Map.Entry<TopicPartition, List<Integer>> entry : initialReplicaDistribution.entrySet()) {
TopicPartition tp = entry.getKey();
List<Integer> initialReplicas = entry.getValue();
if (finalDistribution.get(tp).size() != initialReplicas.size()) {
throw new IllegalArgumentException("Attempt to diff distributions with modified replication factor.");
}
}
// Generate a set of execution proposals to represent the diff between initial and final distribution.
Set<ExecutionProposal> diff = new HashSet<>();
for (Map.Entry<TopicPartition, List<Integer>> entry : initialReplicaDistribution.entrySet()) {
TopicPartition tp = entry.getKey();
List<Integer> initialReplicas = entry.getValue();
List<Integer> finalReplicas = finalDistribution.get(tp);
int finalLeaderId = optimizedClusterModel.partition(tp).leader().broker().id();
// The partition has no change.
if (finalReplicas.equals(initialReplicas) && finalLeaderId == initialLeaderDistribution.get(tp)) {
continue;
}
// We need to adjust the final broker list order to ensure the final leader is the first replica.
if (finalLeaderId != finalReplicas.get(0)) {
int leaderPos = finalReplicas.indexOf(finalLeaderId);
finalReplicas.set(leaderPos, finalReplicas.get(0));
finalReplicas.set(0, finalLeaderId);
}
Double partitionSize = optimizedClusterModel.partition(tp).leader().load().expectedUtilizationFor(Resource.DISK);
diff.add(new ExecutionProposal(tp, partitionSize.intValue(), initialLeaderDistribution.get(tp), initialReplicas, finalReplicas));
}
return diff;
}
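The subtle step above is the reordering that puts the final leader at the head of the replica list. The following standalone sketch, using made-up broker ids, mirrors that swap so the effect is easy to see in isolation.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Illustration of the leader-reordering step: move the final leader to index 0 by swapping.
List<Integer> finalReplicas = new ArrayList<>(Arrays.asList(3, 1, 2));
int finalLeaderId = 2;
if (finalLeaderId != finalReplicas.get(0)) {
    int leaderPos = finalReplicas.indexOf(finalLeaderId);
    finalReplicas.set(leaderPos, finalReplicas.get(0)); // the old head takes the leader's former slot
    finalReplicas.set(0, finalLeaderId);                // the leader becomes the first replica
}
// finalReplicas is now [2, 1, 3]; the replica set is unchanged, only the order differs.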
Use of org.apache.kafka.common.TopicPartition in project cruise-control by linkedin.
From the class ExecutionTaskPlanner, method addExecutionProposal:
/**
* Add a new proposal that needs to be executed.
*
* A proposal will have at least one of the two following actions:
* 1. Replica action (i.e. movement, addition, deletion or order change).
* 2. Leader action (i.e. leader movement)
*
* @param proposal the proposal to execute.
* @param cluster Kafka cluster state.
*/
private void addExecutionProposal(ExecutionProposal proposal, Cluster cluster) {
// Get the execution Id for this proposal;
// 1) Create a replica action task if there is a need for moving replica(s) to reach expected final proposal state.
TopicPartition tp = proposal.topicPartition();
PartitionInfo partitionInfo = cluster.partition(tp);
if (partitionInfo == null) {
LOG.trace("Ignored the attempt to move non-existing partition for topic partition: {}", tp);
return;
}
if (!proposal.isCompletedSuccessfully(partitionInfo.replicas())) {
long replicaActionExecutionId = _executionId.getAndIncrement();
ExecutionTask executionTask = new ExecutionTask(replicaActionExecutionId, proposal, REPLICA_ACTION);
_remainingReplicaMovements.add(executionTask);
_remainingDataToMove += proposal.dataToMoveInMB();
// Add the proposal to source broker's execution plan
int sourceBroker = proposal.oldLeader();
Map<Long, ExecutionTask> sourceBrokerProposalMap = _partMoveProposalByBrokerId.computeIfAbsent(sourceBroker, k -> new HashMap<>());
sourceBrokerProposalMap.put(replicaActionExecutionId, executionTask);
// Add the proposal to destination brokers' execution plan
for (int destinationBroker : proposal.replicasToAdd()) {
Map<Long, ExecutionTask> destinationBrokerProposalMap = _partMoveProposalByBrokerId.computeIfAbsent(destinationBroker, k -> new HashMap<>());
destinationBrokerProposalMap.put(replicaActionExecutionId, executionTask);
}
LOG.trace("Added action {} as replica proposal {}", replicaActionExecutionId, proposal);
}
// 2) Create a leader action task if there is a need for moving the leader to reach expected final proposal state.
if (proposal.hasLeaderAction()) {
Node currentLeader = cluster.leaderFor(tp);
if (currentLeader != null && currentLeader.id() != proposal.newLeader()) {
// Get the execution Id for the leader action proposal execution;
long leaderActionExecutionId = _executionId.getAndIncrement();
ExecutionTask leaderActionTask = new ExecutionTask(leaderActionExecutionId, proposal, LEADER_ACTION);
_leaderMovements.put(leaderActionExecutionId, leaderActionTask);
LOG.trace("Added action {} as leader proposal {}", leaderActionExecutionId, proposal);
}
}
}
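The per-broker bookkeeping above relies on Map.computeIfAbsent to lazily create the inner map the first time a broker id is seen. The standalone sketch below demonstrates that pattern with placeholder task values instead of ExecutionTask instances.

import java.util.HashMap;
import java.util.Map;

// Index tasks by broker id; computeIfAbsent creates the inner map on first use.
Map<Integer, Map<Long, String>> tasksByBrokerId = new HashMap<>();
long executionId = 42L;
for (int brokerId : new int[] {1, 3, 3}) {
    Map<Long, String> brokerTasks = tasksByBrokerId.computeIfAbsent(brokerId, k -> new HashMap<>());
    brokerTasks.put(executionId++, "move-replica");
}
// tasksByBrokerId now has entries for brokers 1 and 3; broker 3 holds two tasks.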
Use of org.apache.kafka.common.TopicPartition in project cruise-control by linkedin.
From the class KafkaCruiseControlServlet, method getPartitionLoad:
private boolean getPartitionLoad(HttpServletRequest request, HttpServletResponse response) throws Exception {
Resource resource;
Long startMs;
Long endMs;
boolean json = wantJSON(request);
try {
String resourceString = request.getParameter(RESOURCE_PARAM);
try {
if (resourceString == null) {
resourceString = DEFAULT_PARTITION_LOAD_RESOURCE;
}
resource = Resource.valueOf(resourceString.toUpperCase());
} catch (IllegalArgumentException iae) {
String errorMsg = String.format("Invalid resource type %s. The resource type must be one of the following: " + "CPU, DISK, NW_IN, NW_OUT", resourceString);
StringWriter sw = new StringWriter();
iae.printStackTrace(new PrintWriter(sw));
setErrorResponse(response, sw.toString(), errorMsg, SC_BAD_REQUEST, json);
// Close session
return true;
}
String startMsString = request.getParameter(START_MS_PARAM);
String endMsString = request.getParameter(END_MS_PARAM);
startMs = startMsString == null ? -1L : Long.parseLong(startMsString);
endMs = endMsString == null ? System.currentTimeMillis() : Long.parseLong(endMsString);
} catch (Exception e) {
StringWriter sw = new StringWriter();
e.printStackTrace(new PrintWriter(sw));
setErrorResponse(response, sw.toString(), e.getMessage(), SC_BAD_REQUEST, json);
// Close session
return true;
}
ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false);
// Get cluster model asynchronously.
ClusterModel clusterModel = getAndMaybeReturnProgress(request, response, () -> _asyncKafkaCruiseControl.clusterModel(startMs, endMs, requirements));
if (clusterModel == null) {
return false;
}
List<Partition> sortedPartitions = clusterModel.replicasSortedByUtilization(resource);
OutputStream out = response.getOutputStream();
String entriesString = request.getParameter(ENTRIES);
Integer entries = entriesString == null ? Integer.MAX_VALUE : Integer.parseInt(entriesString);
int numEntries = 0;
if (!json) {
int topicNameLength = clusterModel.topics().stream().mapToInt(String::length).max().orElse(20) + 5;
setResponseCode(response, SC_OK);
out.write(String.format("%" + topicNameLength + "s%10s%30s%20s%20s%20s%20s%n", "PARTITION", "LEADER", "FOLLOWERS", "CPU (%)", "DISK (MB)", "NW_IN (KB/s)", "NW_OUT (KB/s)").getBytes(StandardCharsets.UTF_8));
for (Partition p : sortedPartitions) {
if (++numEntries > entries) {
break;
}
List<Integer> followers = p.followers().stream().map((replica) -> replica.broker().id()).collect(Collectors.toList());
out.write(String.format("%" + topicNameLength + "s%10s%30s%19.6f%19.3f%19.3f%19.3f%n", p.leader().topicPartition(), p.leader().broker().id(), followers, p.leader().load().expectedUtilizationFor(Resource.CPU), p.leader().load().expectedUtilizationFor(Resource.DISK), p.leader().load().expectedUtilizationFor(Resource.NW_IN), p.leader().load().expectedUtilizationFor(Resource.NW_OUT)).getBytes(StandardCharsets.UTF_8));
}
} else {
Map<String, Object> partitionMap = new HashMap<>();
List<Object> partitionList = new ArrayList<>();
List<String> header = new ArrayList<>(Arrays.asList("topic", "partition", "leader", "followers", "CPU", "DISK", "NW_IN", "NW_OUT"));
partitionMap.put("version", JSON_VERSION);
partitionMap.put("header", header);
for (Partition p : sortedPartitions) {
if (++numEntries > entries) {
break;
}
List<Integer> followers = p.followers().stream().map((replica) -> replica.broker().id()).collect(Collectors.toList());
List<Object> record = new ArrayList<>();
record.add(p.leader().topicPartition().topic());
record.add(p.leader().topicPartition().partition());
record.add(p.leader().broker().id());
record.add(followers);
record.add(p.leader().load().expectedUtilizationFor(Resource.CPU));
record.add(p.leader().load().expectedUtilizationFor(Resource.DISK));
record.add(p.leader().load().expectedUtilizationFor(Resource.NW_IN));
record.add(p.leader().load().expectedUtilizationFor(Resource.NW_OUT));
partitionList.add(record);
}
partitionMap.put("records", partitionList);
Gson gson = new Gson();
String g = gson.toJson(partitionMap);
setJSONResponseCode(response, SC_OK);
response.setContentLength(g.length());
out.write(g.getBytes(StandardCharsets.UTF_8));
}
out.flush();
return true;
}
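When json=true, the servlet serializes a map with "version", "header", and "records" keys via Gson. The minimal sketch below rebuilds that payload shape with made-up values, so the response structure is visible without the servlet plumbing; the version number is a placeholder for the JSON_VERSION constant used above.

import com.google.gson.Gson;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Rebuild the response shape: version, column header, then one row per partition.
Map<String, Object> partitionMap = new HashMap<>();
partitionMap.put("version", 1); // placeholder for JSON_VERSION
partitionMap.put("header", Arrays.asList("topic", "partition", "leader", "followers", "CPU", "DISK", "NW_IN", "NW_OUT"));
List<Object> partitionList = new ArrayList<>();
partitionList.add(Arrays.asList("my-topic", 0, 1, Arrays.asList(2, 3), 12.5, 1024.0, 30.0, 45.0));
partitionMap.put("records", partitionList);
String json = new Gson().toJson(partitionMap);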