Example 91 with TopicPartition

Use of org.apache.kafka.common.TopicPartition in project cruise-control by LinkedIn.

From the class KafkaCruiseControlServlet, method getState.

private boolean getState(HttpServletRequest request, HttpServletResponse response) throws Exception {
    boolean verbose;
    boolean superVerbose;
    boolean json = wantJSON(request);
    try {
        String verboseString = request.getParameter(VERBOSE_PARAM);
        verbose = verboseString != null && Boolean.parseBoolean(verboseString);
        String superVerboseString = request.getParameter(SUPER_VERBOSE_PARM);
        superVerbose = superVerboseString != null && Boolean.parseBoolean(superVerboseString);
    } catch (Exception e) {
        StringWriter sw = new StringWriter();
        e.printStackTrace(new PrintWriter(sw));
        setErrorResponse(response, sw.toString(), e.getMessage(), SC_BAD_REQUEST, json);
        // Close session
        return true;
    }
    KafkaCruiseControlState state = getAndMaybeReturnProgress(request, response, _asyncKafkaCruiseControl::state);
    if (state == null) {
        return false;
    }
    OutputStream out = response.getOutputStream();
    if (json) {
        String stateString = state.getJSONString(JSON_VERSION);
        setJSONResponseCode(response, SC_OK);
        byte[] stateBytes = stateString.getBytes(StandardCharsets.UTF_8);
        // Content-Length should be the encoded byte count, not the character count.
        response.setContentLength(stateBytes.length);
        out.write(stateBytes);
    } else {
        String stateString = state.toString();
        setResponseCode(response, SC_OK);
        out.write(stateString.getBytes(StandardCharsets.UTF_8));
        if (verbose || superVerbose) {
            out.write(String.format("%n%nMonitored Windows [Window End_Time=Data_Completeness]:%n").getBytes(StandardCharsets.UTF_8));
            StringJoiner joiner = new StringJoiner(", ", "{", "}");
            for (Map.Entry<Long, Float> entry : state.monitorState().monitoredWindows().entrySet()) {
                joiner.add(String.format("%d=%.3f%%", entry.getKey(), entry.getValue() * 100));
            }
            out.write(joiner.toString().getBytes(StandardCharsets.UTF_8));
            out.write(String.format("%n%nGoal Readiness:%n").getBytes(StandardCharsets.UTF_8));
            for (Map.Entry<Goal, Boolean> entry : state.analyzerState().readyGoals().entrySet()) {
                Goal goal = entry.getKey();
                out.write(String.format("%50s, %s, %s%n", goal.getClass().getSimpleName(), goal.clusterModelCompletenessRequirements(), entry.getValue() ? "Ready" : "NotReady").getBytes(StandardCharsets.UTF_8));
            }
            ExecutorState executorState = state.executorState();
            if (executorState.state() == ExecutorState.State.REPLICA_MOVEMENT_TASK_IN_PROGRESS || executorState.state() == ExecutorState.State.STOPPING_EXECUTION) {
                out.write(String.format("%n%nIn progress %s:%n", PARTITION_MOVEMENTS).getBytes(StandardCharsets.UTF_8));
                for (ExecutionTask task : executorState.inProgressPartitionMovements()) {
                    out.write(String.format("%s%n", task).getBytes(StandardCharsets.UTF_8));
                }
                out.write(String.format("%n%nAborting %s:%n", PARTITION_MOVEMENTS).getBytes(StandardCharsets.UTF_8));
                for (ExecutionTask task : executorState.abortingPartitionMovements()) {
                    out.write(String.format("%s%n", task).getBytes(StandardCharsets.UTF_8));
                }
                out.write(String.format("%n%nAborted %s:%n", PARTITION_MOVEMENTS).getBytes(StandardCharsets.UTF_8));
                for (ExecutionTask task : executorState.abortedPartitionMovements()) {
                    out.write(String.format("%s%n", task).getBytes(StandardCharsets.UTF_8));
                }
                out.write(String.format("%n%nDead %s:%n", PARTITION_MOVEMENTS).getBytes(StandardCharsets.UTF_8));
                for (ExecutionTask task : executorState.deadPartitionMovements()) {
                    out.write(String.format("%s%n", task).getBytes(StandardCharsets.UTF_8));
                }
                out.write(String.format("%n%n%s %s:%n", executorState.state() == ExecutorState.State.STOPPING_EXECUTION ? "Cancelled" : "Pending", PARTITION_MOVEMENTS).getBytes(StandardCharsets.UTF_8));
                for (ExecutionTask task : executorState.pendingPartitionMovements()) {
                    out.write(String.format("%s%n", task).getBytes(StandardCharsets.UTF_8));
                }
            }
            if (superVerbose) {
                out.write(String.format("%n%nExtrapolated metric samples:%n").getBytes(StandardCharsets.UTF_8));
                Map<TopicPartition, List<SampleExtrapolation>> sampleFlaws = state.monitorState().sampleExtrapolations();
                if (sampleFlaws != null && !sampleFlaws.isEmpty()) {
                    for (Map.Entry<TopicPartition, List<SampleExtrapolation>> entry : sampleFlaws.entrySet()) {
                        out.write(String.format("%n%s: %s", entry.getKey(), entry.getValue()).getBytes(StandardCharsets.UTF_8));
                    }
                } else {
                    out.write("None".getBytes(StandardCharsets.UTF_8));
                }
                out.write(String.format("%n%nLinear Regression Model State:%n%s", state.monitorState().detailTrainingProgress()).getBytes(StandardCharsets.UTF_8));
            }
        }
    }
    response.getOutputStream().flush();
    return true;
}
Also used : KafkaCruiseControlState(com.linkedin.kafka.cruisecontrol.KafkaCruiseControlState) ServletOutputStream(javax.servlet.ServletOutputStream) OutputStream(java.io.OutputStream) ExecutionTask(com.linkedin.kafka.cruisecontrol.executor.ExecutionTask) TimeoutException(java.util.concurrent.TimeoutException) UnsupportedEncodingException(java.io.UnsupportedEncodingException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) KafkaAssignerEvenRackAwareGoal(com.linkedin.kafka.cruisecontrol.analyzer.kafkaassigner.KafkaAssignerEvenRackAwareGoal) Goal(com.linkedin.kafka.cruisecontrol.analyzer.goals.Goal) KafkaAssignerDiskUsageDistributionGoal(com.linkedin.kafka.cruisecontrol.analyzer.kafkaassigner.KafkaAssignerDiskUsageDistributionGoal) StringWriter(java.io.StringWriter) ExecutorState(com.linkedin.kafka.cruisecontrol.executor.ExecutorState) TopicPartition(org.apache.kafka.common.TopicPartition) List(java.util.List) ArrayList(java.util.ArrayList) Map(java.util.Map) SortedMap(java.util.SortedMap) HashMap(java.util.HashMap) TreeMap(java.util.TreeMap) StringJoiner(java.util.StringJoiner) PrintWriter(java.io.PrintWriter)
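For context, a minimal client-side sketch of how this state endpoint could be queried. The host, port, context path, and the exact query parameter names are assumptions inferred from the parameters parsed above, not taken from the project's servlet configuration.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class CruiseControlStateClient {
    public static void main(String[] args) throws Exception {
        // Hypothetical host, port, and context path; adjust for the actual deployment.
        String url = "http://localhost:9090/kafkacruisecontrol/state?verbose=true&json=true";
        HttpRequest request = HttpRequest.newBuilder().uri(URI.create(url)).GET().build();
        // The servlet above returns either a JSON document or a plain-text state dump,
        // depending on the json parameter.
        HttpResponse<String> response =
                HttpClient.newHttpClient().send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println("HTTP " + response.statusCode());
        System.out.println(response.body());
    }
}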

Example 92 with TopicPartition

Use of org.apache.kafka.common.TopicPartition in project cruise-control by LinkedIn.

From the class DefaultMetricSamplerPartitionAssignorTest, method testAssignment.

/**
 * This is a fairly loose test because the default assignor only makes a best-effort attempt at balancing the assignment.
 */
@Test
public void testAssignment() {
    int maxNumPartitionsForTopic = -1;
    int totalNumPartitions = 0;
    // Prepare the metadata
    Node node0 = new Node(0, "localhost", 100, "rack0");
    Node node1 = new Node(1, "localhost", 100, "rack1");
    Node[] nodes = { node0, node1 };
    Set<Node> allNodes = new HashSet<>();
    allNodes.add(node0);
    allNodes.add(node1);
    Set<PartitionInfo> partitions = new HashSet<>();
    for (int i = 0; i < NUM_TOPICS; i++) {
        // Random number of partitions ranging from 4 to 400
        int randomNumPartitions = 4 * (_random.nextInt(100) + 1);
        maxNumPartitionsForTopic = Math.max(randomNumPartitions, maxNumPartitionsForTopic);
        totalNumPartitions += randomNumPartitions;
        for (int j = 0; j < randomNumPartitions; j++) {
            partitions.add(new PartitionInfo(TOPIC_PREFIX + i, j, node0, nodes, nodes));
        }
    }
    Cluster cluster = new Cluster("cluster", allNodes, partitions, Collections.emptySet(), Collections.emptySet());
    Metadata metadata = new Metadata(10, 10, false);
    metadata.update(cluster, Collections.emptySet(), 0);
    MetricSamplerPartitionAssignor assignor = new DefaultMetricSamplerPartitionAssignor();
    List<Set<TopicPartition>> assignments = assignor.assignPartitions(metadata.fetch(), NUM_FETCHERS);
    int maxAssignedNumPartitionsForFetcher = -1;
    int minAssignedNumPartitionsForFetcher = Integer.MAX_VALUE;
    int totalAssignedNumPartitions = 0;
    Set<TopicPartition> uniqueAssignedPartitions = new HashSet<>();
    for (Set<TopicPartition> assignment : assignments) {
        maxAssignedNumPartitionsForFetcher = Math.max(maxAssignedNumPartitionsForFetcher, assignment.size());
        minAssignedNumPartitionsForFetcher = Math.min(minAssignedNumPartitionsForFetcher, assignment.size());
        uniqueAssignedPartitions.addAll(assignment);
        totalAssignedNumPartitions += assignment.size();
    }
    // Make sure all the partitions are assigned and there is no double assignment.
    assertEquals("Total assigned number of partitions should be " + totalNumPartitions, totalNumPartitions, totalAssignedNumPartitions);
    assertEquals("Total number of unique assigned partitions should be " + totalNumPartitions, totalNumPartitions, uniqueAssignedPartitions.size());
    int avgAssignedPartitionsPerFetcher = totalNumPartitions / NUM_FETCHERS;
    assertTrue("In the worst case the max number of partitions assigned to a metric fetchers should not differ by " + "more than the partition number of the biggest topic, which is " + maxNumPartitionsForTopic, maxAssignedNumPartitionsForFetcher - avgAssignedPartitionsPerFetcher <= maxNumPartitionsForTopic);
    assertTrue("In the worst case the min number of partitions assigned to a metric fetchers should not differ by " + "more than the partition number of the biggest topic, which is " + maxNumPartitionsForTopic, avgAssignedPartitionsPerFetcher - minAssignedNumPartitionsForFetcher <= maxNumPartitionsForTopic);
}
Also used : Set(java.util.Set) HashSet(java.util.HashSet) Node(org.apache.kafka.common.Node) Metadata(org.apache.kafka.clients.Metadata) Cluster(org.apache.kafka.common.Cluster) TopicPartition(org.apache.kafka.common.TopicPartition) PartitionInfo(org.apache.kafka.common.PartitionInfo) HashSet(java.util.HashSet) Test(org.junit.Test)
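For comparison, a minimal sketch of a plain round-robin assignor that satisfies the two properties asserted above: every partition is assigned exactly once, and assignment sizes differ by at most one. This is not the project's DefaultMetricSamplerPartitionAssignor; it only uses the public Kafka Cluster API to illustrate the baseline the test measures against.

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

public final class RoundRobinAssignorSketch {

    /**
     * Distribute every partition in the cluster across numFetchers sets in round-robin order.
     * Each partition is assigned exactly once and set sizes differ by at most one, which is
     * stricter than the loose per-topic bound asserted in the test above.
     */
    public static List<Set<TopicPartition>> assign(Cluster cluster, int numFetchers) {
        List<Set<TopicPartition>> assignments = new ArrayList<>(numFetchers);
        for (int i = 0; i < numFetchers; i++) {
            assignments.add(new HashSet<>());
        }
        int index = 0;
        for (String topic : cluster.topics()) {
            for (PartitionInfo info : cluster.partitionsForTopic(topic)) {
                assignments.get(index++ % numFetchers)
                           .add(new TopicPartition(info.topic(), info.partition()));
            }
        }
        return assignments;
    }
}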

Example 93 with TopicPartition

Use of org.apache.kafka.common.TopicPartition in project cruise-control by LinkedIn.

From the class PartitionMetricSampleTest, method testSerde.

@Test
public void testSerde() throws UnknownVersionException {
    MetricDef metricDef = KafkaCruiseControlMetricDef.metricDef();
    PartitionMetricSample sample = new PartitionMetricSample(0, new TopicPartition("topic", 0));
    int i = 0;
    for (Resource r : Resource.values()) {
        sample.record(KafkaCruiseControlMetricDef.resourceToMetricInfo(r), i);
        i++;
    }
    sample.record(metricDef.metricInfo(PRODUCE_RATE.name()), (double) i++);
    sample.record(metricDef.metricInfo(FETCH_RATE.name()), (double) i++);
    sample.record(metricDef.metricInfo(MESSAGE_IN_RATE.name()), (double) i++);
    sample.record(metricDef.metricInfo(REPLICATION_BYTES_IN_RATE.name()), (double) i++);
    sample.record(metricDef.metricInfo(REPLICATION_BYTES_OUT_RATE.name()), (double) i);
    sample.close(10);
    byte[] bytes = sample.toBytes();
    PartitionMetricSample deserializedSample = PartitionMetricSample.fromBytes(bytes);
    assertEquals(sample.brokerId(), deserializedSample.brokerId());
    assertEquals(sample.entity().tp(), deserializedSample.entity().tp());
    assertEquals(sample.metricValue(KafkaCruiseControlMetricDef.resourceToMetricId(Resource.CPU)), deserializedSample.metricValue(KafkaCruiseControlMetricDef.resourceToMetricId(Resource.CPU)));
    assertEquals(sample.metricValue(KafkaCruiseControlMetricDef.resourceToMetricId(Resource.DISK)), deserializedSample.metricValue(KafkaCruiseControlMetricDef.resourceToMetricId(Resource.DISK)));
    assertEquals(sample.metricValue(KafkaCruiseControlMetricDef.resourceToMetricId(Resource.NW_IN)), deserializedSample.metricValue(KafkaCruiseControlMetricDef.resourceToMetricId(Resource.NW_IN)));
    assertEquals(sample.metricValue(KafkaCruiseControlMetricDef.resourceToMetricId(Resource.NW_OUT)), deserializedSample.metricValue(KafkaCruiseControlMetricDef.resourceToMetricId(Resource.NW_OUT)));
    assertEquals(sample.metricValue(metricDef.metricInfo(PRODUCE_RATE.name()).id()), deserializedSample.metricValue(metricDef.metricInfo(PRODUCE_RATE.name()).id()), EPSILON);
    assertEquals(sample.metricValue(metricDef.metricInfo(FETCH_RATE.name()).id()), deserializedSample.metricValue(metricDef.metricInfo(FETCH_RATE.name()).id()), EPSILON);
    assertEquals(sample.metricValue(metricDef.metricInfo(MESSAGE_IN_RATE.name()).id()), deserializedSample.metricValue(metricDef.metricInfo(MESSAGE_IN_RATE.name()).id()), EPSILON);
    assertEquals(sample.metricValue(metricDef.metricInfo(REPLICATION_BYTES_IN_RATE.name()).id()), deserializedSample.metricValue(metricDef.metricInfo(REPLICATION_BYTES_IN_RATE.name()).id()), EPSILON);
    assertEquals(sample.metricValue(metricDef.metricInfo(REPLICATION_BYTES_OUT_RATE.name()).id()), deserializedSample.metricValue(metricDef.metricInfo(REPLICATION_BYTES_OUT_RATE.name()).id()), EPSILON);
    assertEquals(sample.sampleTime(), deserializedSample.sampleTime());
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) MetricDef(com.linkedin.cruisecontrol.metricdef.MetricDef) KafkaCruiseControlMetricDef(com.linkedin.kafka.cruisecontrol.monitor.metricdefinition.KafkaCruiseControlMetricDef) Resource(com.linkedin.kafka.cruisecontrol.common.Resource) Test(org.junit.Test)
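The repeated per-metric assertions could be collapsed into a small helper. A sketch for the same test class, reusing only calls already exercised above (metricDef.metricInfo(name).id() and metricValue(...)) together with the test's EPSILON constant; the helper itself is hypothetical.

    // Hypothetical helper for the test class above; the metric names are the same
    // static imports used in testSerde().
    private static void assertRateMetricsEqual(PartitionMetricSample expected,
                                               PartitionMetricSample actual,
                                               MetricDef metricDef) {
        String[] rateMetrics = { PRODUCE_RATE.name(), FETCH_RATE.name(), MESSAGE_IN_RATE.name(),
                                 REPLICATION_BYTES_IN_RATE.name(), REPLICATION_BYTES_OUT_RATE.name() };
        for (String name : rateMetrics) {
            assertEquals("Mismatch for metric " + name,
                         expected.metricValue(metricDef.metricInfo(name).id()),
                         actual.metricValue(metricDef.metricInfo(name).id()),
                         EPSILON);
        }
    }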

Example 94 with TopicPartition

Use of org.apache.kafka.common.TopicPartition in project cruise-control by LinkedIn.

From the class PartitionMetricSampleTest, method testRecordSameResourceMetricAgain.

@Test
public void testRecordSameResourceMetricAgain() {
    PartitionMetricSample sample = new PartitionMetricSample(0, new TopicPartition("topic", 0));
    sample.record(KafkaCruiseControlMetricDef.resourceToMetricInfo(Resource.DISK), 0);
    try {
        sample.record(KafkaCruiseControlMetricDef.resourceToMetricInfo(Resource.DISK), 0.0);
        fail("Should throw IllegalStateException");
    } catch (IllegalStateException ise) {
    // let it go.
    }
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) Test(org.junit.Test)
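If the project is on JUnit 4.13 or newer, the same expectation can be written more compactly with assertThrows. A minimal sketch, assuming org.junit.Assert.assertThrows is statically imported:

    @Test
    public void testRecordSameResourceMetricAgainWithAssertThrows() {
        PartitionMetricSample sample = new PartitionMetricSample(0, new TopicPartition("topic", 0));
        sample.record(KafkaCruiseControlMetricDef.resourceToMetricInfo(Resource.DISK), 0);
        // Recording a metric for the same resource twice must fail.
        assertThrows(IllegalStateException.class,
                     () -> sample.record(KafkaCruiseControlMetricDef.resourceToMetricInfo(Resource.DISK), 0.0));
    }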

Example 95 with TopicPartition

Use of org.apache.kafka.common.TopicPartition in project cruise-control by LinkedIn.

From the class KafkaMetricSampleAggregatorTest, method setupScenario4.

/**
 * 3 Topics with 2 partitions each.
 * T0P1 has all the windows with AVG_AVAILABLE as extrapolations.
 * T1P1 misses windows 6000 (index=5) and 7000 (index=6).
 * All other partitions have full data.
 */
private TestContext setupScenario4() {
    TopicPartition t0p1 = new TopicPartition(TOPIC, 1);
    TopicPartition t1p0 = new TopicPartition("TOPIC1", 0);
    TopicPartition t1p1 = new TopicPartition("TOPIC1", 1);
    TopicPartition t2p0 = new TopicPartition("TOPIC2", 0);
    TopicPartition t2p1 = new TopicPartition("TOPIC2", 1);
    List<TopicPartition> allPartitions = Arrays.asList(TP, t0p1, t1p0, t1p1, t2p0, t2p1);
    KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties());
    Metadata metadata = getMetadata(allPartitions);
    KafkaMetricSampleAggregator aggregator = new KafkaMetricSampleAggregator(config, metadata);
    for (TopicPartition tp : Arrays.asList(TP, t1p0, t2p0, t2p1)) {
        populateSampleAggregator(NUM_WINDOWS + 1, MIN_SAMPLES_PER_WINDOW, aggregator, tp);
    }
    // Let t0p1 have too many extrapolations.
    populateSampleAggregator(NUM_WINDOWS + 1, MIN_SAMPLES_PER_WINDOW - 1, aggregator, t0p1);
    // Let t1p1 miss windows at indexes 5 and 6: this call covers indexes 0-4 and the next one starts at index 7.
    populateSampleAggregator(5, MIN_SAMPLES_PER_WINDOW, aggregator, t1p1);
    CruiseControlUnitTestUtils.populateSampleAggregator(NUM_WINDOWS - 6, MIN_SAMPLES_PER_WINDOW, aggregator, new PartitionEntity(t1p1), 7, WINDOW_MS, KafkaCruiseControlMetricDef.metricDef());
    return new TestContext(metadata, aggregator);
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) PartitionEntity(com.linkedin.kafka.cruisecontrol.monitor.sampling.PartitionEntity) Metadata(org.apache.kafka.clients.Metadata) KafkaCruiseControlConfig(com.linkedin.kafka.cruisecontrol.config.KafkaCruiseControlConfig)
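A quick sanity check of the window arithmetic: the first populateSampleAggregator call for t1p1 covers window indexes 0 through 4 and the second starts at index 7, so indexes 5 and 6 (window end times 6000 and 7000 when WINDOW_MS is 1000) are never populated. The values for NUM_WINDOWS and WINDOW_MS below are illustrative stand-ins, not the test's actual constants.

import java.util.TreeSet;

public class Scenario4WindowArithmetic {
    public static void main(String[] args) {
        int numWindows = 20;   // stand-in for NUM_WINDOWS; illustrative assumption
        long windowMs = 1000L; // stand-in for WINDOW_MS; illustrative assumption
        TreeSet<Integer> covered = new TreeSet<>();
        // First call: 5 windows -> indexes 0..4.
        for (int w = 0; w < 5; w++) {
            covered.add(w);
        }
        // Second call: NUM_WINDOWS - 6 windows starting at index 7 -> indexes 7..NUM_WINDOWS.
        for (int w = 7; w < 7 + (numWindows - 6); w++) {
            covered.add(w);
        }
        // Fully populated partitions receive NUM_WINDOWS + 1 windows, i.e. indexes 0..NUM_WINDOWS.
        for (int w = 0; w <= numWindows; w++) {
            if (!covered.contains(w)) {
                System.out.printf("t1p1 is missing window index %d (end time %d)%n", w, (w + 1) * windowMs);
            }
        }
    }
}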

Aggregations

TopicPartition (org.apache.kafka.common.TopicPartition): 1729
HashMap (java.util.HashMap): 744
Test (org.junit.Test): 519
ArrayList (java.util.ArrayList): 416
Map (java.util.Map): 361
Test (org.junit.jupiter.api.Test): 347
HashSet (java.util.HashSet): 281
List (java.util.List): 260
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 246
Set (java.util.Set): 189
LinkedHashMap (java.util.LinkedHashMap): 180
PartitionInfo (org.apache.kafka.common.PartitionInfo): 170
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 155
TaskId (org.apache.kafka.streams.processor.TaskId): 145
Node (org.apache.kafka.common.Node): 140
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 109
KafkaException (org.apache.kafka.common.KafkaException): 105
Errors (org.apache.kafka.common.protocol.Errors): 105
ByteBuffer (java.nio.ByteBuffer): 99
Properties (java.util.Properties): 93