Search in sources:

Example 86 with ConnectorTaskId

Usage of org.apache.kafka.connect.util.ConnectorTaskId in the Apache Kafka project.

From the class ExtendedAssignment, method extractTasks.

/**
 * Reads the per-connector task IDs stored under the given key of an assignment struct.
 *
 * @param struct the assignment struct to read from
 * @param key either the revoked or the assignment key name (enforced by the assert below)
 * @return the task IDs found under {@code key}, excluding connector-only entries;
 *         an empty list when the array is absent
 */
private static Collection<ConnectorTaskId> extractTasks(Struct struct, String key) {
    assert REVOKED_KEY_NAME.equals(key) || ASSIGNMENT_KEY_NAME.equals(key);
    Object[] entries = struct.getArray(key);
    if (entries == null) {
        return Collections.emptyList();
    }
    List<ConnectorTaskId> result = new ArrayList<>();
    for (Object entryObj : entries) {
        Struct entry = (Struct) entryObj;
        String connectorName = entry.getString(CONNECTOR_KEY_NAME);
        for (Object idObj : entry.getArray(TASKS_KEY_NAME)) {
            int id = (Integer) idObj;
            // CONNECTOR_TASK marks a connector-only entry rather than a real task index.
            if (id != CONNECTOR_TASK) {
                result.add(new ConnectorTaskId(connectorName, id));
            }
        }
    }
    return result;
}
Also used: ConnectorTaskId(org.apache.kafka.connect.util.ConnectorTaskId) ArrayList(java.util.ArrayList) Struct(org.apache.kafka.common.protocol.types.Struct)

Example 87 with ConnectorTaskId

Usage of org.apache.kafka.connect.util.ConnectorTaskId in the Apache Kafka project.

From the class IncrementalCooperativeAssignor, method computeDuplicatedAssignments.

/**
 * Determines which connectors and tasks are assigned to more than one worker and
 * builds, per worker, the set of duplicated assignments that must be revoked from it.
 *
 * @param memberConfigs the current worker states, used to detect duplicated assignments
 * @param connectorAssignments connectors currently assigned, keyed by worker
 * @param taskAssignment tasks currently assigned, keyed by worker
 * @return a map from worker ID to the duplicated connectors and tasks to revoke from it;
 *         workers with no duplicates are absent from the map
 */
private Map<String, ConnectorsAndTasks> computeDuplicatedAssignments(Map<String, ExtendedWorkerState> memberConfigs, Map<String, Collection<String>> connectorAssignments, Map<String, Collection<ConnectorTaskId>> taskAssignment) {
    ConnectorsAndTasks duplicatedAssignments = duplicatedAssignments(memberConfigs);
    log.debug("Duplicated assignments: {}", duplicatedAssignments);
    Map<String, ConnectorsAndTasks> toRevoke = new HashMap<>();
    if (!duplicatedAssignments.connectors().isEmpty()) {
        // Map.forEach instead of entrySet().stream().forEach(): streams should not be
        // used purely for side effects, and this avoids the entry-set indirection.
        connectorAssignments.forEach((worker, assignedConnectors) -> {
            // Intersect this worker's connectors with the globally duplicated set.
            Set<String> duplicatedConnectors = new HashSet<>(duplicatedAssignments.connectors());
            duplicatedConnectors.retainAll(assignedConnectors);
            if (!duplicatedConnectors.isEmpty()) {
                toRevoke.computeIfAbsent(worker, v -> new ConnectorsAndTasks.Builder().build()).connectors().addAll(duplicatedConnectors);
            }
        });
    }
    if (!duplicatedAssignments.tasks().isEmpty()) {
        taskAssignment.forEach((worker, assignedTasks) -> {
            // Intersect this worker's tasks with the globally duplicated set.
            Set<ConnectorTaskId> duplicatedTasks = new HashSet<>(duplicatedAssignments.tasks());
            duplicatedTasks.retainAll(assignedTasks);
            if (!duplicatedTasks.isEmpty()) {
                toRevoke.computeIfAbsent(worker, v -> new ConnectorsAndTasks.Builder().build()).tasks().addAll(duplicatedTasks);
            }
        });
    }
    return toRevoke;
}
Also used: ConnectorsAndTasks(org.apache.kafka.connect.runtime.distributed.WorkerCoordinator.ConnectorsAndTasks) ConnectorTaskId(org.apache.kafka.connect.util.ConnectorTaskId) HashMap(java.util.HashMap) HashSet(java.util.HashSet) LinkedHashSet(java.util.LinkedHashSet)

Example 88 with ConnectorTaskId

Usage of org.apache.kafka.connect.util.ConnectorTaskId in the Apache Kafka project.

From the class Worker, method awaitStopTasks.

/**
 * Waits for each of the given tasks to stop, bounding the TOTAL wait across all
 * tasks by the configured graceful shutdown timeout.
 *
 * @param ids the tasks to wait for
 */
private void awaitStopTasks(Collection<ConnectorTaskId> ids) {
    // A single shared deadline: time spent waiting on earlier tasks shrinks the
    // budget left for later ones, which may get a zero-wait check.
    long deadline = time.milliseconds() + config.getLong(WorkerConfig.TASK_SHUTDOWN_GRACEFUL_TIMEOUT_MS_CONFIG);
    for (ConnectorTaskId id : ids) {
        awaitStopTask(id, Math.max(0, deadline - time.milliseconds()));
    }
}
Also used : ConnectorTaskId(org.apache.kafka.connect.util.ConnectorTaskId)

Example 89 with ConnectorTaskId

Usage of org.apache.kafka.connect.util.ConnectorTaskId in the Apache Kafka project.

From the class ClusterConfigState, method tasks.

/**
 * Returns the current task IDs for the given connector as an unmodifiable list.
 * The IDs are the indices {@code 0..count-1} paired with the connector name.
 *
 * @param connectorName the name of the connector to look up task configs for
 * @return the connector's task IDs; empty when the connector is in an
 *         inconsistent state or has no recorded task count
 */
public List<ConnectorTaskId> tasks(String connectorName) {
    // Inconsistent connectors expose no tasks until their configs converge.
    if (inconsistentConnectors.contains(connectorName)) {
        return Collections.emptyList();
    }
    Integer count = connectorTaskCounts.get(connectorName);
    if (count == null) {
        return Collections.emptyList();
    }
    List<ConnectorTaskId> ids = new ArrayList<>(count);
    for (int index = 0; index < count; index++) {
        ids.add(new ConnectorTaskId(connectorName, index));
    }
    return Collections.unmodifiableList(ids);
}
Also used: ConnectorTaskId(org.apache.kafka.connect.util.ConnectorTaskId) ArrayList(java.util.ArrayList)

Example 90 with ConnectorTaskId

Usage of org.apache.kafka.connect.util.ConnectorTaskId in the Apache Kafka project.

From the class KafkaStatusBackingStoreFormatTest, method putTopicStateShouldOverridePreviousState.

@Test
public void putTopicStateShouldOverridePreviousState() {
    // Two topic statuses for the same connector task, created 1s apart so their
    // timestamps differ. NOTE(review): the statuses are for DIFFERENT topics
    // (FOO_TOPIC vs BAR_TOPIC) and the assertions below show both are retained,
    // so the method name's "override" presumably refers to overwriting the
    // serialized per-key state in the backing log — confirm against the store impl.
    TopicStatus firstTopicStatus = new TopicStatus(FOO_TOPIC, new ConnectorTaskId(FOO_CONNECTOR, 0), time.milliseconds());
    time.sleep(1000);
    TopicStatus secondTopicStatus = new TopicStatus(BAR_TOPIC, new ConnectorTaskId(FOO_CONNECTOR, 0), time.milliseconds());
    // Status-topic keys are per (topic, connector) pair.
    String firstKey = TOPIC_STATUS_PREFIX + FOO_TOPIC + TOPIC_STATUS_SEPARATOR + FOO_CONNECTOR;
    String secondKey = TOPIC_STATUS_PREFIX + BAR_TOPIC + TOPIC_STATUS_SEPARATOR + FOO_CONNECTOR;
    ArgumentCaptor<byte[]> valueCaptor = ArgumentCaptor.forClass(byte[].class);
    // When the store sends the second status, simulate the broker round-trip:
    // complete the send callback successfully, then feed the captured serialized
    // bytes back into the store as a consumed record.
    doAnswer(invocation -> {
        ((Callback) invocation.getArgument(2)).onCompletion(null, null);
        ConsumerRecord<String, byte[]> statusRecord = new ConsumerRecord<>(STATUS_TOPIC, 0, 0, secondKey, valueCaptor.getValue());
        store.read(statusRecord);
        return null;
    }).when(kafkaBasedLog).send(eq(secondKey), valueCaptor.capture(), any(Callback.class));
    // Seed the store with the first status by reading it directly (no send involved).
    byte[] value = store.serializeTopicStatus(firstTopicStatus);
    ConsumerRecord<String, byte[]> statusRecord = new ConsumerRecord<>(STATUS_TOPIC, 0, 0, firstKey, value);
    store.read(statusRecord);
    // Triggers the stubbed send above, which loops the record back through read().
    store.put(secondTopicStatus);
    // check capture state
    assertEquals(secondTopicStatus, store.parseTopicStatus(valueCaptor.getValue()));
    // Both statuses are visible under their respective topics for the connector.
    assertEquals(firstTopicStatus, store.getTopic(FOO_CONNECTOR, FOO_TOPIC));
    assertEquals(secondTopicStatus, store.getTopic(FOO_CONNECTOR, BAR_TOPIC));
    assertEquals(new HashSet<>(Arrays.asList(firstTopicStatus, secondTopicStatus)), new HashSet<>(store.getAllTopics(FOO_CONNECTOR)));
}
Also used: Callback(org.apache.kafka.clients.producer.Callback) ConnectorTaskId(org.apache.kafka.connect.util.ConnectorTaskId) TopicStatus(org.apache.kafka.connect.runtime.TopicStatus) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Test(org.junit.Test)

Aggregations

ConnectorTaskId (org.apache.kafka.connect.util.ConnectorTaskId)111 Test (org.junit.Test)59 PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest)45 HashMap (java.util.HashMap)26 ArrayList (java.util.ArrayList)25 Map (java.util.Map)18 FutureCallback (org.apache.kafka.connect.util.FutureCallback)16 ConnectException (org.apache.kafka.connect.errors.ConnectException)15 Callback (org.apache.kafka.connect.util.Callback)15 Connector (org.apache.kafka.connect.connector.Connector)13 NotFoundException (org.apache.kafka.connect.errors.NotFoundException)12 ConnectorInfo (org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo)12 SinkConnector (org.apache.kafka.connect.sink.SinkConnector)12 TaskStatus (org.apache.kafka.connect.runtime.TaskStatus)11 WorkerConnector (org.apache.kafka.connect.runtime.WorkerConnector)11 SourceConnector (org.apache.kafka.connect.source.SourceConnector)11 Herder (org.apache.kafka.connect.runtime.Herder)10 List (java.util.List)9 StatusBackingStore (org.apache.kafka.connect.storage.StatusBackingStore)9 ConnectorStateInfo (org.apache.kafka.connect.runtime.rest.entities.ConnectorStateInfo)8