Search in sources:

Example 81 with ConnectorTaskId

Use of org.apache.kafka.connect.util.ConnectorTaskId in the Apache Kafka project.

From the class AbstractHerderTest, method testBuildRestartPlanForNoRestart.

@Test
public void testBuildRestartPlanForNoRestart() {
    // Restart request whose flags should yield a plan that restarts nothing.
    RestartRequest restartRequest = new RestartRequest(connector, true, false);

    // Status snapshot: one running task and one failed task for the connector.
    List<TaskStatus> taskStatuses = new ArrayList<>();
    taskStatuses.add(new TaskStatus(new ConnectorTaskId(connector, 1), AbstractStatus.State.RUNNING, workerId, generation));
    taskStatuses.add(new TaskStatus(new ConnectorTaskId(connector, 2), AbstractStatus.State.FAILED, workerId, generation));

    // Partial mock of the abstract herder: only generation() is mocked, the real
    // buildRestartPlan() implementation under test runs unmodified.
    AbstractHerder herder = partialMockBuilder(AbstractHerder.class)
            .withConstructor(Worker.class, String.class, String.class, StatusBackingStore.class,
                    ConfigBackingStore.class, ConnectorClientConfigOverridePolicy.class)
            .withArgs(worker, workerId, kafkaClusterId, statusStore, configStore,
                    noneConnectorClientConfigOverridePolicy)
            .addMockedMethod("generation")
            .createMock();
    EasyMock.expect(herder.generation()).andStubReturn(generation);
    EasyMock.expect(herder.rawConfig(connector)).andReturn(null);
    EasyMock.expect(statusStore.get(connector))
            .andReturn(new ConnectorStatus(connector, AbstractStatus.State.RUNNING, workerId, generation));
    EasyMock.expect(statusStore.getAll(connector)).andReturn(taskStatuses);
    EasyMock.expect(worker.getPlugins()).andStubReturn(plugins);
    replayAll();

    Optional<RestartPlan> maybePlan = herder.buildRestartPlan(restartRequest);

    // A plan is produced, but it must not call for any connector or task restarts.
    assertTrue(maybePlan.isPresent());
    RestartPlan plan = maybePlan.get();
    assertFalse(plan.shouldRestartConnector());
    assertFalse(plan.shouldRestartTasks());
    assertTrue(plan.taskIdsToRestart().isEmpty());
    PowerMock.verifyAll();
}
Also used : StatusBackingStore(org.apache.kafka.connect.storage.StatusBackingStore) ConnectorTaskId(org.apache.kafka.connect.util.ConnectorTaskId) ArrayList(java.util.ArrayList) AllConnectorClientConfigOverridePolicy(org.apache.kafka.connect.connector.policy.AllConnectorClientConfigOverridePolicy) PrincipalConnectorClientConfigOverridePolicy(org.apache.kafka.connect.connector.policy.PrincipalConnectorClientConfigOverridePolicy) ConnectorClientConfigOverridePolicy(org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy) NoneConnectorClientConfigOverridePolicy(org.apache.kafka.connect.connector.policy.NoneConnectorClientConfigOverridePolicy) ConfigBackingStore(org.apache.kafka.connect.storage.ConfigBackingStore) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)

Example 82 with ConnectorTaskId

Use of org.apache.kafka.connect.util.ConnectorTaskId in the Apache Kafka project.

From the class DistributedHerder, method reconfigureConnector.

// Updates configurations for a connector by requesting them from the connector, filling in parameters provided
// by the system, then checks whether any configs have actually changed before submitting the new configs to storage
// Updates configurations for a connector by requesting them from the connector, filling in parameters provided
// by the system, then checks whether any configs have actually changed before submitting the new configs to storage.
// The callback is completed only when new task configs are written (locally or via the leader) or an error occurs;
// NOTE(review): when the connector is not running or nothing changed, the callback is intentionally never invoked —
// confirm callers tolerate that.
private void reconfigureConnector(final String connName, final Callback<Void> cb) {
    try {
        if (!worker.isRunning(connName)) {
            log.info("Skipping reconfiguration of connector {} since it is not running", connName);
            return;
        }
        Map<String, String> configs = configState.connectorConfig(connName);
        // Sink and source connectors have distinct config definitions (e.g. topic creation
        // settings only apply to sources), so instantiate the matching config type.
        ConnectorConfig connConfig;
        if (worker.isSinkConnector(connName)) {
            connConfig = new SinkConnectorConfig(plugins(), configs);
        } else {
            connConfig = new SourceConnectorConfig(plugins(), configs, worker.isTopicCreationEnabled());
        }
        // Ask the (running) connector to generate its current task configurations.
        final List<Map<String, String>> taskProps = worker.connectorTaskConfigs(connName, connConfig);
        // Only write to the config store if the task count or any individual task config changed.
        boolean changed = false;
        int currentNumTasks = configState.taskCount(connName);
        if (taskProps.size() != currentNumTasks) {
            log.debug("Change in connector task count from {} to {}, writing updated task configurations", currentNumTasks, taskProps.size());
            changed = true;
        } else {
            int index = 0;
            for (Map<String, String> taskConfig : taskProps) {
                if (!taskConfig.equals(configState.taskConfig(new ConnectorTaskId(connName, index)))) {
                    log.debug("Change in task configurations, writing updated task configurations");
                    changed = true;
                    break;
                }
                index++;
            }
        }
        if (changed) {
            // Undo config-provider substitution before persisting/forwarding raw configs.
            List<Map<String, String>> rawTaskProps = reverseTransform(connName, configState, taskProps);
            if (isLeader()) {
                configBackingStore.putTaskConfigs(connName, rawTaskProps);
                cb.onCompletion(null, null);
            } else {
                // We cannot forward the request on the same thread because this reconfiguration can happen as a result of connector
                // addition or removal. If we blocked waiting for the response from leader, we may be kicked out of the worker group.
                forwardRequestExecutor.submit(() -> {
                    try {
                        String leaderUrl = leaderUrl();
                        if (Utils.isBlank(leaderUrl)) {
                            cb.onCompletion(new ConnectException("Request to leader to " + "reconfigure connector tasks failed " + "because the URL of the leader's REST interface is empty!"), null);
                            return;
                        }
                        String reconfigUrl = UriBuilder.fromUri(leaderUrl).path("connectors").path(connName).path("tasks").build().toString();
                        log.trace("Forwarding task configurations for connector {} to leader", connName);
                        RestClient.httpRequest(reconfigUrl, "POST", null, rawTaskProps, null, config, sessionKey, requestSignatureAlgorithm);
                        cb.onCompletion(null, null);
                    } catch (Throwable t) {
                        // Catch Throwable, not just ConnectException: an unexpected exception
                        // thrown here would otherwise die silently on the executor thread and
                        // the callback would never be completed, leaving the caller waiting.
                        log.error("Request to leader to reconfigure connector tasks failed", t);
                        cb.onCompletion(t, null);
                    }
                });
            }
        }
    } catch (Throwable t) {
        cb.onCompletion(t, null);
    }
}
Also used : SourceConnectorConfig(org.apache.kafka.connect.runtime.SourceConnectorConfig) ConnectorTaskId(org.apache.kafka.connect.util.ConnectorTaskId) SourceConnectorConfig(org.apache.kafka.connect.runtime.SourceConnectorConfig) ConnectorConfig(org.apache.kafka.connect.runtime.ConnectorConfig) SinkConnectorConfig(org.apache.kafka.connect.runtime.SinkConnectorConfig) Map(java.util.Map) HashMap(java.util.HashMap) SinkConnectorConfig(org.apache.kafka.connect.runtime.SinkConnectorConfig) ConnectException(org.apache.kafka.connect.errors.ConnectException)

Example 83 with ConnectorTaskId

Use of org.apache.kafka.connect.util.ConnectorTaskId in the Apache Kafka project.

From the class DistributedHerder, method startWork.

private void startWork() {
    // Collect a startup action for every connector and task present in the new
    // assignment but absent from the currently running assignment.
    final List<Callable<Void>> startupActions = new ArrayList<>();
    // runningAssignment is always replaced by the assignment here.
    synchronized (this) {
        log.info("Starting connectors and tasks using config offset {}", assignment.offset());
        log.debug("Received assignment: {}", assignment);
        log.debug("Currently running assignment: {}", runningAssignment);
        for (String connectorName : assignmentDifference(assignment.connectors(), runningAssignment.connectors())) {
            startupActions.add(getConnectorStartingCallable(connectorName));
        }
        // These tasks have been stopped by this worker due to task reconfiguration. In order to
        // restart them, they are removed just before the overall task startup from the set of
        // currently running tasks. Therefore, they'll be restarted only if they are included in
        // the assignment that was just received after rebalancing.
        log.debug("Tasks to restart from currently running assignment: {}", tasksToRestart);
        runningAssignment.tasks().removeAll(tasksToRestart);
        tasksToRestart.clear();
        for (ConnectorTaskId taskId : assignmentDifference(assignment.tasks(), runningAssignment.tasks())) {
            startupActions.add(getTaskStartingCallable(taskId));
        }
    }
    // Run the collected startup actions outside the lock.
    startAndStop(startupActions);
    synchronized (this) {
        runningAssignment = member.currentProtocolVersion() == CONNECT_PROTOCOL_V0
                ? ExtendedAssignment.empty()
                : assignment;
    }
    log.info("Finished starting connectors and tasks");
}
Also used : ConnectorTaskId(org.apache.kafka.connect.util.ConnectorTaskId) ArrayList(java.util.ArrayList) Callable(java.util.concurrent.Callable)

Example 84 with ConnectorTaskId

Use of org.apache.kafka.connect.util.ConnectorTaskId in the Apache Kafka project.

From the class ConnectProtocol, method deserializeAssignment.

/**
 * Deserialize an assignment, as defined by this protocol, from a byte buffer.
 *
 * @param buffer the buffer containing a serialized assignment
 * @return the deserialized assignment
 * @throws SchemaException on incompatible Connect protocol version
 */
public static Assignment deserializeAssignment(ByteBuffer buffer) {
    // The header carries the protocol version; reject incompatible versions up front.
    Struct header = CONNECT_PROTOCOL_HEADER_SCHEMA.read(buffer);
    checkVersionCompatibility(header.getShort(VERSION_KEY_NAME));

    Struct body = ASSIGNMENT_V0.read(buffer);
    short error = body.getShort(ERROR_KEY_NAME);
    String leader = body.getString(LEADER_KEY_NAME);
    String leaderUrl = body.getString(LEADER_URL_KEY_NAME);
    long offset = body.getLong(CONFIG_OFFSET_KEY_NAME);

    // Each entry groups a connector name with the ids of its assigned work; the sentinel
    // CONNECTOR_TASK id marks the connector itself rather than one of its tasks.
    List<String> connectorIds = new ArrayList<>();
    List<ConnectorTaskId> taskIds = new ArrayList<>();
    for (Object entryObj : body.getArray(ASSIGNMENT_KEY_NAME)) {
        Struct entry = (Struct) entryObj;
        String connector = entry.getString(CONNECTOR_KEY_NAME);
        for (Object rawId : entry.getArray(TASKS_KEY_NAME)) {
            int taskId = (Integer) rawId;
            if (taskId == CONNECTOR_TASK) {
                connectorIds.add(connector);
            } else {
                taskIds.add(new ConnectorTaskId(connector, taskId));
            }
        }
    }
    return new Assignment(error, leader, leaderUrl, offset, connectorIds, taskIds);
}
Also used : ConnectorTaskId(org.apache.kafka.connect.util.ConnectorTaskId) ArrayList(java.util.ArrayList) Struct(org.apache.kafka.common.protocol.types.Struct)

Example 85 with ConnectorTaskId

Use of org.apache.kafka.connect.util.ConnectorTaskId in the Apache Kafka project.

From the class EagerAssignor, method performTaskAssignment.

private Map<String, ByteBuffer> performTaskAssignment(String leaderId, long maxOffset, Map<String, ExtendedWorkerState> memberConfigs, WorkerCoordinator coordinator) {
    Map<String, Collection<String>> connectorAssignments = new HashMap<>();
    Map<String, Collection<ConnectorTaskId>> taskAssignments = new HashMap<>();
    // Perform round-robin task assignment. Assign all connectors and then all tasks because assigning both the
    // connector and its tasks can lead to very uneven distribution of work in some common cases (e.g. for connectors
    // that generate only 1 task each; in a cluster of 2 or an even # of nodes, only even nodes will be assigned
    // connectors and only odd nodes will be assigned tasks, but tasks are, on average, actually more resource
    // intensive than connectors).
    List<String> connectorsSorted = sorted(coordinator.configSnapshot().connectors());
    CircularIterator<String> memberIt = new CircularIterator<>(sorted(memberConfigs.keySet()));
    for (String connectorId : connectorsSorted) {
        String connectorAssignedTo = memberIt.next();
        log.trace("Assigning connector {} to {}", connectorId, connectorAssignedTo);
        Collection<String> memberConnectors = connectorAssignments.computeIfAbsent(connectorAssignedTo, k -> new ArrayList<>());
        memberConnectors.add(connectorId);
    }
    for (String connectorId : connectorsSorted) {
        for (ConnectorTaskId taskId : sorted(coordinator.configSnapshot().tasks(connectorId))) {
            String taskAssignedTo = memberIt.next();
            log.trace("Assigning task {} to {}", taskId, taskAssignedTo);
            Collection<ConnectorTaskId> memberTasks = taskAssignments.computeIfAbsent(taskAssignedTo, k -> new ArrayList<>());
            memberTasks.add(taskId);
        }
    }
    coordinator.leaderState(new LeaderState(memberConfigs, connectorAssignments, taskAssignments));
    return fillAssignmentsAndSerialize(memberConfigs.keySet(), Assignment.NO_ERROR, leaderId, memberConfigs.get(leaderId).url(), maxOffset, connectorAssignments, taskAssignments);
}
Also used : ConnectorTaskId(org.apache.kafka.connect.util.ConnectorTaskId) HashMap(java.util.HashMap) CircularIterator(org.apache.kafka.common.utils.CircularIterator) LeaderState(org.apache.kafka.connect.runtime.distributed.WorkerCoordinator.LeaderState) Collection(java.util.Collection)

Aggregations

ConnectorTaskId (org.apache.kafka.connect.util.ConnectorTaskId)111 Test (org.junit.Test)59 PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest)45 HashMap (java.util.HashMap)26 ArrayList (java.util.ArrayList)25 Map (java.util.Map)18 FutureCallback (org.apache.kafka.connect.util.FutureCallback)16 ConnectException (org.apache.kafka.connect.errors.ConnectException)15 Callback (org.apache.kafka.connect.util.Callback)15 Connector (org.apache.kafka.connect.connector.Connector)13 NotFoundException (org.apache.kafka.connect.errors.NotFoundException)12 ConnectorInfo (org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo)12 SinkConnector (org.apache.kafka.connect.sink.SinkConnector)12 TaskStatus (org.apache.kafka.connect.runtime.TaskStatus)11 WorkerConnector (org.apache.kafka.connect.runtime.WorkerConnector)11 SourceConnector (org.apache.kafka.connect.source.SourceConnector)11 Herder (org.apache.kafka.connect.runtime.Herder)10 List (java.util.List)9 StatusBackingStore (org.apache.kafka.connect.storage.StatusBackingStore)9 ConnectorStateInfo (org.apache.kafka.connect.runtime.rest.entities.ConnectorStateInfo)8