Search in sources :

Example 21 with ConnectorTaskId

Use of org.apache.kafka.connect.util.ConnectorTaskId in the Apache Kafka project.

Source: the DistributedHerder class, method halt().

// public for testing
public void halt() {
    synchronized (this) {
        // Shut down everything still assigned to this worker before tearing the services down.
        log.info("Stopping connectors and tasks that are still assigned to this worker.");
        List<Callable<Void>> stopActions = new ArrayList<>();
        // Snapshot the name/id collections first so stopping entries cannot disturb iteration.
        for (String connector : new ArrayList<>(worker.connectorNames())) {
            stopActions.add(getConnectorStoppingCallable(connector));
        }
        for (ConnectorTaskId task : new ArrayList<>(worker.taskIds())) {
            stopActions.add(getTaskStoppingCallable(task));
        }
        startAndStop(stopActions);
        member.stop();
        // Explicitly fail every queued request so its caller receives a response with an
        // understandable reason for the failure, rather than never hearing back.
        for (DistributedHerderRequest pending = requests.pollFirst(); pending != null; pending = requests.pollFirst()) {
            pending.callback().onCompletion(new ConnectException("Worker is shutting down"), null);
        }
        stopServices();
    }
}
Also used : ConnectorTaskId(org.apache.kafka.connect.util.ConnectorTaskId) ArrayList(java.util.ArrayList) Callable(java.util.concurrent.Callable) ConnectException(org.apache.kafka.connect.errors.ConnectException)

Example 22 with ConnectorTaskId

Use of org.apache.kafka.connect.util.ConnectorTaskId in the Apache Kafka project.

Source: the DistributedHerder class, method taskConfigs().

@Override
public void taskConfigs(final String connName, final Callback<List<TaskInfo>> callback) {
    log.trace("Submitting get task configuration request {}", connName);
    // Queue the read on the herder's request thread; errors are forwarded to the callback.
    addRequest(() -> {
        if (checkRebalanceNeeded(callback))
            return null;
        if (!configState.contains(connName)) {
            // Unknown connector: report not-found instead of returning an empty list.
            callback.onCompletion(new NotFoundException("Connector " + connName + " not found"), null);
            return null;
        }
        // Task ids are dense indices 0..taskCount-1 for the connector.
        List<TaskInfo> taskInfos = new ArrayList<>();
        for (int index = 0; index < configState.taskCount(connName); index++) {
            ConnectorTaskId taskId = new ConnectorTaskId(connName, index);
            taskInfos.add(new TaskInfo(taskId, configState.rawTaskConfig(taskId)));
        }
        callback.onCompletion(null, taskInfos);
        return null;
    }, forwardErrorCallback(callback));
}
Also used : TaskInfo(org.apache.kafka.connect.runtime.rest.entities.TaskInfo) ConnectorTaskId(org.apache.kafka.connect.util.ConnectorTaskId) ArrayList(java.util.ArrayList) NotFoundException(org.apache.kafka.connect.errors.NotFoundException)

Example 23 with ConnectorTaskId

Use of org.apache.kafka.connect.util.ConnectorTaskId in the Apache Kafka project.

Source: the DistributedHerder class, method doRestartConnectorAndTasks().

/**
 * Builds and executes a restart plan for the connector and its tasks from <code>request</code>.
 * Execution of a plan involves triggering the stop of eligible connector/tasks and then queuing the start for eligible connector/tasks.
 *
 * @param request the request to restart connector and tasks
 */
protected synchronized void doRestartConnectorAndTasks(RestartRequest request) {
    String connectorName = request.connectorName();
    Optional<RestartPlan> maybePlan = buildRestartPlan(request);
    if (!maybePlan.isPresent()) {
        log.debug("Skipping restart of connector '{}' since no status is available: {}", connectorName, request);
        return;
    }
    RestartPlan plan = maybePlan.get();
    log.info("Executing {}", plan);
    // If requested, stop the connector and any tasks, marking each as restarting
    final ExtendedAssignment currentAssignments = assignment;
    // Restrict the plan to tasks assigned to THIS worker; tasks owned by other workers are
    // restarted by those workers when they process the same restart request.
    final Collection<ConnectorTaskId> assignedIdsToRestart = plan.taskIdsToRestart().stream().filter(taskId -> currentAssignments.tasks().contains(taskId)).collect(Collectors.toList());
    final boolean restartConnector = plan.shouldRestartConnector() && currentAssignments.connectors().contains(connectorName);
    final boolean restartTasks = !assignedIdsToRestart.isEmpty();
    if (restartConnector) {
        worker.stopAndAwaitConnector(connectorName);
        onRestart(connectorName);
    }
    if (restartTasks) {
        // Stop the tasks and mark as restarting
        worker.stopAndAwaitTasks(assignedIdsToRestart);
        assignedIdsToRestart.forEach(this::onRestart);
    }
    // Now restart the connector and tasks
    if (restartConnector) {
        try {
            startConnector(connectorName, (error, targetState) -> {
                if (error == null) {
                    log.info("Connector '{}' restart successful", connectorName);
                } else {
                    log.error("Connector '{}' restart failed", connectorName, error);
                }
            });
        } catch (Throwable t) {
            log.error("Connector '{}' restart failed", connectorName, t);
        }
    }
    if (restartTasks) {
        log.debug("Restarting {} of {} tasks for {}", assignedIdsToRestart.size(), plan.totalTaskCount(), request);
        // Fix: iterate only the assigned subset (the tasks stopped above). The previous code
        // iterated plan.taskIdsToRestart(), which also started tasks owned by other workers,
        // producing duplicate running task instances across the cluster.
        assignedIdsToRestart.forEach(taskId -> {
            try {
                if (startTask(taskId)) {
                    log.info("Task '{}' restart successful", taskId);
                } else {
                    log.error("Task '{}' restart failed", taskId);
                }
            } catch (Throwable t) {
                log.error("Task '{}' restart failed", taskId, t);
            }
        });
        log.debug("Restarted {} of {} tasks for {} as requested", assignedIdsToRestart.size(), plan.totalTaskCount(), request);
    }
    log.info("Completed {}", plan);
}
Also used : Worker(org.apache.kafka.connect.runtime.Worker) SinkUtils(org.apache.kafka.connect.util.SinkUtils) Arrays(java.util.Arrays) TimeoutException(java.util.concurrent.TimeoutException) AlreadyExistsException(org.apache.kafka.connect.errors.AlreadyExistsException) KeyGenerator(javax.crypto.KeyGenerator) SessionKey(org.apache.kafka.connect.runtime.SessionKey) ConnectorInfo(org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo) RestartRequest(org.apache.kafka.connect.runtime.RestartRequest) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) LogContext(org.apache.kafka.common.utils.LogContext) Map(java.util.Map) UriBuilder(javax.ws.rs.core.UriBuilder) ThreadUtils(org.apache.kafka.common.utils.ThreadUtils) RestClient(org.apache.kafka.connect.runtime.rest.RestClient) ConfigDef(org.apache.kafka.common.config.ConfigDef) CommonClientConfigs(org.apache.kafka.clients.CommonClientConfigs) Sensor(org.apache.kafka.common.metrics.Sensor) Time(org.apache.kafka.common.utils.Time) WakeupException(org.apache.kafka.common.errors.WakeupException) Collection(java.util.Collection) CumulativeSum(org.apache.kafka.common.metrics.stats.CumulativeSum) Set(java.util.Set) NavigableSet(java.util.NavigableSet) ConfigValue(org.apache.kafka.common.config.ConfigValue) Collectors(java.util.stream.Collectors) AbstractHerder(org.apache.kafka.connect.runtime.AbstractHerder) Executors(java.util.concurrent.Executors) Objects(java.util.Objects) CONNECT_PROTOCOL_V0(org.apache.kafka.connect.runtime.distributed.ConnectProtocol.CONNECT_PROTOCOL_V0) ConnectorStateInfo(org.apache.kafka.connect.runtime.rest.entities.ConnectorStateInfo) CloseableConnectorContext(org.apache.kafka.connect.runtime.CloseableConnectorContext) List(java.util.List) EAGER(org.apache.kafka.connect.runtime.distributed.ConnectProtocolCompatibility.EAGER) ConnectMetrics(org.apache.kafka.connect.runtime.ConnectMetrics) SourceConnectorConfig(org.apache.kafka.connect.runtime.SourceConnectorConfig) 
Response(javax.ws.rs.core.Response) Optional(java.util.Optional) TargetState(org.apache.kafka.connect.runtime.TargetState) SecretKey(javax.crypto.SecretKey) BadRequestException(org.apache.kafka.connect.runtime.rest.errors.BadRequestException) ConnectorClientConfigOverridePolicy(org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy) ConnectorConfig(org.apache.kafka.connect.runtime.ConnectorConfig) TaskStatus(org.apache.kafka.connect.runtime.TaskStatus) Max(org.apache.kafka.common.metrics.stats.Max) Connector(org.apache.kafka.connect.connector.Connector) Exit(org.apache.kafka.common.utils.Exit) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) ConnectorTaskId(org.apache.kafka.connect.util.ConnectorTaskId) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) Callable(java.util.concurrent.Callable) NotFoundException(org.apache.kafka.connect.errors.NotFoundException) ConfigBackingStore(org.apache.kafka.connect.storage.ConfigBackingStore) AtomicReference(java.util.concurrent.atomic.AtomicReference) TOPIC_TRACKING_ENABLE_CONFIG(org.apache.kafka.connect.runtime.WorkerConfig.TOPIC_TRACKING_ENABLE_CONFIG) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) CONNECT_PROTOCOL_V2(org.apache.kafka.connect.runtime.distributed.IncrementalCooperativeConnectProtocol.CONNECT_PROTOCOL_V2) CONNECT_PROTOCOL_V1(org.apache.kafka.connect.runtime.distributed.IncrementalCooperativeConnectProtocol.CONNECT_PROTOCOL_V1) MetricGroup(org.apache.kafka.connect.runtime.ConnectMetrics.MetricGroup) NoSuchElementException(java.util.NoSuchElementException) ExecutorService(java.util.concurrent.ExecutorService) SinkConnector(org.apache.kafka.connect.sink.SinkConnector) Utils(org.apache.kafka.common.utils.Utils) HerderConnectorContext(org.apache.kafka.connect.runtime.HerderConnectorContext) Callback(org.apache.kafka.connect.util.Callback) RestartPlan(org.apache.kafka.connect.runtime.RestartPlan) Logger(org.slf4j.Logger) 
InternalRequestSignature(org.apache.kafka.connect.runtime.rest.InternalRequestSignature) HerderRequest(org.apache.kafka.connect.runtime.HerderRequest) SinkConnectorConfig(org.apache.kafka.connect.runtime.SinkConnectorConfig) ConnectRestException(org.apache.kafka.connect.runtime.rest.errors.ConnectRestException) ConnectMetricsRegistry(org.apache.kafka.connect.runtime.ConnectMetricsRegistry) TaskInfo(org.apache.kafka.connect.runtime.rest.entities.TaskInfo) StatusBackingStore(org.apache.kafka.connect.storage.StatusBackingStore) TimeUnit(java.util.concurrent.TimeUnit) AtomicLong(java.util.concurrent.atomic.AtomicLong) ConcurrentSkipListSet(java.util.concurrent.ConcurrentSkipListSet) Avg(org.apache.kafka.common.metrics.stats.Avg) ConnectException(org.apache.kafka.connect.errors.ConnectException) LinkedBlockingDeque(java.util.concurrent.LinkedBlockingDeque) Collections(java.util.Collections) ConnectorTaskId(org.apache.kafka.connect.util.ConnectorTaskId) RestartPlan(org.apache.kafka.connect.runtime.RestartPlan)

Example 24 with ConnectorTaskId

Use of org.apache.kafka.connect.util.ConnectorTaskId in the Apache Kafka project.

Source: the IncrementalCooperativeAssignor class, method performTaskAssignment().

/**
 * Performs task assignment based on the incremental cooperative connect protocol.
 * Read more on the design and implementation in:
 * {@see https://cwiki.apache.org/confluence/display/KAFKA/KIP-415%3A+Incremental+Cooperative+Rebalancing+in+Kafka+Connect}
 *
 * @param leaderId the ID of the group leader
 * @param maxOffset the latest known offset of the configuration topic
 * @param memberConfigs the metadata of all the members of the group as gathered in the current
 * round of rebalancing
 * @param coordinator the worker coordinator instance that provides the configuration snapshot
 * and gets assigned the leader state during this assignment
 * @param protocolVersion the Connect subprotocol version
 * @return the serialized assignment of tasks to the whole group, including assigned or
 * revoked tasks
 */
protected Map<String, ByteBuffer> performTaskAssignment(String leaderId, long maxOffset, Map<String, ExtendedWorkerState> memberConfigs, WorkerCoordinator coordinator, short protocolVersion) {
    log.debug("Performing task assignment during generation: {} with memberId: {}", coordinator.generationId(), coordinator.memberId());
    // Base set: The previous assignment of connectors-and-tasks is a standalone snapshot that
    // can be used to calculate derived sets
    log.debug("Previous assignments: {}", previousAssignment);
    int lastCompletedGenerationId = coordinator.lastCompletedGenerationId();
    // A generation mismatch means the last assignment this leader computed was never synced;
    // its memory of "previous assignments" is stale and must be discarded.
    if (previousGenerationId != lastCompletedGenerationId) {
        log.debug("Clearing the view of previous assignments due to generation mismatch between " + "previous generation ID {} and last completed generation ID {}. This can " + "happen if the leader fails to sync the assignment within a rebalancing round. " + "The following view of previous assignments might be outdated and will be " + "ignored by the leader in the current computation of new assignments. " + "Possibly outdated previous assignments: {}", previousGenerationId, lastCompletedGenerationId, previousAssignment);
        this.previousAssignment = ConnectorsAndTasks.EMPTY;
    }
    ClusterConfigState snapshot = coordinator.configSnapshot();
    Set<String> configuredConnectors = new TreeSet<>(snapshot.connectors());
    Set<ConnectorTaskId> configuredTasks = configuredConnectors.stream().flatMap(c -> snapshot.tasks(c).stream()).collect(Collectors.toSet());
    // Base set: The set of configured connectors-and-tasks is a standalone snapshot that can
    // be used to calculate derived sets
    ConnectorsAndTasks configured = new ConnectorsAndTasks.Builder().with(configuredConnectors, configuredTasks).build();
    log.debug("Configured assignments: {}", configured);
    // Base set: The set of active connectors-and-tasks is a standalone snapshot that can be
    // used to calculate derived sets
    ConnectorsAndTasks activeAssignments = assignment(memberConfigs);
    log.debug("Active assignments: {}", activeAssignments);
    // If a revocation from the previous round is still reflected in the active assignments
    // (i.e. some worker never processed it), reset the tracked state
    // appropriately and be ready to re-apply revocation of tasks
    if (!previousRevocation.isEmpty()) {
        if (previousRevocation.connectors().stream().anyMatch(c -> activeAssignments.connectors().contains(c)) || previousRevocation.tasks().stream().anyMatch(t -> activeAssignments.tasks().contains(t))) {
            previousAssignment = activeAssignments;
            canRevoke = true;
        }
        previousRevocation.connectors().clear();
        previousRevocation.tasks().clear();
    }
    // Derived set: The set of deleted connectors-and-tasks is a derived set from the set
    // difference of previous - configured
    ConnectorsAndTasks deleted = diff(previousAssignment, configured);
    log.debug("Deleted assignments: {}", deleted);
    // Derived set: The set of remaining active connectors-and-tasks is a derived set from the
    // set difference of active - deleted
    ConnectorsAndTasks remainingActive = diff(activeAssignments, deleted);
    log.debug("Remaining (excluding deleted) active assignments: {}", remainingActive);
    // Derived set: The set of lost or unaccounted connectors-and-tasks is a derived set from
    // the set difference of previous - active - deleted
    ConnectorsAndTasks lostAssignments = diff(previousAssignment, activeAssignments, deleted);
    log.debug("Lost assignments: {}", lostAssignments);
    // Derived set: The set of new connectors-and-tasks is a derived set from the set
    // difference of configured - previous - active
    ConnectorsAndTasks newSubmissions = diff(configured, previousAssignment, activeAssignments);
    log.debug("New assignments: {}", newSubmissions);
    // A collection of the complete assignment
    List<WorkerLoad> completeWorkerAssignment = workerAssignment(memberConfigs, ConnectorsAndTasks.EMPTY);
    log.debug("Complete (ignoring deletions) worker assignments: {}", completeWorkerAssignment);
    // Per worker connector assignments without removing deleted connectors yet
    Map<String, Collection<String>> connectorAssignments = completeWorkerAssignment.stream().collect(Collectors.toMap(WorkerLoad::worker, WorkerLoad::connectors));
    log.debug("Complete (ignoring deletions) connector assignments: {}", connectorAssignments);
    // Per worker task assignments without removing deleted connectors yet
    Map<String, Collection<ConnectorTaskId>> taskAssignments = completeWorkerAssignment.stream().collect(Collectors.toMap(WorkerLoad::worker, WorkerLoad::tasks));
    log.debug("Complete (ignoring deletions) task assignments: {}", taskAssignments);
    // A collection of the current assignment excluding the connectors-and-tasks to be deleted
    List<WorkerLoad> currentWorkerAssignment = workerAssignment(memberConfigs, deleted);
    Map<String, ConnectorsAndTasks> toRevoke = computeDeleted(deleted, connectorAssignments, taskAssignments);
    log.debug("Connector and task to delete assignments: {}", toRevoke);
    // Revoking redundant connectors/tasks if the workers have duplicate assignments
    toRevoke.putAll(computeDuplicatedAssignments(memberConfigs, connectorAssignments, taskAssignments));
    log.debug("Connector and task to revoke assignments (include duplicated assignments): {}", toRevoke);
    // Recompute the complete assignment excluding the deleted connectors-and-tasks
    completeWorkerAssignment = workerAssignment(memberConfigs, deleted);
    connectorAssignments = completeWorkerAssignment.stream().collect(Collectors.toMap(WorkerLoad::worker, WorkerLoad::connectors));
    taskAssignments = completeWorkerAssignment.stream().collect(Collectors.toMap(WorkerLoad::worker, WorkerLoad::tasks));
    handleLostAssignments(lostAssignments, newSubmissions, completeWorkerAssignment, memberConfigs);
    // Do not revoke resources for re-assignment while a delayed rebalance is active
    // Also we do not revoke in two consecutive rebalances by the same leader
    canRevoke = delay == 0 && canRevoke;
    // Compute the connectors-and-tasks to be revoked for load balancing without taking into
    // account the deleted ones.
    log.debug("Can leader revoke tasks in this assignment? {} (delay: {})", canRevoke, delay);
    if (canRevoke) {
        Map<String, ConnectorsAndTasks> toExplicitlyRevoke = performTaskRevocation(activeAssignments, currentWorkerAssignment);
        // NOTE(review): this logs toRevoke, but toExplicitlyRevoke (the load-balancing
        // revocations just computed) looks like the intended argument — confirm.
        log.debug("Connector and task to revoke assignments: {}", toRevoke);
        toExplicitlyRevoke.forEach((worker, assignment) -> {
            ConnectorsAndTasks existing = toRevoke.computeIfAbsent(worker, v -> new ConnectorsAndTasks.Builder().build());
            existing.connectors().addAll(assignment.connectors());
            existing.tasks().addAll(assignment.tasks());
        });
        // Only allow revocation in the next round if nothing was revoked in this one
        // (see the "two consecutive rebalances" note above).
        canRevoke = toExplicitlyRevoke.size() == 0;
    } else {
        canRevoke = delay == 0;
    }
    assignConnectors(completeWorkerAssignment, newSubmissions.connectors());
    assignTasks(completeWorkerAssignment, newSubmissions.tasks());
    log.debug("Current complete assignments: {}", currentWorkerAssignment);
    log.debug("New complete assignments: {}", completeWorkerAssignment);
    Map<String, Collection<String>> currentConnectorAssignments = currentWorkerAssignment.stream().collect(Collectors.toMap(WorkerLoad::worker, WorkerLoad::connectors));
    Map<String, Collection<ConnectorTaskId>> currentTaskAssignments = currentWorkerAssignment.stream().collect(Collectors.toMap(WorkerLoad::worker, WorkerLoad::tasks));
    // Workers are only told the delta between what they have and what they should have.
    Map<String, Collection<String>> incrementalConnectorAssignments = diff(connectorAssignments, currentConnectorAssignments);
    Map<String, Collection<ConnectorTaskId>> incrementalTaskAssignments = diff(taskAssignments, currentTaskAssignments);
    log.debug("Incremental connector assignments: {}", incrementalConnectorAssignments);
    log.debug("Incremental task assignments: {}", incrementalTaskAssignments);
    coordinator.leaderState(new LeaderState(memberConfigs, connectorAssignments, taskAssignments));
    Map<String, ExtendedAssignment> assignments = fillAssignments(memberConfigs.keySet(), Assignment.NO_ERROR, leaderId, memberConfigs.get(leaderId).url(), maxOffset, incrementalConnectorAssignments, incrementalTaskAssignments, toRevoke, delay, protocolVersion);
    // Remember this round's outcome so the next rebalance can compute deltas against it.
    previousAssignment = computePreviousAssignment(toRevoke, connectorAssignments, taskAssignments, lostAssignments);
    previousGenerationId = coordinator.generationId();
    previousMembers = memberConfigs.keySet();
    log.debug("Actual assignments: {}", assignments);
    return serializeAssignments(assignments);
}
Also used : IntStream(java.util.stream.IntStream) ConnectorTaskId(org.apache.kafka.connect.util.ConnectorTaskId) HashMap(java.util.HashMap) Function(java.util.function.Function) ByteBuffer(java.nio.ByteBuffer) TreeSet(java.util.TreeSet) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) CONNECT_PROTOCOL_V2(org.apache.kafka.connect.runtime.distributed.IncrementalCooperativeConnectProtocol.CONNECT_PROTOCOL_V2) JoinGroupResponseMember(org.apache.kafka.common.message.JoinGroupResponseData.JoinGroupResponseMember) CONNECT_PROTOCOL_V1(org.apache.kafka.connect.runtime.distributed.IncrementalCooperativeConnectProtocol.CONNECT_PROTOCOL_V1) Assignment(org.apache.kafka.connect.runtime.distributed.ConnectProtocol.Assignment) LogContext(org.apache.kafka.common.utils.LogContext) Map(java.util.Map) LinkedHashSet(java.util.LinkedHashSet) Logger(org.slf4j.Logger) Time(org.apache.kafka.common.utils.Time) Iterator(java.util.Iterator) LeaderState(org.apache.kafka.connect.runtime.distributed.WorkerCoordinator.LeaderState) Collection(java.util.Collection) Set(java.util.Set) WorkerLoad(org.apache.kafka.connect.runtime.distributed.WorkerCoordinator.WorkerLoad) ConnectorsAndTasks(org.apache.kafka.connect.runtime.distributed.WorkerCoordinator.ConnectorsAndTasks) Collectors(java.util.stream.Collectors) Objects(java.util.Objects) List(java.util.List) Entry(java.util.Map.Entry) Collections(java.util.Collections) ConnectorsAndTasks(org.apache.kafka.connect.runtime.distributed.WorkerCoordinator.ConnectorsAndTasks) WorkerLoad(org.apache.kafka.connect.runtime.distributed.WorkerCoordinator.WorkerLoad) ConnectorTaskId(org.apache.kafka.connect.util.ConnectorTaskId) LeaderState(org.apache.kafka.connect.runtime.distributed.WorkerCoordinator.LeaderState) TreeSet(java.util.TreeSet) Collection(java.util.Collection)

Example 25 with ConnectorTaskId

Use of org.apache.kafka.connect.util.ConnectorTaskId in the Apache Kafka project.

Source: the IncrementalCooperativeAssignor class, method diff().

/**
 * Returns the connectors-and-tasks of {@code base} minus every entry present in any of
 * {@code toSubtract}. TreeSets are used so the result iterates in a deterministic order.
 */
private static ConnectorsAndTasks diff(ConnectorsAndTasks base, ConnectorsAndTasks... toSubtract) {
    Collection<String> remainingConnectors = new TreeSet<>(base.connectors());
    Collection<ConnectorTaskId> remainingTasks = new TreeSet<>(base.tasks());
    for (ConnectorsAndTasks removal : toSubtract) {
        remainingConnectors.removeAll(removal.connectors());
        remainingTasks.removeAll(removal.tasks());
    }
    return new ConnectorsAndTasks.Builder().with(remainingConnectors, remainingTasks).build();
}
Also used : ConnectorsAndTasks(org.apache.kafka.connect.runtime.distributed.WorkerCoordinator.ConnectorsAndTasks) ConnectorTaskId(org.apache.kafka.connect.util.ConnectorTaskId) TreeSet(java.util.TreeSet)

Aggregations

ConnectorTaskId (org.apache.kafka.connect.util.ConnectorTaskId)111 Test (org.junit.Test)59 PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest)45 HashMap (java.util.HashMap)26 ArrayList (java.util.ArrayList)25 Map (java.util.Map)18 FutureCallback (org.apache.kafka.connect.util.FutureCallback)16 ConnectException (org.apache.kafka.connect.errors.ConnectException)15 Callback (org.apache.kafka.connect.util.Callback)15 Connector (org.apache.kafka.connect.connector.Connector)13 NotFoundException (org.apache.kafka.connect.errors.NotFoundException)12 ConnectorInfo (org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo)12 SinkConnector (org.apache.kafka.connect.sink.SinkConnector)12 TaskStatus (org.apache.kafka.connect.runtime.TaskStatus)11 WorkerConnector (org.apache.kafka.connect.runtime.WorkerConnector)11 SourceConnector (org.apache.kafka.connect.source.SourceConnector)11 Herder (org.apache.kafka.connect.runtime.Herder)10 List (java.util.List)9 StatusBackingStore (org.apache.kafka.connect.storage.StatusBackingStore)9 ConnectorStateInfo (org.apache.kafka.connect.runtime.rest.entities.ConnectorStateInfo)8