Use of org.apache.kafka.connect.util.ConnectorTaskId in project apache-kafka-on-k8s by banzaicloud.
Class DistributedHerder, method taskConfigs:
@Override
public void taskConfigs(final String connName, final Callback<List<TaskInfo>> callback) {
    log.trace("Submitting get task configuration request {}", connName);
    addRequest(new Callable<Void>() {
        @Override
        public Void call() throws Exception {
            if (checkRebalanceNeeded(callback))
                return null;
            if (!configState.contains(connName)) {
                callback.onCompletion(new NotFoundException("Connector " + connName + " not found"), null);
            } else {
                List<TaskInfo> result = new ArrayList<>();
                for (int i = 0; i < configState.taskCount(connName); i++) {
                    ConnectorTaskId id = new ConnectorTaskId(connName, i);
                    result.add(new TaskInfo(id, configState.taskConfig(id)));
                }
                callback.onCompletion(null, result);
            }
            return null;
        }
    }, forwardErrorCallback(callback));
}
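For context, a minimal caller-side sketch of the same request path: it assumes a Herder reference is already available and uses FutureCallback's Future side to block for the result (the helper name and the 90-second timeout are illustrative, not part of the snippet above).

import java.util.List;
import java.util.concurrent.TimeUnit;

import org.apache.kafka.connect.runtime.Herder;
import org.apache.kafka.connect.runtime.rest.entities.TaskInfo;
import org.apache.kafka.connect.util.ConnectorTaskId;
import org.apache.kafka.connect.util.FutureCallback;

public class TaskConfigsClientSketch {

    // Hypothetical helper: submits the asynchronous request and blocks until the herder responds.
    static List<TaskInfo> fetchTaskConfigs(Herder herder, String connName) throws Exception {
        FutureCallback<List<TaskInfo>> cb = new FutureCallback<>();
        herder.taskConfigs(connName, cb);                     // completed later by the herder's request thread
        List<TaskInfo> tasks = cb.get(90, TimeUnit.SECONDS);  // FutureCallback also implements Future
        for (TaskInfo info : tasks) {
            ConnectorTaskId id = info.id();                   // connector name plus zero-based task index
            System.out.println(id.connector() + " / task " + id.task());
        }
        return tasks;
    }
}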
Use of org.apache.kafka.connect.util.ConnectorTaskId in project apache-kafka-on-k8s by banzaicloud.
Class DistributedHerder, method halt:
// public for testing
public void halt() {
    synchronized (this) {
        // Clean up any connectors and tasks that are still running.
        log.info("Stopping connectors and tasks that are still assigned to this worker.");
        List<Callable<Void>> callables = new ArrayList<>();
        for (String connectorName : new ArrayList<>(worker.connectorNames())) {
            callables.add(getConnectorStoppingCallable(connectorName));
        }
        for (ConnectorTaskId taskId : new ArrayList<>(worker.taskIds())) {
            callables.add(getTaskStoppingCallable(taskId));
        }
        startAndStop(callables);
        member.stop();
        // Explicitly fail any outstanding requests so they actually get a response and get an
        // understandable reason for their failure.
        HerderRequest request = requests.pollFirst();
        while (request != null) {
            request.callback().onCompletion(new ConnectException("Worker is shutting down"), null);
            request = requests.pollFirst();
        }
        stopServices();
    }
}
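startAndStop(callables) hands the stopping callables to an executor and waits for all of them. The following self-contained sketch mirrors that fan-out with a plain ExecutorService; the ConnectorTaskId values and the println stand in for the real per-task shutdown work and are assumptions.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.apache.kafka.connect.util.ConnectorTaskId;

public class ParallelStopSketch {

    // Stand-in for getTaskStoppingCallable(taskId); the real callable stops the task on the Worker.
    static Callable<Void> stoppingCallable(ConnectorTaskId taskId) {
        return () -> {
            System.out.println("Stopping " + taskId);  // ConnectorTaskId.toString() is "<connector>-<task>"
            return null;
        };
    }

    public static void main(String[] args) throws InterruptedException {
        List<ConnectorTaskId> assigned = Arrays.asList(
                new ConnectorTaskId("file-sink", 0),
                new ConnectorTaskId("file-sink", 1));

        List<Callable<Void>> callables = new ArrayList<>();
        for (ConnectorTaskId id : assigned) {
            callables.add(stoppingCallable(id));
        }

        ExecutorService executor = Executors.newCachedThreadPool();
        try {
            executor.invokeAll(callables);  // like startAndStop(callables): run in parallel, wait for all
        } finally {
            executor.shutdown();
        }
    }
}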
Use of org.apache.kafka.connect.util.ConnectorTaskId in project apache-kafka-on-k8s by banzaicloud.
Class DistributedHerder, method startWork:
private void startWork() {
    // Start assigned connectors and tasks
    log.info("Starting connectors and tasks using config offset {}", assignment.offset());
    List<Callable<Void>> callables = new ArrayList<>();
    for (String connectorName : assignment.connectors()) {
        callables.add(getConnectorStartingCallable(connectorName));
    }
    for (ConnectorTaskId taskId : assignment.tasks()) {
        callables.add(getTaskStartingCallable(taskId));
    }
    startAndStop(callables);
    log.info("Finished starting connectors and tasks");
}
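Each assigned task id here is a ConnectorTaskId, which pairs the connector name with a zero-based task index. A short sketch of its accessors (the example name and index are made up):

import org.apache.kafka.connect.util.ConnectorTaskId;

public class ConnectorTaskIdSketch {
    public static void main(String[] args) {
        ConnectorTaskId taskId = new ConnectorTaskId("my-connector", 2);

        System.out.println(taskId.connector());  // "my-connector"
        System.out.println(taskId.task());       // 2
        System.out.println(taskId);              // "my-connector-2" (toString joins name and index)

        // Value semantics: two ids with the same connector name and index are equal.
        System.out.println(taskId.equals(new ConnectorTaskId("my-connector", 2)));  // true
    }
}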
Use of org.apache.kafka.connect.util.ConnectorTaskId in project apache-kafka-on-k8s by banzaicloud.
Class ConnectorsResource, method restartTask:
@POST
@Path("/{connector}/tasks/{task}/restart")
public void restartTask(@PathParam("connector") final String connector,
                        @PathParam("task") final Integer task,
                        @QueryParam("forward") final Boolean forward) throws Throwable {
    FutureCallback<Void> cb = new FutureCallback<>();
    ConnectorTaskId taskId = new ConnectorTaskId(connector, task);
    herder.restartTask(taskId, cb);
    completeOrForwardRequest(cb, "/connectors/" + connector + "/tasks/" + task + "/restart", "POST", null, forward);
}
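The resource just turns the path parameters into a ConnectorTaskId and delegates to the herder. A minimal sketch of driving the same Herder call programmatically; the helper name, blocking wait, and timeout are assumptions:

import java.util.concurrent.TimeUnit;

import org.apache.kafka.connect.runtime.Herder;
import org.apache.kafka.connect.util.ConnectorTaskId;
import org.apache.kafka.connect.util.FutureCallback;

public class RestartTaskSketch {

    // Hypothetical helper: restart task <task> of connector <connector> and wait for the outcome.
    static void restart(Herder herder, String connector, int task) throws Exception {
        ConnectorTaskId taskId = new ConnectorTaskId(connector, task);
        FutureCallback<Void> cb = new FutureCallback<>();
        herder.restartTask(taskId, cb);
        cb.get(90, TimeUnit.SECONDS);  // throws if the herder reported an error (e.g. unknown task)
    }
}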
Use of org.apache.kafka.connect.util.ConnectorTaskId in project apache-kafka-on-k8s by banzaicloud.
Class MemoryConfigBackingStore, method taskConfigListAsMap:
private static Map<ConnectorTaskId, Map<String, String>> taskConfigListAsMap(String connector, List<Map<String, String>> configs) {
    int index = 0;
    Map<ConnectorTaskId, Map<String, String>> result = new TreeMap<>();
    for (Map<String, String> taskConfigMap : configs) {
        result.put(new ConnectorTaskId(connector, index++), taskConfigMap);
    }
    return result;
}
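Because ConnectorTaskId implements Comparable (ordered by connector name, then task index), the TreeMap above yields the task configs in a stable, per-connector order. A small sketch with made-up config maps:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

import org.apache.kafka.connect.util.ConnectorTaskId;

public class TaskConfigMapSketch {
    public static void main(String[] args) {
        List<Map<String, String>> configs = Arrays.asList(
                Collections.singletonMap("file", "/tmp/a.txt"),
                Collections.singletonMap("file", "/tmp/b.txt"));

        Map<ConnectorTaskId, Map<String, String>> result = new TreeMap<>();
        int index = 0;
        for (Map<String, String> taskConfigMap : configs) {
            result.put(new ConnectorTaskId("file-source", index++), taskConfigMap);
        }

        // Iteration order follows ConnectorTaskId's natural ordering: file-source-0, then file-source-1.
        result.forEach((id, config) -> System.out.println(id + " -> " + config));
    }
}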