Usage examples of org.apache.kafka.connect.util.ConnectorTaskId in the Apache Kafka project.
From class AbstractHerderTest, method testBuildRestartPlanForNoRestart.
// Verifies that buildRestartPlan() produces a plan that restarts nothing when the
// request targets only failed instances without including tasks and the connector
// itself is RUNNING. NOTE(review): the boolean argument order (onlyFailed=true,
// includeTasks=false) is inferred from the assertions below — confirm against the
// RestartRequest constructor.
@Test
public void testBuildRestartPlanForNoRestart() {
RestartRequest restartRequest = new RestartRequest(connector, true, false);
// Two tasks: one RUNNING and one FAILED. Even the FAILED task must not appear in
// the plan because the request excludes tasks.
ConnectorTaskId taskId1 = new ConnectorTaskId(connector, 1);
ConnectorTaskId taskId2 = new ConnectorTaskId(connector, 2);
List<TaskStatus> taskStatuses = new ArrayList<>();
taskStatuses.add(new TaskStatus(taskId1, AbstractStatus.State.RUNNING, workerId, generation));
taskStatuses.add(new TaskStatus(taskId2, AbstractStatus.State.FAILED, workerId, generation));
// Partial mock of the abstract class: only generation() is stubbed, so
// buildRestartPlan() below executes the real implementation under test.
AbstractHerder herder = partialMockBuilder(AbstractHerder.class).withConstructor(Worker.class, String.class, String.class, StatusBackingStore.class, ConfigBackingStore.class, ConnectorClientConfigOverridePolicy.class).withArgs(worker, workerId, kafkaClusterId, statusStore, configStore, noneConnectorClientConfigOverridePolicy).addMockedMethod("generation").createMock();
EasyMock.expect(herder.generation()).andStubReturn(generation);
EasyMock.expect(herder.rawConfig(connector)).andReturn(null);
// Status store reports the connector RUNNING plus the two task statuses built above.
EasyMock.expect(statusStore.get(connector)).andReturn(new ConnectorStatus(connector, AbstractStatus.State.RUNNING, workerId, generation));
EasyMock.expect(statusStore.getAll(connector)).andReturn(taskStatuses);
EasyMock.expect(worker.getPlugins()).andStubReturn(plugins);
replayAll();
Optional<RestartPlan> mayBeRestartPlan = herder.buildRestartPlan(restartRequest);
// A plan is still produced (the request was valid) — it just restarts nothing.
assertTrue(mayBeRestartPlan.isPresent());
RestartPlan restartPlan = mayBeRestartPlan.get();
assertFalse(restartPlan.shouldRestartConnector());
assertFalse(restartPlan.shouldRestartTasks());
assertTrue(restartPlan.taskIdsToRestart().isEmpty());
PowerMock.verifyAll();
}
Usage examples of org.apache.kafka.connect.util.ConnectorTaskId in the Apache Kafka project.
From class DistributedHerder, method reconfigureConnector.
// Updates configurations for a connector by requesting them from the connector, filling in parameters provided
// by the system, then checks whether any configs have actually changed before submitting the new configs to storage.
// The callback is completed exactly once: with null on success, or with the failure cause. Note that when the
// connector is not running, the method returns early WITHOUT completing the callback — presumably the caller
// handles that case; TODO confirm.
private void reconfigureConnector(final String connName, final Callback<Void> cb) {
try {
if (!worker.isRunning(connName)) {
log.info("Skipping reconfiguration of connector {} since it is not running", connName);
return;
}
// Build the typed config wrapper matching the connector's direction (sink vs. source);
// source connectors additionally carry the topic-creation setting.
Map<String, String> configs = configState.connectorConfig(connName);
ConnectorConfig connConfig;
if (worker.isSinkConnector(connName)) {
connConfig = new SinkConnectorConfig(plugins(), configs);
} else {
connConfig = new SourceConnectorConfig(plugins(), configs, worker.isTopicCreationEnabled());
}
// Ask the connector itself for its desired task configurations.
final List<Map<String, String>> taskProps = worker.connectorTaskConfigs(connName, connConfig);
boolean changed = false;
int currentNumTasks = configState.taskCount(connName);
if (taskProps.size() != currentNumTasks) {
log.debug("Change in connector task count from {} to {}, writing updated task configurations", currentNumTasks, taskProps.size());
changed = true;
} else {
// Same task count: compare each new task config against the stored one; any
// difference triggers a rewrite. Task ids are positional, starting at 0.
int index = 0;
for (Map<String, String> taskConfig : taskProps) {
if (!taskConfig.equals(configState.taskConfig(new ConnectorTaskId(connName, index)))) {
log.debug("Change in task configurations, writing updated task configurations");
changed = true;
break;
}
index++;
}
}
if (changed) {
// Undo config-provider transformations before persisting/forwarding raw configs.
List<Map<String, String>> rawTaskProps = reverseTransform(connName, configState, taskProps);
if (isLeader()) {
// Leader writes directly to the config backing store.
configBackingStore.putTaskConfigs(connName, rawTaskProps);
cb.onCompletion(null, null);
} else {
// We cannot forward the request on the same thread because this reconfiguration can happen as a result of connector
// addition or removal. If we blocked waiting for the response from leader, we may be kicked out of the worker group.
forwardRequestExecutor.submit(() -> {
try {
String leaderUrl = leaderUrl();
if (Utils.isBlank(leaderUrl)) {
cb.onCompletion(new ConnectException("Request to leader to " + "reconfigure connector tasks failed " + "because the URL of the leader's REST interface is empty!"), null);
return;
}
// POST the raw task configs to the leader's REST endpoint, signed if a session key is configured.
String reconfigUrl = UriBuilder.fromUri(leaderUrl).path("connectors").path(connName).path("tasks").build().toString();
log.trace("Forwarding task configurations for connector {} to leader", connName);
RestClient.httpRequest(reconfigUrl, "POST", null, rawTaskProps, null, config, sessionKey, requestSignatureAlgorithm);
cb.onCompletion(null, null);
} catch (ConnectException e) {
log.error("Request to leader to reconfigure connector tasks failed", e);
cb.onCompletion(e, null);
}
});
}
}
} catch (Throwable t) {
// Any failure up to this point is reported through the callback rather than thrown.
cb.onCompletion(t, null);
}
}
Usage examples of org.apache.kafka.connect.util.ConnectorTaskId in the Apache Kafka project.
From class DistributedHerder, method startWork.
/**
 * Starts the connectors and tasks assigned to this worker by the most recent rebalance.
 * Startup work is computed under the herder lock, executed outside it via
 * {@code startAndStop}, and then {@code runningAssignment} is replaced with the new
 * assignment (or with an empty assignment under the eager V0 protocol, which always
 * revokes everything before reassigning).
 */
private void startWork() {
    final List<Callable<Void>> startupCallables = new ArrayList<>();
    synchronized (this) {
        log.info("Starting connectors and tasks using config offset {}", assignment.offset());
        log.debug("Received assignment: {}", assignment);
        log.debug("Currently running assignment: {}", runningAssignment);

        // Only start connectors that are newly assigned relative to what already runs.
        for (final String connectorName : assignmentDifference(assignment.connectors(), runningAssignment.connectors())) {
            startupCallables.add(getConnectorStartingCallable(connectorName));
        }

        // Tasks stopped locally for reconfiguration are removed from the running set just
        // before computing the task difference, so they restart — but only if the fresh
        // post-rebalance assignment still includes them.
        log.debug("Tasks to restart from currently running assignment: {}", tasksToRestart);
        runningAssignment.tasks().removeAll(tasksToRestart);
        tasksToRestart.clear();

        for (final ConnectorTaskId taskId : assignmentDifference(assignment.tasks(), runningAssignment.tasks())) {
            startupCallables.add(getTaskStartingCallable(taskId));
        }
    }

    startAndStop(startupCallables);

    synchronized (this) {
        if (member.currentProtocolVersion() == CONNECT_PROTOCOL_V0) {
            runningAssignment = ExtendedAssignment.empty();
        } else {
            runningAssignment = assignment;
        }
    }

    log.info("Finished starting connectors and tasks");
}
Usage examples of org.apache.kafka.connect.util.ConnectorTaskId in the Apache Kafka project.
From class ConnectProtocol, method deserializeAssignment.
/**
 * Deserialize an assignment, as defined by this protocol, from the given buffer.
 * Entries whose task id equals {@code CONNECTOR_TASK} denote the connector itself;
 * all other entries denote real tasks.
 *
 * @param buffer the buffer containing a serialized assignment
 * @return the deserialized assignment
 * @throws SchemaException on incompatible Connect protocol version
 */
public static Assignment deserializeAssignment(ByteBuffer buffer) {
    final Struct header = CONNECT_PROTOCOL_HEADER_SCHEMA.read(buffer);
    checkVersionCompatibility(header.getShort(VERSION_KEY_NAME));

    final Struct body = ASSIGNMENT_V0.read(buffer);
    final short error = body.getShort(ERROR_KEY_NAME);
    final String leader = body.getString(LEADER_KEY_NAME);
    final String leaderUrl = body.getString(LEADER_URL_KEY_NAME);
    final long offset = body.getLong(CONFIG_OFFSET_KEY_NAME);

    final List<String> connectorIds = new ArrayList<>();
    final List<ConnectorTaskId> taskIds = new ArrayList<>();
    for (Object connectorEntry : body.getArray(ASSIGNMENT_KEY_NAME)) {
        final Struct connectorAssignment = (Struct) connectorEntry;
        final String connectorName = connectorAssignment.getString(CONNECTOR_KEY_NAME);
        for (Object rawTaskId : connectorAssignment.getArray(TASKS_KEY_NAME)) {
            final int taskId = (Integer) rawTaskId;
            if (taskId == CONNECTOR_TASK) {
                // Sentinel id: this entry assigns the connector, not a task.
                connectorIds.add(connectorName);
            } else {
                taskIds.add(new ConnectorTaskId(connectorName, taskId));
            }
        }
    }
    return new Assignment(error, leader, leaderUrl, offset, connectorIds, taskIds);
}
Usage examples of org.apache.kafka.connect.util.ConnectorTaskId in the Apache Kafka project.
From class EagerAssignor, method performTaskAssignment.
/**
 * Round-robin assignment of connectors and tasks across the group members, then
 * serialization of the per-member assignments.
 *
 * All connectors are assigned before any tasks: interleaving connector and task
 * assignment skews load in common cases (e.g. connectors generating a single task
 * each — in an even-sized cluster, even members would get all connectors and odd
 * members all tasks, and tasks are on average heavier than connectors).
 */
private Map<String, ByteBuffer> performTaskAssignment(String leaderId, long maxOffset, Map<String, ExtendedWorkerState> memberConfigs, WorkerCoordinator coordinator) {
    final Map<String, Collection<String>> connectorAssignments = new HashMap<>();
    final Map<String, Collection<ConnectorTaskId>> taskAssignments = new HashMap<>();

    final List<String> connectors = sorted(coordinator.configSnapshot().connectors());
    final CircularIterator<String> members = new CircularIterator<>(sorted(memberConfigs.keySet()));

    for (final String connectorId : connectors) {
        final String owner = members.next();
        log.trace("Assigning connector {} to {}", connectorId, owner);
        connectorAssignments.computeIfAbsent(owner, k -> new ArrayList<>()).add(connectorId);
    }

    // The same iterator keeps rotating from wherever connector assignment left off,
    // so tasks continue the round-robin rather than restarting it.
    for (final String connectorId : connectors) {
        for (final ConnectorTaskId taskId : sorted(coordinator.configSnapshot().tasks(connectorId))) {
            final String owner = members.next();
            log.trace("Assigning task {} to {}", taskId, owner);
            taskAssignments.computeIfAbsent(owner, k -> new ArrayList<>()).add(taskId);
        }
    }

    coordinator.leaderState(new LeaderState(memberConfigs, connectorAssignments, taskAssignments));
    return fillAssignmentsAndSerialize(memberConfigs.keySet(), Assignment.NO_ERROR, leaderId, memberConfigs.get(leaderId).url(), maxOffset, connectorAssignments, taskAssignments);
}
Aggregations