Example usage of org.apache.kafka.connect.util.ConnectorTaskId in the Apache Kafka project: the extractTasks method of the ExtendedAssignment class.
/**
 * Extracts the connector task IDs stored under the given key of an assignment struct.
 * <p>
 * The key must be either {@code REVOKED_KEY_NAME} or {@code ASSIGNMENT_KEY_NAME}
 * (enforced by assertion). Entries whose task number equals {@code CONNECTOR_TASK}
 * represent the connector itself rather than a task and are skipped.
 *
 * @param struct the assignment struct to read from
 * @param key    the array field to extract task IDs from
 * @return the task IDs found under {@code key}, or an empty list if the field is absent
 */
private static Collection<ConnectorTaskId> extractTasks(Struct struct, String key) {
    assert REVOKED_KEY_NAME.equals(key) || ASSIGNMENT_KEY_NAME.equals(key);
    Object[] rawAssignments = struct.getArray(key);
    if (rawAssignments == null) {
        return Collections.emptyList();
    }
    List<ConnectorTaskId> result = new ArrayList<>();
    for (Object rawAssignment : rawAssignments) {
        Struct connectorAssignment = (Struct) rawAssignment;
        String connectorName = connectorAssignment.getString(CONNECTOR_KEY_NAME);
        for (Object rawTaskId : connectorAssignment.getArray(TASKS_KEY_NAME)) {
            int taskNum = (Integer) rawTaskId;
            // CONNECTOR_TASK is a sentinel for the connector entry itself, not a real task
            if (taskNum != CONNECTOR_TASK) {
                result.add(new ConnectorTaskId(connectorName, taskNum));
            }
        }
    }
    return result;
}
Example usage of org.apache.kafka.connect.util.ConnectorTaskId in the Apache Kafka project: the computeDuplicatedAssignments method of the IncrementalCooperativeAssignor class.
/**
 * Computes, per worker, the connectors and tasks that are duplicated across the group
 * and must therefore be revoked from that worker.
 * <p>
 * Idiom fix: uses {@link Map#forEach} instead of the {@code entrySet().stream().forEach}
 * anti-pattern — same iteration, no intermediate stream, and the key/value pair is
 * destructured directly.
 *
 * @param memberConfigs        the current worker states in the group
 * @param connectorAssignments connectors currently assigned to each worker
 * @param taskAssignment       tasks currently assigned to each worker
 * @return for each affected worker, the duplicated connectors and tasks to revoke
 */
private Map<String, ConnectorsAndTasks> computeDuplicatedAssignments(Map<String, ExtendedWorkerState> memberConfigs, Map<String, Collection<String>> connectorAssignments, Map<String, Collection<ConnectorTaskId>> taskAssignment) {
    ConnectorsAndTasks duplicatedAssignments = duplicatedAssignments(memberConfigs);
    log.debug("Duplicated assignments: {}", duplicatedAssignments);
    Map<String, ConnectorsAndTasks> toRevoke = new HashMap<>();
    if (!duplicatedAssignments.connectors().isEmpty()) {
        connectorAssignments.forEach((worker, connectors) -> {
            // Intersect this worker's connectors with the duplicated set
            Set<String> duplicatedConnectors = new HashSet<>(duplicatedAssignments.connectors());
            duplicatedConnectors.retainAll(connectors);
            if (!duplicatedConnectors.isEmpty()) {
                toRevoke.computeIfAbsent(worker, v -> new ConnectorsAndTasks.Builder().build()).connectors().addAll(duplicatedConnectors);
            }
        });
    }
    if (!duplicatedAssignments.tasks().isEmpty()) {
        taskAssignment.forEach((worker, tasks) -> {
            // Intersect this worker's tasks with the duplicated set
            Set<ConnectorTaskId> duplicatedTasks = new HashSet<>(duplicatedAssignments.tasks());
            duplicatedTasks.retainAll(tasks);
            if (!duplicatedTasks.isEmpty()) {
                toRevoke.computeIfAbsent(worker, v -> new ConnectorsAndTasks.Builder().build()).tasks().addAll(duplicatedTasks);
            }
        });
    }
    return toRevoke;
}
Example usage of org.apache.kafka.connect.util.ConnectorTaskId in the Apache Kafka project: the awaitStopTasks method of the Worker class.
/**
 * Waits for each of the given tasks to stop, sharing a single graceful-shutdown
 * deadline across all of them: time spent waiting on earlier tasks reduces the
 * budget remaining for later ones, never going below zero.
 *
 * @param ids the IDs of the tasks to await
 */
private void awaitStopTasks(Collection<ConnectorTaskId> ids) {
    long deadline = time.milliseconds() + config.getLong(WorkerConfig.TASK_SHUTDOWN_GRACEFUL_TIMEOUT_MS_CONFIG);
    for (ConnectorTaskId taskId : ids) {
        awaitStopTask(taskId, Math.max(0, deadline - time.milliseconds()));
    }
}
Example usage of org.apache.kafka.connect.util.ConnectorTaskId in the Apache Kafka project: the tasks method of the ClusterConfigState class.
/**
 * Get the current set of task IDs for the specified connector.
 * <p>
 * Connectors with inconsistent configuration state, or with no recorded task
 * count, yield an empty list. Task IDs are numbered {@code 0..count-1}.
 *
 * @param connectorName the name of the connector to look up task configs for
 * @return an unmodifiable list of the connector's current task IDs
 */
public List<ConnectorTaskId> tasks(String connectorName) {
    if (inconsistentConnectors.contains(connectorName)) {
        return Collections.emptyList();
    }
    Integer taskCount = connectorTaskCounts.get(connectorName);
    if (taskCount == null) {
        return Collections.emptyList();
    }
    List<ConnectorTaskId> ids = new ArrayList<>(taskCount);
    for (int i = 0; i < taskCount; i++) {
        ids.add(new ConnectorTaskId(connectorName, i));
    }
    return Collections.unmodifiableList(ids);
}
Example usage of org.apache.kafka.connect.util.ConnectorTaskId in the Apache Kafka project: the putTopicStateShouldOverridePreviousState test method of the KafkaStatusBackingStoreFormatTest class.
@Test
public void putTopicStateShouldOverridePreviousState() {
    // Two statuses for the same connector/task but different topics; the second
    // carries a later timestamp (time.sleep advances the mock clock).
    TopicStatus firstTopicStatus = new TopicStatus(FOO_TOPIC, new ConnectorTaskId(FOO_CONNECTOR, 0), time.milliseconds());
    time.sleep(1000);
    TopicStatus secondTopicStatus = new TopicStatus(BAR_TOPIC, new ConnectorTaskId(FOO_CONNECTOR, 0), time.milliseconds());
    String firstKey = TOPIC_STATUS_PREFIX + FOO_TOPIC + TOPIC_STATUS_SEPARATOR + FOO_CONNECTOR;
    String secondKey = TOPIC_STATUS_PREFIX + BAR_TOPIC + TOPIC_STATUS_SEPARATOR + FOO_CONNECTOR;
    ArgumentCaptor<byte[]> valueCaptor = ArgumentCaptor.forClass(byte[].class);
    // Stub the status-log send for the second key: complete the callback and feed the
    // captured serialized value straight back into the store, simulating the produced
    // record being consumed again from the status topic.
    doAnswer(invocation -> {
        ((Callback) invocation.getArgument(2)).onCompletion(null, null);
        ConsumerRecord<String, byte[]> statusRecord = new ConsumerRecord<>(STATUS_TOPIC, 0, 0, secondKey, valueCaptor.getValue());
        store.read(statusRecord);
        return null;
    }).when(kafkaBasedLog).send(eq(secondKey), valueCaptor.capture(), any(Callback.class));
    // Seed the store with the first status by replaying a pre-serialized record.
    byte[] value = store.serializeTopicStatus(firstTopicStatus);
    ConsumerRecord<String, byte[]> statusRecord = new ConsumerRecord<>(STATUS_TOPIC, 0, 0, firstKey, value);
    store.read(statusRecord);
    // Writing the second status triggers the stubbed send/read round-trip above.
    store.put(secondTopicStatus);
    // check capture state
    assertEquals(secondTopicStatus, store.parseTopicStatus(valueCaptor.getValue()));
    // Both topic statuses should now be visible for the connector, keyed by topic.
    assertEquals(firstTopicStatus, store.getTopic(FOO_CONNECTOR, FOO_TOPIC));
    assertEquals(secondTopicStatus, store.getTopic(FOO_CONNECTOR, BAR_TOPIC));
    assertEquals(new HashSet<>(Arrays.asList(firstTopicStatus, secondTopicStatus)), new HashSet<>(store.getAllTopics(FOO_CONNECTOR)));
}
Aggregations