Use of org.apache.kafka.connect.util.ConnectorTaskId in project apache-kafka-on-k8s by banzaicloud.
Class StandaloneHerder, method createConnectorTasks.
private void createConnectorTasks(String connName, TargetState initialState) {
    Map<String, String> connConfigs = configState.connectorConfig(connName);
    for (ConnectorTaskId taskId : configState.tasks(connName)) {
        Map<String, String> taskConfigMap = configState.taskConfig(taskId);
        worker.startTask(taskId, connConfigs, taskConfigMap, this, initialState);
    }
}
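For context, ConnectorTaskId is a small value class that pairs a connector name with a zero-based task index; the loop above starts one task per id reported by the config state. A minimal sketch of its public surface (the connector name "my-source" is a hypothetical example):

import org.apache.kafka.connect.util.ConnectorTaskId;

public class ConnectorTaskIdSketch {
    public static void main(String[] args) {
        // "my-source" is a hypothetical connector name used only for illustration.
        ConnectorTaskId first = new ConnectorTaskId("my-source", 0);
        ConnectorTaskId second = new ConnectorTaskId("my-source", 1);

        System.out.println(first.connector()); // my-source
        System.out.println(first.task());      // 0

        // Ids are value objects: equality is by connector name plus task index.
        System.out.println(first.equals(new ConnectorTaskId("my-source", 0))); // true
        System.out.println(first.equals(second));                              // false
    }
}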
Use of org.apache.kafka.connect.util.ConnectorTaskId in project apache-kafka-on-k8s by banzaicloud.
Class KafkaConfigBackingStore, method putTaskConfigs.
/**
 * Write these task configurations and the associated commit messages, unless an inconsistency is found that
 * indicates we would be leaving one of the referenced connectors in an inconsistent state.
 *
 * @param connector the connector whose task configurations are being written
 * @param configs   the list of task configurations for the connector
 * @throws ConnectException if the task configurations do not resolve the inconsistencies found in the existing
 *         root and task configurations
 */
@Override
public void putTaskConfigs(String connector, List<Map<String, String>> configs) {
    // Make sure we're at the end of the log. We should be the only writer, but we want to be sure there is no
    // outstanding lagging data to consume.
    try {
        configLog.readToEnd().get(READ_TO_END_TIMEOUT_MS, TimeUnit.MILLISECONDS);
    } catch (InterruptedException | ExecutionException | TimeoutException e) {
        log.error("Failed to write root configuration to Kafka: ", e);
        throw new ConnectException("Error writing root configuration to Kafka", e);
    }

    int taskCount = configs.size();

    // Start sending all the individual updates
    int index = 0;
    for (Map<String, String> taskConfig : configs) {
        Struct connectConfig = new Struct(TASK_CONFIGURATION_V0);
        connectConfig.put("properties", taskConfig);
        byte[] serializedConfig = converter.fromConnectData(topic, TASK_CONFIGURATION_V0, connectConfig);
        log.debug("Writing configuration for task {}: {}", index, taskConfig);
        ConnectorTaskId connectorTaskId = new ConnectorTaskId(connector, index);
        configLog.send(TASK_KEY(connectorTaskId), serializedConfig);
        index++;
    }

    // Finally, send the commit to update the number of tasks and apply the new configs, then wait until we read
    // to the end of the log.
    try {
        // Read to end to ensure all the task configs have been written
        if (taskCount > 0) {
            configLog.readToEnd().get(READ_TO_END_TIMEOUT_MS, TimeUnit.MILLISECONDS);
        }

        // Write the commit message
        Struct connectConfig = new Struct(CONNECTOR_TASKS_COMMIT_V0);
        connectConfig.put("tasks", taskCount);
        byte[] serializedConfig = converter.fromConnectData(topic, CONNECTOR_TASKS_COMMIT_V0, connectConfig);
        log.debug("Writing commit for connector {} with {} tasks.", connector, taskCount);
        configLog.send(COMMIT_TASKS_KEY(connector), serializedConfig);

        // Read to end to ensure all the commit messages have been written
        configLog.readToEnd().get(READ_TO_END_TIMEOUT_MS, TimeUnit.MILLISECONDS);
    } catch (InterruptedException | ExecutionException | TimeoutException e) {
        log.error("Failed to write root configuration to Kafka: ", e);
        throw new ConnectException("Error writing root configuration to Kafka", e);
    }
}
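To make the record layout concrete: each task gets its own record keyed by TASK_KEY(connectorTaskId), followed by a single commit record keyed by COMMIT_TASKS_KEY(connector). The exact key constants are not shown in this snippet, so the prefixes below are assumptions; this is a sketch of the key shapes, not the store's actual helpers:

import org.apache.kafka.connect.util.ConnectorTaskId;

public class ConfigLogKeySketch {
    // Assumed prefixes; the real constants live in KafkaConfigBackingStore.
    private static final String TASK_PREFIX = "task-";
    private static final String COMMIT_TASKS_PREFIX = "commit-";

    // Assumed shape of TASK_KEY: "task-<connector>-<index>"
    static String taskKey(ConnectorTaskId taskId) {
        return TASK_PREFIX + taskId.connector() + "-" + taskId.task();
    }

    // Assumed shape of COMMIT_TASKS_KEY: "commit-<connector>"
    static String commitTasksKey(String connector) {
        return COMMIT_TASKS_PREFIX + connector;
    }

    public static void main(String[] args) {
        // Hypothetical connector "my-sink" with two tasks.
        System.out.println(taskKey(new ConnectorTaskId("my-sink", 0))); // task-my-sink-0
        System.out.println(taskKey(new ConnectorTaskId("my-sink", 1))); // task-my-sink-1
        System.out.println(commitTasksKey("my-sink"));                  // commit-my-sink
    }
}

The trailing commit record carries the task count, so a reader can compare it against the task records written since the last commit; a mismatch indicates a torn write, which appears to be the inconsistency the javadoc above refers to.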
Use of org.apache.kafka.connect.util.ConnectorTaskId in project apache-kafka-on-k8s by banzaicloud.
Class KafkaStatusBackingStore, method sendTaskStatus.
private void sendTaskStatus(final TaskStatus status, boolean safeWrite) {
    ConnectorTaskId taskId = status.id();
    CacheEntry<TaskStatus> entry = getOrAdd(taskId);
    String key = TASK_STATUS_PREFIX + taskId.connector() + "-" + taskId.task();
    send(key, status, entry, safeWrite);
}
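Assuming TASK_STATUS_PREFIX is "status-task-" (its value is not shown in this snippet), the key built above for a hypothetical task would look like this:

import org.apache.kafka.connect.util.ConnectorTaskId;

public class StatusKeySketch {
    // Assumed value of TASK_STATUS_PREFIX; the real constant lives in KafkaStatusBackingStore.
    static final String TASK_STATUS_PREFIX = "status-task-";

    public static void main(String[] args) {
        ConnectorTaskId taskId = new ConnectorTaskId("my-sink", 2); // hypothetical id
        String key = TASK_STATUS_PREFIX + taskId.connector() + "-" + taskId.task();
        System.out.println(key); // status-task-my-sink-2
    }
}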
Use of org.apache.kafka.connect.util.ConnectorTaskId in project apache-kafka-on-k8s by banzaicloud.
Class KafkaStatusBackingStore, method readTaskStatus.
private void readTaskStatus(String key, byte[] value) {
    ConnectorTaskId id = parseConnectorTaskId(key);
    if (id == null) {
        log.warn("Discarding record with invalid task status key {}", key);
        return;
    }
    if (value == null) {
        log.trace("Removing task status for {}", id);
        remove(id);
        return;
    }
    TaskStatus status = parseTaskStatus(id, value);
    if (status == null) {
        log.warn("Failed to parse task status with key {}", key);
        return;
    }
    synchronized (this) {
        log.trace("Received task {} status update {}", id, status);
        CacheEntry<TaskStatus> entry = getOrAdd(id);
        entry.put(status);
    }
}
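Going the other way, parseConnectorTaskId (not shown above) must recover the id from such a key. A sketch under the same prefix assumption; the task index is split off at the last dash, since connector names may themselves contain dashes. This mirrors the intent of the real method, which may differ in detail:

import org.apache.kafka.connect.util.ConnectorTaskId;

public class StatusKeyParseSketch {
    static final String TASK_STATUS_PREFIX = "status-task-"; // assumed, as above

    // Returns null for malformed keys, matching how readTaskStatus discards them.
    static ConnectorTaskId parseConnectorTaskId(String key) {
        if (!key.startsWith(TASK_STATUS_PREFIX))
            return null;
        String rest = key.substring(TASK_STATUS_PREFIX.length());
        int lastDash = rest.lastIndexOf('-');
        if (lastDash <= 0 || lastDash == rest.length() - 1)
            return null;
        try {
            int task = Integer.parseInt(rest.substring(lastDash + 1));
            return new ConnectorTaskId(rest.substring(0, lastDash), task);
        } catch (NumberFormatException e) {
            return null;
        }
    }

    public static void main(String[] args) {
        // Round-trips the hypothetical key from the previous sketch.
        System.out.println(parseConnectorTaskId("status-task-my-sink-2")); // my-sink-2
        System.out.println(parseConnectorTaskId("status-task-garbage"));   // null
    }
}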
Use of org.apache.kafka.connect.util.ConnectorTaskId in project apache-kafka-on-k8s by banzaicloud.
Class AbstractHerderTest, method connectorStatus.
@Test
public void connectorStatus() {
    ConnectorTaskId taskId = new ConnectorTaskId(connector, 0);

    AbstractHerder herder = partialMockBuilder(AbstractHerder.class)
            .withConstructor(Worker.class, String.class, String.class, StatusBackingStore.class, ConfigBackingStore.class)
            .withArgs(worker, workerId, kafkaClusterId, statusStore, configStore)
            .addMockedMethod("generation")
            .createMock();

    EasyMock.expect(herder.generation()).andStubReturn(generation);
    EasyMock.expect(herder.config(connector)).andReturn(null);
    EasyMock.expect(statusStore.get(connector))
            .andReturn(new ConnectorStatus(connector, AbstractStatus.State.RUNNING, workerId, generation));
    EasyMock.expect(statusStore.getAll(connector))
            .andReturn(Collections.singletonList(new TaskStatus(taskId, AbstractStatus.State.UNASSIGNED, workerId, generation)));
    EasyMock.expect(worker.getPlugins()).andStubReturn(plugins);
    replayAll();

    ConnectorStateInfo state = herder.connectorStatus(connector);

    assertEquals(connector, state.name());
    assertEquals("RUNNING", state.connector().state());
    assertEquals(1, state.tasks().size());
    assertEquals(workerId, state.connector().workerId());

    ConnectorStateInfo.TaskState taskState = state.tasks().get(0);
    assertEquals(0, taskState.id());
    assertEquals("UNASSIGNED", taskState.state());
    assertEquals(workerId, taskState.workerId());

    PowerMock.verifyAll();
}