Use of org.apache.kafka.connect.errors.ConnectException in project debezium by debezium.
The class ConnectionContext, method primaryClientFor.
/**
 * Obtain a supplier that will repeatedly try to obtain a client to the primary node of the replica set, waiting (and using
 * this context's back-off strategy) if required until the primary becomes available.
 *
 * @param replicaSet the replica set information; may not be null
 * @param handler the function that will be called when the primary could not be obtained; may not be null
 * @return a supplier for the primary's client; the supplier retries until a primary is available and throws a
 *         {@code ConnectException} once the maximum number of connection attempts is exceeded
 */
protected Supplier<MongoClient> primaryClientFor(ReplicaSet replicaSet, PrimaryConnectFailed handler) {
    Supplier<MongoClient> factory = () -> clientForPrimary(replicaSet);
    int maxAttempts = maxConnectionAttemptsForPrimary();
    return () -> {
        int attempts = 0;
        MongoClient primary = null;
        while (primary == null) {
            ++attempts;
            try {
                // Try to get the primary
                primary = factory.get();
                if (primary != null)
                    break;
            } catch (Throwable t) {
                handler.failed(attempts, maxAttempts - attempts, t);
            }
            if (attempts > maxAttempts) {
                throw new ConnectException("Unable to connect to primary node of '" + replicaSet + "' after " + attempts + " failed attempts");
            }
            handler.failed(attempts, maxAttempts - attempts, null);
            primaryBackoffStrategy.sleepWhen(true);
            continue;
        }
        return primary;
    };
}
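For orientation, here is a minimal usage sketch, not taken from Debezium itself. It assumes PrimaryConnectFailed is a functional interface whose failed(attemptNumber, attemptsRemaining, error) shape matches the calls above; the connectionContext, replicaSet, and LOGGER names are illustrative.

// Hedged sketch: obtain the supplier once, then call get() whenever a primary connection is needed.
// The handler lambda below merely logs each failed attempt; all names here are illustrative.
Supplier<MongoClient> primarySupplier = connectionContext.primaryClientFor(replicaSet,
        (attemptNumber, attemptsRemaining, error) -> LOGGER.warn("Attempt {} to reach the primary failed ({} attempts remaining)",
                attemptNumber, attemptsRemaining, error));
// Blocks (using the back-off strategy) until a primary is available, or throws ConnectException
// once the maximum number of attempts is exceeded.
MongoClient primary = primarySupplier.get();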
Use of org.apache.kafka.connect.errors.ConnectException in project kafka by apache.
The class WorkerSourceTask, method maybeCreateTopic.
// Due to transformations that may change the destination topic of a record (such as
// RegexRouter), topic creation cannot be batched for multiple topics
private void maybeCreateTopic(String topic) {
    if (!topicCreation.isTopicCreationRequired(topic)) {
        log.trace("Topic creation by the connector is disabled or the topic {} was previously created. " + "If auto.create.topics.enable is enabled on the broker, " + "the topic will be created with default settings", topic);
        return;
    }
    log.info("The task will send records to topic '{}' for the first time. Checking " + "whether topic exists", topic);
    Map<String, TopicDescription> existing = admin.describeTopics(topic);
    if (!existing.isEmpty()) {
        log.info("Topic '{}' already exists.", topic);
        topicCreation.addTopic(topic);
        return;
    }
    log.info("Creating topic '{}'", topic);
    TopicCreationGroup topicGroup = topicCreation.findFirstGroup(topic);
    log.debug("Topic '{}' matched topic creation group: {}", topic, topicGroup);
    NewTopic newTopic = topicGroup.newTopic(topic);
    TopicAdmin.TopicCreationResponse response = admin.createOrFindTopics(newTopic);
    if (response.isCreated(newTopic.name())) {
        topicCreation.addTopic(topic);
        log.info("Created topic '{}' using creation group {}", newTopic, topicGroup);
    } else if (response.isExisting(newTopic.name())) {
        topicCreation.addTopic(topic);
        log.info("Found existing topic '{}'", newTopic);
    } else {
        // The topic still does not exist and could not be created, so treat it as a task failure
        log.warn("Request to create new topic '{}' failed", topic);
        throw new ConnectException("Task failed to create new topic " + newTopic + ". Ensure " + "that the task is authorized to create topics or that the topic exists and " + "restart the task");
    }
}
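This path only matters when source-connector topic creation is configured. As a rough, illustrative sketch (the values are arbitrary, but topic.creation.enable, topic.creation.default.replication.factor, and topic.creation.default.partitions are the standard Connect settings), a connector configured along these lines causes isTopicCreationRequired(topic) to return true for unseen topics, provided topic.creation.enable has not been disabled on the worker.

// Illustrative connector properties (example values); with these, the worker builds a default
// topic creation group for the connector and maybeCreateTopic() attempts to create any
// destination topic it has not seen before.
Map<String, String> connectorProps = new HashMap<>();
connectorProps.put("name", "example-source");
connectorProps.put("connector.class", "org.apache.kafka.connect.file.FileStreamSourceConnector");
connectorProps.put("topic.creation.default.replication.factor", "3");
connectorProps.put("topic.creation.default.partitions", "6");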
Use of org.apache.kafka.connect.errors.ConnectException in project kafka by apache.
The class Worker, method startTask.
/**
 * Start a task managed by this worker.
 *
 * @param id the task ID.
 * @param configState the most recent cluster configuration state known to the worker.
 * @param connProps the connector properties.
 * @param taskProps the task properties.
 * @param statusListener a listener for the runtime status transitions of the task.
 * @param initialState the initial state of the connector.
 * @return true if the task started successfully.
 */
public boolean startTask(ConnectorTaskId id, ClusterConfigState configState, Map<String, String> connProps, Map<String, String> taskProps, TaskStatus.Listener statusListener, TargetState initialState) {
    final WorkerTask workerTask;
    final TaskStatus.Listener taskStatusListener = workerMetricsGroup.wrapStatusListener(statusListener);
    try (LoggingContext loggingContext = LoggingContext.forTask(id)) {
        log.info("Creating task {}", id);
        if (tasks.containsKey(id))
            throw new ConnectException("Task already exists in this worker: " + id);
        connectorStatusMetricsGroup.recordTaskAdded(id);
        ClassLoader savedLoader = plugins.currentThreadLoader();
        try {
            String connType = connProps.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG);
            ClassLoader connectorLoader = plugins.delegatingLoader().connectorLoader(connType);
            savedLoader = Plugins.compareAndSwapLoaders(connectorLoader);
            final ConnectorConfig connConfig = new ConnectorConfig(plugins, connProps);
            final TaskConfig taskConfig = new TaskConfig(taskProps);
            final Class<? extends Task> taskClass = taskConfig.getClass(TaskConfig.TASK_CLASS_CONFIG).asSubclass(Task.class);
            final Task task = plugins.newTask(taskClass);
            log.info("Instantiated task {} with version {} of type {}", id, task.version(), taskClass.getName());
            // By maintaining the connector's specific class loader for this thread here, we first
            // search for converters within the connector dependencies.
            // If any of these aren't found, that means the connector didn't configure specific converters,
            // so we should instantiate based upon the worker configuration
            Converter keyConverter = plugins.newConverter(connConfig, WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, ClassLoaderUsage.CURRENT_CLASSLOADER);
            Converter valueConverter = plugins.newConverter(connConfig, WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, ClassLoaderUsage.CURRENT_CLASSLOADER);
            HeaderConverter headerConverter = plugins.newHeaderConverter(connConfig, WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG, ClassLoaderUsage.CURRENT_CLASSLOADER);
            if (keyConverter == null) {
                keyConverter = plugins.newConverter(config, WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, ClassLoaderUsage.PLUGINS);
                log.info("Set up the key converter {} for task {} using the worker config", keyConverter.getClass(), id);
            } else {
                log.info("Set up the key converter {} for task {} using the connector config", keyConverter.getClass(), id);
            }
            if (valueConverter == null) {
                valueConverter = plugins.newConverter(config, WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, ClassLoaderUsage.PLUGINS);
                log.info("Set up the value converter {} for task {} using the worker config", valueConverter.getClass(), id);
            } else {
                log.info("Set up the value converter {} for task {} using the connector config", valueConverter.getClass(), id);
            }
            if (headerConverter == null) {
                headerConverter = plugins.newHeaderConverter(config, WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG, ClassLoaderUsage.PLUGINS);
                log.info("Set up the header converter {} for task {} using the worker config", headerConverter.getClass(), id);
            } else {
                log.info("Set up the header converter {} for task {} using the connector config", headerConverter.getClass(), id);
            }
            workerTask = buildWorkerTask(configState, connConfig, id, task, taskStatusListener, initialState, keyConverter, valueConverter, headerConverter, connectorLoader);
            workerTask.initialize(taskConfig);
            Plugins.compareAndSwapLoaders(savedLoader);
        } catch (Throwable t) {
            log.error("Failed to start task {}", id, t);
            // Can't be put in a finally block because it needs to be swapped before the call on
            // statusListener
            Plugins.compareAndSwapLoaders(savedLoader);
            connectorStatusMetricsGroup.recordTaskRemoved(id);
            taskStatusListener.onFailure(id, t);
            return false;
        }
        WorkerTask existing = tasks.putIfAbsent(id, workerTask);
        if (existing != null)
            throw new ConnectException("Task already exists in this worker: " + id);
        executor.submit(workerTask);
        if (workerTask instanceof WorkerSourceTask) {
            sourceTaskOffsetCommitter.schedule(id, (WorkerSourceTask) workerTask);
        }
        return true;
    }
}
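The converter setup above prefers converters named in the connector configuration and falls back to the worker configuration only when none are defined. As an illustrative sketch (values are arbitrary), a connector configured like this takes the "using the connector config" branches for its tasks.

// Illustrative connector properties overriding the worker-level converters; with these set,
// plugins.newConverter(connConfig, ...) returns a converter and the worker-config fallback
// branches above are skipped.
Map<String, String> connProps = new HashMap<>();
connProps.put("connector.class", "org.apache.kafka.connect.file.FileStreamSourceConnector");
connProps.put("key.converter", "org.apache.kafka.connect.storage.StringConverter");
connProps.put("value.converter", "org.apache.kafka.connect.json.JsonConverter");
connProps.put("value.converter.schemas.enable", "false");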
Use of org.apache.kafka.connect.errors.ConnectException in project kafka by apache.
The class Worker, method connectorTaskConfigs.
/**
 * Get a list of updated task properties for the tasks of this connector.
 *
 * @param connName the connector name.
 * @param connConfig the connector configuration from which the task configurations are derived.
 * @return a list of updated task properties.
 */
public List<Map<String, String>> connectorTaskConfigs(String connName, ConnectorConfig connConfig) {
    List<Map<String, String>> result = new ArrayList<>();
    try (LoggingContext loggingContext = LoggingContext.forConnector(connName)) {
        log.trace("Reconfiguring connector tasks for {}", connName);
        WorkerConnector workerConnector = connectors.get(connName);
        if (workerConnector == null)
            throw new ConnectException("Connector " + connName + " not found in this worker.");
        int maxTasks = connConfig.getInt(ConnectorConfig.TASKS_MAX_CONFIG);
        Map<String, String> connOriginals = connConfig.originalsStrings();
        Connector connector = workerConnector.connector();
        ClassLoader savedLoader = plugins.currentThreadLoader();
        try {
            savedLoader = Plugins.compareAndSwapLoaders(workerConnector.loader());
            String taskClassName = connector.taskClass().getName();
            for (Map<String, String> taskProps : connector.taskConfigs(maxTasks)) {
                // Ensure we don't modify the connector's copy of the config
                Map<String, String> taskConfig = new HashMap<>(taskProps);
                taskConfig.put(TaskConfig.TASK_CLASS_CONFIG, taskClassName);
                if (connOriginals.containsKey(SinkTask.TOPICS_CONFIG)) {
                    taskConfig.put(SinkTask.TOPICS_CONFIG, connOriginals.get(SinkTask.TOPICS_CONFIG));
                }
                if (connOriginals.containsKey(SinkTask.TOPICS_REGEX_CONFIG)) {
                    taskConfig.put(SinkTask.TOPICS_REGEX_CONFIG, connOriginals.get(SinkTask.TOPICS_REGEX_CONFIG));
                }
                result.add(taskConfig);
            }
        } finally {
            Plugins.compareAndSwapLoaders(savedLoader);
        }
    }
    return result;
}
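The per-task maps gathered here come from the connector's own taskConfigs(maxTasks). As a hedged sketch of what such an implementation might look like, here is a hypothetical source connector that splits a configured table list across tasks; configuredTables, baseProps, and the "tables" key are invented for illustration, while ConnectorUtils.groupPartitions is the stock Connect helper.

// Hypothetical Connector.taskConfigs() implementation: divides the table list into at most
// maxTasks groups and emits one configuration map per task.
@Override
public List<Map<String, String>> taskConfigs(int maxTasks) {
    int numGroups = Math.min(configuredTables.size(), maxTasks);
    List<List<String>> grouped = ConnectorUtils.groupPartitions(configuredTables, numGroups);
    List<Map<String, String>> taskConfigs = new ArrayList<>(grouped.size());
    for (List<String> group : grouped) {
        Map<String, String> props = new HashMap<>(baseProps); // settings shared by all tasks
        props.put("tables", String.join(",", group)); // hypothetical per-task key
        taskConfigs.add(props);
    }
    return taskConfigs;
}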
Use of org.apache.kafka.connect.errors.ConnectException in project kafka by apache.
The class Worker, method startConnector.
/**
 * Start a connector managed by this worker.
 *
 * @param connName the connector name.
 * @param connProps the properties of the connector.
 * @param ctx the connector runtime context.
 * @param statusListener a listener for the runtime status transitions of the connector.
 * @param initialState the initial state of the connector.
 * @param onConnectorStateChange invoked when the initial state change of the connector is completed.
 */
public void startConnector(String connName, Map<String, String> connProps, CloseableConnectorContext ctx, ConnectorStatus.Listener statusListener, TargetState initialState, Callback<TargetState> onConnectorStateChange) {
    final ConnectorStatus.Listener connectorStatusListener = workerMetricsGroup.wrapStatusListener(statusListener);
    try (LoggingContext loggingContext = LoggingContext.forConnector(connName)) {
        if (connectors.containsKey(connName)) {
            onConnectorStateChange.onCompletion(new ConnectException("Connector with name " + connName + " already exists"), null);
            return;
        }
        final WorkerConnector workerConnector;
        ClassLoader savedLoader = plugins.currentThreadLoader();
        try {
            // By the time we arrive here, CONNECTOR_CLASS_CONFIG has been validated already
            // Getting this value from the unparsed map will allow us to instantiate the
            // right config (source or sink)
            final String connClass = connProps.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG);
            ClassLoader connectorLoader = plugins.delegatingLoader().connectorLoader(connClass);
            savedLoader = Plugins.compareAndSwapLoaders(connectorLoader);
            log.info("Creating connector {} of type {}", connName, connClass);
            final Connector connector = plugins.newConnector(connClass);
            final ConnectorConfig connConfig = ConnectUtils.isSinkConnector(connector) ? new SinkConnectorConfig(plugins, connProps) : new SourceConnectorConfig(plugins, connProps, config.topicCreationEnable());
            final OffsetStorageReader offsetReader = new OffsetStorageReaderImpl(offsetBackingStore, connName, internalKeyConverter, internalValueConverter);
            workerConnector = new WorkerConnector(connName, connector, connConfig, ctx, metrics, connectorStatusListener, offsetReader, connectorLoader);
            log.info("Instantiated connector {} with version {} of type {}", connName, connector.version(), connector.getClass());
            workerConnector.transitionTo(initialState, onConnectorStateChange);
            Plugins.compareAndSwapLoaders(savedLoader);
        } catch (Throwable t) {
            log.error("Failed to start connector {}", connName, t);
            // Can't be put in a finally block because it needs to be swapped before the call on
            // statusListener
            Plugins.compareAndSwapLoaders(savedLoader);
            connectorStatusListener.onFailure(connName, t);
            onConnectorStateChange.onCompletion(t, null);
            return;
        }
        WorkerConnector existing = connectors.putIfAbsent(connName, workerConnector);
        if (existing != null) {
            onConnectorStateChange.onCompletion(new ConnectException("Connector with name " + connName + " already exists"), null);
            // Don't need to do any cleanup of the WorkerConnector instance (such as calling
            // shutdown() on it) here because it hasn't actually started running yet
            return;
        }
        executor.submit(workerConnector);
        log.info("Finished creating connector {}", connName);
    }
}
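The onConnectorStateChange callback is completed with an error if startup fails, or, via transitionTo, once the connector reaches its initial target state. A minimal caller-side sketch, not the herder's actual implementation:

// Illustrative callback: Callback#onCompletion receives either a non-null error or the
// target state the connector reached.
Callback<TargetState> onConnectorStateChange = (error, targetState) -> {
    if (error != null) {
        log.error("Connector failed to start", error);
    } else {
        log.info("Connector reached its initial target state: {}", targetState);
    }
};
worker.startConnector(connName, connProps, ctx, statusListener, TargetState.STARTED, onConnectorStateChange);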