Use of org.apache.kafka.connect.errors.ConnectException in project kafka by apache.
The class KafkaBasedLog, method start.

public void start() {
    log.info("Starting KafkaBasedLog with topic " + topic);
    producer = createProducer();
    consumer = createConsumer();
    List<TopicPartition> partitions = new ArrayList<>();
    // Until we have admin utilities we can use to check for the existence of this topic
    // and create it if it is missing, we rely on topic auto-creation
    List<PartitionInfo> partitionInfos = null;
    long started = time.milliseconds();
    while (partitionInfos == null && time.milliseconds() - started < CREATE_TOPIC_TIMEOUT_MS) {
        partitionInfos = consumer.partitionsFor(topic);
        Utils.sleep(Math.min(time.milliseconds() - started, 1000));
    }
    if (partitionInfos == null)
        throw new ConnectException("Could not look up partition metadata for offset backing store topic in"
                + " allotted period. This could indicate a connectivity issue, unavailable topic partitions, or if"
                + " this is your first use of the topic it may have taken too long to create.");
    for (PartitionInfo partition : partitionInfos)
        partitions.add(new TopicPartition(partition.topic(), partition.partition()));
    consumer.assign(partitions);
    readToLogEnd();
    thread = new WorkThread();
    thread.start();
    log.info("Finished reading KafkaBasedLog for topic " + topic);
    log.info("Started KafkaBasedLog for topic " + topic);
}
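Since the method relies on broker-side topic auto-creation, the bounded metadata wait is the interesting part. Below is a minimal standalone sketch of that pattern against the public KafkaConsumer API; the class name, the timeout value, and the use of Thread.sleep in place of Kafka's Utils.sleep are assumptions, not the project's code.

import java.util.List;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.connect.errors.ConnectException;

// Hypothetical helper illustrating the bounded wait in start() above.
public class TopicMetadataWait {

    // Assumed value; KafkaBasedLog defines its own CREATE_TOPIC_TIMEOUT_MS.
    private static final long CREATE_TOPIC_TIMEOUT_MS = 30_000L;

    public static List<PartitionInfo> waitForTopic(Consumer<byte[], byte[]> consumer, String topic) {
        List<PartitionInfo> partitionInfos = null;
        long started = System.currentTimeMillis();
        while (partitionInfos == null && System.currentTimeMillis() - started < CREATE_TOPIC_TIMEOUT_MS) {
            // May return null on older clients while metadata (or the
            // auto-created topic) is still unavailable.
            partitionInfos = consumer.partitionsFor(topic);
            try {
                // Back off between lookups, capped at one second.
                Thread.sleep(Math.min(System.currentTimeMillis() - started, 1000L));
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                break;
            }
        }
        if (partitionInfos == null)
            throw new ConnectException("Could not look up partition metadata for topic " + topic);
        return partitionInfos;
    }
}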

Use of org.apache.kafka.connect.errors.ConnectException in project kafka by apache.
The class SchemaSourceTask, method start.

@Override
public void start(Map<String, String> props) {
    final long throughput;
    try {
        name = props.get(NAME_CONFIG);
        id = Integer.parseInt(props.get(ID_CONFIG));
        topic = props.get(TOPIC_CONFIG);
        maxNumMsgs = Long.parseLong(props.get(NUM_MSGS_CONFIG));
        multipleSchema = Boolean.parseBoolean(props.get(MULTIPLE_SCHEMA_CONFIG));
        partitionCount = Integer.parseInt(props.containsKey(PARTITION_COUNT_CONFIG) ? props.get(PARTITION_COUNT_CONFIG) : "1");
        throughput = Long.parseLong(props.get(THROUGHPUT_CONFIG));
    } catch (NumberFormatException e) {
        throw new ConnectException("Invalid SchemaSourceTask configuration", e);
    }
    throttler = new ThroughputThrottler(throughput, System.currentTimeMillis());
    partition = Collections.singletonMap(ID_FIELD, id);
    Map<String, Object> previousOffset = this.context.offsetStorageReader().offset(partition);
    if (previousOffset != null) {
        seqno = (Long) previousOffset.get(SEQNO_FIELD) + 1;
    } else {
        seqno = 0;
    }
    startingSeqno = seqno;
    count = 0;
    log.info("Started SchemaSourceTask {}-{} producing to topic {} resuming from seqno {}", name, id, topic, startingSeqno);
}
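The resume logic above only works if every record the task emits carries a matching source offset map. What follows is a hedged sketch of that poll()-side contract using the public SourceRecord API; the schemas and values are assumptions meant to mirror the task, not the actual SchemaSourceTask source.

// Assumed companion logic for poll(): pair each record with the partition
// map built in start() and an offset map keyed by SEQNO_FIELD, so a
// restarted task resumes at previousOffset.get(SEQNO_FIELD) + 1.
Map<String, Long> offset = Collections.singletonMap(SEQNO_FIELD, seqno);
SourceRecord record = new SourceRecord(
        partition,      // source partition: Collections.singletonMap(ID_FIELD, id)
        offset,         // source offset read back by the next start()
        topic, null,    // let Kafka choose the topic partition
        Schema.STRING_SCHEMA, name,
        Schema.INT64_SCHEMA, seqno);
seqno++;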

Use of org.apache.kafka.connect.errors.ConnectException in project kafka by apache.
The class VerifiableSinkTask, method start.

@Override
public void start(Map<String, String> props) {
    try {
        name = props.get(NAME_CONFIG);
        id = Integer.parseInt(props.get(ID_CONFIG));
    } catch (NumberFormatException e) {
        throw new ConnectException("Invalid VerifiableSinkTask configuration", e);
    }
}
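The try/catch is the standard Connect fail-fast idiom: parse configuration eagerly in start() and surface malformed values as a ConnectException so the framework fails the task with a meaningful cause instead of a bare NumberFormatException. A minimal sketch of the idiom, with illustrative config keys and values:

// Hypothetical config map; "id" is deliberately malformed.
Map<String, String> props = new HashMap<>();
props.put("name", "verifiable-sink-1");
props.put("id", "not-a-number");
try {
    int id = Integer.parseInt(props.get("id"));
} catch (NumberFormatException e) {
    // Wrapping preserves the root cause while signalling a config error.
    throw new ConnectException("Invalid VerifiableSinkTask configuration", e);
}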

Use of org.apache.kafka.connect.errors.ConnectException in project kafka by apache.
The class Worker, method connectorTaskConfigs.

/**
 * Get a list of updated task properties for the tasks of this connector.
 *
 * @param connName the connector name.
 * @param maxTasks the maximum number of tasks.
 * @param sinkTopics a list of sink topics.
 * @return a list of updated task properties.
 */
public List<Map<String, String>> connectorTaskConfigs(String connName, int maxTasks, List<String> sinkTopics) {
    log.trace("Reconfiguring connector tasks for {}", connName);
    WorkerConnector workerConnector = connectors.get(connName);
    if (workerConnector == null)
        throw new ConnectException("Connector " + connName + " not found in this worker.");
    Connector connector = workerConnector.connector();
    List<Map<String, String>> result = new ArrayList<>();
    String taskClassName = connector.taskClass().getName();
    for (Map<String, String> taskProps : connector.taskConfigs(maxTasks)) {
        // Ensure we don't modify the connector's copy of the config
        Map<String, String> taskConfig = new HashMap<>(taskProps);
        taskConfig.put(TaskConfig.TASK_CLASS_CONFIG, taskClassName);
        if (sinkTopics != null)
            taskConfig.put(SinkTask.TOPICS_CONFIG, Utils.join(sinkTopics, ","));
        result.add(taskConfig);
    }
    return result;
}
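For context, a hedged sketch of how a caller might consume the result; the worker instance, connector name, and topic names are illustrative. TaskConfig.TASK_CLASS_CONFIG ("task.class") and SinkTask.TOPICS_CONFIG ("topics") are the keys injected above.

// Hypothetical caller, e.g. a herder recomputing task assignments after a
// connector reconfiguration.
List<Map<String, String>> taskConfigs =
        worker.connectorTaskConfigs("local-file-sink", 4, Arrays.asList("events", "audit"));
for (Map<String, String> taskConfig : taskConfigs) {
    // Each map now names the task implementation class and the sink topics.
    log.info("task.class={}, topics={}",
            taskConfig.get(TaskConfig.TASK_CLASS_CONFIG),
            taskConfig.get(SinkTask.TOPICS_CONFIG));
}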

Use of org.apache.kafka.connect.errors.ConnectException in project kafka by apache.
The class WorkerSinkTask, method initializeAndStart.

/**
 * Initializes and starts the SinkTask.
 */
protected void initializeAndStart() {
    log.debug("Initializing task {}", id);
    String topicsStr = taskConfig.get(SinkTask.TOPICS_CONFIG);
    if (topicsStr == null || topicsStr.isEmpty())
        throw new ConnectException("Sink tasks require a list of topics.");
    String[] topics = topicsStr.split(",");
    // Arrays.toString so the topic names, not the array's identity hash, are logged
    log.debug("Task {} subscribing to topics {}", id, Arrays.toString(topics));
    consumer.subscribe(Arrays.asList(topics), new HandleRebalance());
    task.initialize(context);
    task.start(taskConfig);
    log.info("Sink task {} finished initialization and start", this);
}
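The HandleRebalance listener passed to subscribe() is internal to WorkerSinkTask; the skeleton below only shows the public ConsumerRebalanceListener surface it implements, with the real pause/flush/commit behavior reduced to comments and everything else an assumption.

import java.util.Collection;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.common.TopicPartition;

// Assumed skeleton; the actual HandleRebalance coordinates offset commits
// and task open/close around partition ownership changes.
class HandleRebalanceSketch implements ConsumerRebalanceListener {

    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        // The sink task opens writers for the newly assigned partitions here.
    }

    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        // The sink task flushes pending records and commits offsets here,
        // before the partitions move to another consumer in the group.
    }
}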