Use of org.apache.kafka.connect.errors.ConnectException in project kafka by apache.
The class KafkaBasedLog, method start().
public void start() {
    log.info("Starting KafkaBasedLog with topic " + topic);
    // Create the topic admin client and initialize the topic ...
    admin = topicAdminSupplier.get();   // may be null
    initializer.accept(admin);
    // Then create the producer and consumer
    producer = createProducer();
    consumer = createConsumer();
    List<TopicPartition> partitions = new ArrayList<>();
    // We expect that the topics will have been created either manually by the user or automatically by the herder
    List<PartitionInfo> partitionInfos = consumer.partitionsFor(topic);
    long started = time.nanoseconds();
    long sleepMs = 100;
    while (partitionInfos.isEmpty() && time.nanoseconds() - started < CREATE_TOPIC_TIMEOUT_NS) {
        time.sleep(sleepMs);
        sleepMs = Math.min(2 * sleepMs, MAX_SLEEP_MS);
        partitionInfos = consumer.partitionsFor(topic);
    }
    if (partitionInfos.isEmpty())
        throw new ConnectException("Could not look up partition metadata for offset backing store topic in"
                + " allotted period. This could indicate a connectivity issue, unavailable topic partitions, or if"
                + " this is your first use of the topic it may have taken too long to create.");
    for (PartitionInfo partition : partitionInfos)
        partitions.add(new TopicPartition(partition.topic(), partition.partition()));
    partitionCount = partitions.size();
    consumer.assign(partitions);
    // Always consume from the beginning of all partitions. Necessary to ensure that we don't use committed offsets
    // when a 'group.id' is specified (if offsets happen to have been committed unexpectedly).
    consumer.seekToBeginning(partitions);
    readToLogEnd();
    thread = new WorkThread();
    thread.start();
    log.info("Finished reading KafkaBasedLog for topic " + topic);
    log.info("Started KafkaBasedLog for topic " + topic);
}
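The waiting loop above is a bounded exponential backoff: partition metadata is re-queried with a sleep that doubles up to a cap, and the method gives up with a ConnectException once the overall deadline passes. Below is a minimal standalone sketch of the same pattern; the helper class, method name, and the use of a generic Supplier are illustrative assumptions and are not part of KafkaBasedLog.

import java.util.List;
import java.util.function.Supplier;

import org.apache.kafka.connect.errors.ConnectException;

public class MetadataWait {

    // Hypothetical helper: poll a metadata lookup with exponential backoff until it
    // returns a non-empty result or the deadline expires, mirroring the loop in start().
    public static <T> List<T> waitForNonEmpty(Supplier<List<T>> lookup, long timeoutMs, long maxSleepMs)
            throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        long sleepMs = 100;
        List<T> result = lookup.get();
        while (result.isEmpty() && System.currentTimeMillis() < deadline) {
            Thread.sleep(sleepMs);
            sleepMs = Math.min(2 * sleepMs, maxSleepMs);   // double the backoff, capped
            result = lookup.get();
        }
        if (result.isEmpty())
            throw new ConnectException("Could not look up partition metadata in the allotted period");
        return result;
    }
}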
Use of org.apache.kafka.connect.errors.ConnectException in project kafka by apache.
The class SchemaSourceTask, method start().
@Override
public void start(Map<String, String> props) {
    final long throughput;
    String name = props.get(NAME_CONFIG);
    try {
        id = Integer.parseInt(props.get(ID_CONFIG));
        topic = props.get(TOPIC_CONFIG);
        maxNumMsgs = Long.parseLong(props.get(NUM_MSGS_CONFIG));
        multipleSchema = Boolean.parseBoolean(props.get(MULTIPLE_SCHEMA_CONFIG));
        partitionCount = Integer.parseInt(props.getOrDefault(PARTITION_COUNT_CONFIG, "1"));
        throughput = Long.parseLong(props.get(THROUGHPUT_CONFIG));
    } catch (NumberFormatException e) {
        throw new ConnectException("Invalid SchemaSourceTask configuration", e);
    }
    throttler = new ThroughputThrottler(throughput, System.currentTimeMillis());
    partition = Collections.singletonMap(ID_FIELD, id);
    Map<String, Object> previousOffset = this.context.offsetStorageReader().offset(partition);
    if (previousOffset != null) {
        seqno = (Long) previousOffset.get(SEQNO_FIELD) + 1;
    } else {
        seqno = 0;
    }
    startingSeqno = seqno;
    count = 0;
    log.info("Started SchemaSourceTask {}-{} producing to topic {} resuming from seqno {}", name, id, topic, startingSeqno);
}
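The try/catch above is the common Connect pattern for numeric task configuration: values arrive as strings, and any NumberFormatException is wrapped in a ConnectException so the framework fails the task with a clear cause. Note that Long.parseLong(null) and Integer.parseInt(null) also throw NumberFormatException, so a missing key is reported the same way as a malformed value. A minimal sketch of that pattern as a helper; the class and method names are illustrative assumptions, not part of SchemaSourceTask.

import java.util.Map;

import org.apache.kafka.connect.errors.ConnectException;

public class TaskConfigParsing {

    // Hypothetical helper: parse a required numeric property and surface any problem
    // (missing key or bad number) as a ConnectException.
    static long requiredLong(Map<String, String> props, String key) {
        try {
            return Long.parseLong(props.get(key));
        } catch (NumberFormatException e) {
            throw new ConnectException("Invalid value for configuration '" + key + "'", e);
        }
    }
}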
Use of org.apache.kafka.connect.errors.ConnectException in project kafka by apache.
The class VerifiableSinkTask, method start().
@Override
public void start(Map<String, String> props) {
    try {
        name = props.get(NAME_CONFIG);
        id = Integer.parseInt(props.get(ID_CONFIG));
    } catch (NumberFormatException e) {
        throw new ConnectException("Invalid VerifiableSourceTask configuration", e);
    }
}
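Because ConnectException is unchecked, a non-numeric id simply propagates out of start() and the framework marks the task as failed. A small test sketch of that behavior; it assumes the task's NAME_CONFIG and ID_CONFIG constants map to the literal keys "name" and "id", which is an assumption for illustration only.

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.tools.VerifiableSinkTask;
import org.junit.jupiter.api.Test;

import static org.junit.jupiter.api.Assertions.assertThrows;

public class VerifiableSinkTaskStartTest {

    // Sketch only: assumes the config keys are the literal strings "name" and "id".
    @Test
    public void startRejectsNonNumericId() {
        Map<String, String> props = new HashMap<>();
        props.put("name", "verifiable-sink");
        props.put("id", "not-a-number");
        assertThrows(ConnectException.class, () -> new VerifiableSinkTask().start(props));
    }
}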
Use of org.apache.kafka.connect.errors.ConnectException in project kafka by apache.
The class VerifiableSourceTask, method start().
@Override
public void start(Map<String, String> props) {
    final long throughput;
    try {
        name = props.get(NAME_CONFIG);
        id = Integer.parseInt(props.get(ID_CONFIG));
        topic = props.get(TOPIC_CONFIG);
        throughput = Long.parseLong(props.get(THROUGHPUT_CONFIG));
    } catch (NumberFormatException e) {
        throw new ConnectException("Invalid VerifiableSourceTask configuration", e);
    }
    partition = Collections.singletonMap(ID_FIELD, id);
    Map<String, Object> previousOffset = this.context.offsetStorageReader().offset(partition);
    if (previousOffset != null)
        seqno = (Long) previousOffset.get(SEQNO_FIELD) + 1;
    else
        seqno = 0;
    startingSeqno = seqno;
    throttler = new ThroughputThrottler(throughput, System.currentTimeMillis());
    log.info("Started VerifiableSourceTask {}-{} producing to topic {} resuming from seqno {}", name, id, topic, startingSeqno);
}
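Both source tasks resume from their last committed offset: the source partition is keyed by the task id, and the next sequence number is one past the committed SEQNO_FIELD, or 0 on a fresh start. A minimal standalone sketch of that resumption step; the helper class and parameter names are illustrative, not part of VerifiableSourceTask.

import java.util.Map;

public class OffsetResumption {

    // Hypothetical helper: compute the next sequence number from a previously
    // committed source offset, or start from 0 when no offset was committed.
    static long nextSeqno(Map<String, Object> previousOffset, String seqnoField) {
        if (previousOffset == null)
            return 0L;
        return (Long) previousOffset.get(seqnoField) + 1;
    }
}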
Use of org.apache.kafka.connect.errors.ConnectException in project kafka by apache.
The class WorkerConnector, method transitionTo().
public void transitionTo(TargetState targetState, Callback<TargetState> stateChangeCallback) {
    Callback<TargetState> preEmptedStateChangeCallback;
    TargetState preEmptedState;
    synchronized (this) {
        preEmptedStateChangeCallback = pendingStateChangeCallback.getAndSet(stateChangeCallback);
        preEmptedState = pendingTargetStateChange.getAndSet(targetState);
        notify();
    }
    if (preEmptedStateChangeCallback != null) {
        preEmptedStateChangeCallback.onCompletion(
                new ConnectException("Could not begin changing connector state to " + preEmptedState.name()
                        + " before another request to change state was made;"
                        + " the new request (which is to change the state to " + targetState.name()
                        + ") has pre-empted this one"),
                null);
    }
}
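Here the new target state and its callback are swapped in atomically; if an earlier request was still pending, its callback is completed exceptionally with a ConnectException explaining that a newer request pre-empted it. A self-contained sketch of that pre-emption pattern follows; the PendingRequest class and the simplified Callback interface are illustrative assumptions (Connect's own Callback lives in org.apache.kafka.connect.util).

import java.util.concurrent.atomic.AtomicReference;

import org.apache.kafka.connect.errors.ConnectException;

public class PendingRequest<T> {

    // Simplified stand-in for org.apache.kafka.connect.util.Callback.
    public interface Callback<V> {
        void onCompletion(Throwable error, V result);
    }

    private final AtomicReference<T> pendingValue = new AtomicReference<>();
    private final AtomicReference<Callback<T>> pendingCallback = new AtomicReference<>();

    // Record a new request; if an older one was still pending, fail its callback so
    // the caller learns it was pre-empted rather than silently dropped.
    public void submit(T value, Callback<T> callback) {
        Callback<T> preEmpted = pendingCallback.getAndSet(callback);
        T preEmptedValue = pendingValue.getAndSet(value);
        if (preEmpted != null) {
            preEmpted.onCompletion(
                    new ConnectException("Request for " + preEmptedValue
                            + " was pre-empted by a newer request for " + value),
                    null);
        }
    }
}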