
Example 76 with ConnectException

Use of org.apache.kafka.connect.errors.ConnectException in project kafka by apache.

The class KafkaBasedLog, method start().

public void start() {
    log.info("Starting KafkaBasedLog with topic " + topic);
    // Create the topic admin client and initialize the topic ...
    // may be null
    admin = topicAdminSupplier.get();
    initializer.accept(admin);
    // Then create the producer and consumer
    producer = createProducer();
    consumer = createConsumer();
    List<TopicPartition> partitions = new ArrayList<>();
    // We expect that the topics will have been created either manually by the user or automatically by the herder
    List<PartitionInfo> partitionInfos = consumer.partitionsFor(topic);
    long started = time.nanoseconds();
    long sleepMs = 100;
    while (partitionInfos.isEmpty() && time.nanoseconds() - started < CREATE_TOPIC_TIMEOUT_NS) {
        time.sleep(sleepMs);
        sleepMs = Math.min(2 * sleepMs, MAX_SLEEP_MS);
        partitionInfos = consumer.partitionsFor(topic);
    }
    if (partitionInfos.isEmpty())
        throw new ConnectException("Could not look up partition metadata for offset backing store topic in" + " allotted period. This could indicate a connectivity issue, unavailable topic partitions, or if" + " this is your first use of the topic it may have taken too long to create.");
    for (PartitionInfo partition : partitionInfos) partitions.add(new TopicPartition(partition.topic(), partition.partition()));
    partitionCount = partitions.size();
    consumer.assign(partitions);
    // Always consume from the beginning of all partitions. Necessary to ensure that we don't use committed offsets
    // when a 'group.id' is specified (if offsets happen to have been committed unexpectedly).
    consumer.seekToBeginning(partitions);
    readToLogEnd();
    thread = new WorkThread();
    thread.start();
    log.info("Finished reading KafkaBasedLog for topic " + topic);
    log.info("Started KafkaBasedLog for topic " + topic);
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) ArrayList(java.util.ArrayList) PartitionInfo(org.apache.kafka.common.PartitionInfo) ConnectException(org.apache.kafka.connect.errors.ConnectException)
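The most reusable piece of this start() method is the bounded wait for topic metadata: poll partitionsFor() with an increasing sleep and fail with a ConnectException once the deadline passes. Below is a minimal standalone sketch of that pattern; the class and method names, the timeout constants, and the use of Thread.sleep() instead of Connect's Time abstraction are illustrative assumptions, not part of KafkaBasedLog.

import java.util.List;
import java.util.concurrent.TimeUnit;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.connect.errors.ConnectException;

public class TopicMetadataWait {

    // Hypothetical constants; KafkaBasedLog uses CREATE_TOPIC_TIMEOUT_NS and MAX_SLEEP_MS.
    private static final long TIMEOUT_NS = TimeUnit.SECONDS.toNanos(120);
    private static final long MAX_SLEEP_MS = 10_000;

    /**
     * Polls partitionsFor() with exponential backoff until metadata appears,
     * throwing ConnectException if the topic is still unknown after the timeout.
     */
    public static List<PartitionInfo> waitForPartitions(Consumer<?, ?> consumer, String topic) {
        long started = System.nanoTime();
        long sleepMs = 100;
        List<PartitionInfo> partitionInfos = consumer.partitionsFor(topic);
        while ((partitionInfos == null || partitionInfos.isEmpty())
                && System.nanoTime() - started < TIMEOUT_NS) {
            try {
                Thread.sleep(sleepMs);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new ConnectException("Interrupted while waiting for topic metadata", e);
            }
            sleepMs = Math.min(2 * sleepMs, MAX_SLEEP_MS);
            partitionInfos = consumer.partitionsFor(topic);
        }
        if (partitionInfos == null || partitionInfos.isEmpty())
            throw new ConnectException("Could not look up partition metadata for topic " + topic
                    + " in the allotted period");
        return partitionInfos;
    }
}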

Example 77 with ConnectException

Use of org.apache.kafka.connect.errors.ConnectException in project kafka by apache.

The class SchemaSourceTask, method start().

@Override
public void start(Map<String, String> props) {
    final long throughput;
    String name = props.get(NAME_CONFIG);
    try {
        id = Integer.parseInt(props.get(ID_CONFIG));
        topic = props.get(TOPIC_CONFIG);
        maxNumMsgs = Long.parseLong(props.get(NUM_MSGS_CONFIG));
        multipleSchema = Boolean.parseBoolean(props.get(MULTIPLE_SCHEMA_CONFIG));
        partitionCount = Integer.parseInt(props.getOrDefault(PARTITION_COUNT_CONFIG, "1"));
        throughput = Long.parseLong(props.get(THROUGHPUT_CONFIG));
    } catch (NumberFormatException e) {
        throw new ConnectException("Invalid SchemaSourceTask configuration", e);
    }
    throttler = new ThroughputThrottler(throughput, System.currentTimeMillis());
    partition = Collections.singletonMap(ID_FIELD, id);
    Map<String, Object> previousOffset = this.context.offsetStorageReader().offset(partition);
    if (previousOffset != null) {
        seqno = (Long) previousOffset.get(SEQNO_FIELD) + 1;
    } else {
        seqno = 0;
    }
    startingSeqno = seqno;
    count = 0;
    log.info("Started SchemaSourceTask {}-{} producing to topic {} resuming from seqno {}", name, id, topic, startingSeqno);
}
Also used : ThroughputThrottler(org.apache.kafka.tools.ThroughputThrottler) ConnectException(org.apache.kafka.connect.errors.ConnectException)
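The try/catch here is the standard Connect idiom for task configuration: parse everything up front and convert NumberFormatException into a ConnectException so the worker fails the task with a clear message instead of crashing later. A hypothetical helper that factors the pattern out (the class and method names are not part of SchemaSourceTask); note that Long.parseLong(null) also throws NumberFormatException, so missing keys are covered too.

import java.util.Map;

import org.apache.kafka.connect.errors.ConnectException;

public final class TaskConfigParsing {

    private TaskConfigParsing() {
    }

    /**
     * Parses a required numeric property, wrapping the NumberFormatException
     * (including the one thrown for a missing/null value) in a ConnectException.
     */
    public static long requiredLong(Map<String, String> props, String key) {
        try {
            return Long.parseLong(props.get(key));
        } catch (NumberFormatException e) {
            throw new ConnectException("Invalid or missing value for '" + key + "'", e);
        }
    }
}

In start() above, the same effect could then be had with throughput = TaskConfigParsing.requiredLong(props, THROUGHPUT_CONFIG).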

Example 78 with ConnectException

Use of org.apache.kafka.connect.errors.ConnectException in project kafka by apache.

The class VerifiableSinkTask, method start().

@Override
public void start(Map<String, String> props) {
    try {
        name = props.get(NAME_CONFIG);
        id = Integer.parseInt(props.get(ID_CONFIG));
    } catch (NumberFormatException e) {
        throw new ConnectException("Invalid VerifiableSourceTask configuration", e);
    }
}
Also used : ConnectException(org.apache.kafka.connect.errors.ConnectException)
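Because start() fails fast on a malformed id, the behaviour is easy to pin down in a unit test. A sketch using JUnit 4 (which the project's examples above already use); the literal keys "name" and "id" are assumed values of NAME_CONFIG and ID_CONFIG, and direct instantiation of the task is an assumption of this sketch.

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.connect.errors.ConnectException;
import org.junit.Test;

import static org.junit.Assert.fail;

public class VerifiableSinkTaskStartTest {

    @Test
    public void startShouldRejectNonNumericId() {
        Map<String, String> props = new HashMap<>();
        props.put("name", "verifiable-sink");  // assumed value of NAME_CONFIG
        props.put("id", "not-a-number");       // assumed value of ID_CONFIG
        try {
            new VerifiableSinkTask().start(props);
            fail("Expected a ConnectException for a non-numeric id");
        } catch (ConnectException e) {
            // expected: the NumberFormatException is wrapped in a ConnectException
        }
    }
}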

Example 79 with ConnectException

Use of org.apache.kafka.connect.errors.ConnectException in project kafka by apache.

The class VerifiableSourceTask, method start().

@Override
public void start(Map<String, String> props) {
    final long throughput;
    try {
        name = props.get(NAME_CONFIG);
        id = Integer.parseInt(props.get(ID_CONFIG));
        topic = props.get(TOPIC_CONFIG);
        throughput = Long.parseLong(props.get(THROUGHPUT_CONFIG));
    } catch (NumberFormatException e) {
        throw new ConnectException("Invalid VerifiableSourceTask configuration", e);
    }
    partition = Collections.singletonMap(ID_FIELD, id);
    Map<String, Object> previousOffset = this.context.offsetStorageReader().offset(partition);
    if (previousOffset != null)
        seqno = (Long) previousOffset.get(SEQNO_FIELD) + 1;
    else
        seqno = 0;
    startingSeqno = seqno;
    throttler = new ThroughputThrottler(throughput, System.currentTimeMillis());
    log.info("Started VerifiableSourceTask {}-{} producing to topic {} resuming from seqno {}", name, id, topic, startingSeqno);
}
Also used : ThroughputThrottler(org.apache.kafka.tools.ThroughputThrottler) ConnectException(org.apache.kafka.connect.errors.ConnectException)
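The ThroughputThrottler created in start() only takes effect in poll(), where the task checks whether it is ahead of the target rate before emitting the next record. A simplified sketch of that pairing; the shouldThrottle/throttle calls are assumed to match the org.apache.kafka.tools.ThroughputThrottler API, and the topic name, field names, and record construction are illustrative.

import java.util.Collections;
import java.util.List;
import java.util.Map;

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.tools.ThroughputThrottler;

public class ThrottledPollSketch {

    private final ThroughputThrottler throttler =
            new ThroughputThrottler(10, System.currentTimeMillis());
    private final Map<String, Integer> partition = Collections.singletonMap("id", 0);
    private final long startingSeqno = 0;
    private long seqno = 0;

    /** Emits one record per call, sleeping when ahead of the configured rate. */
    public List<SourceRecord> poll() {
        long sendStartMs = System.currentTimeMillis();
        if (throttler.shouldThrottle(seqno - startingSeqno, sendStartMs)) {
            throttler.throttle();
        }
        Map<String, Long> offset = Collections.singletonMap("seqno", seqno);
        SourceRecord record = new SourceRecord(partition, offset, "test-topic",
                Schema.INT64_SCHEMA, seqno);
        seqno++;
        return Collections.singletonList(record);
    }
}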

Example 80 with ConnectException

Use of org.apache.kafka.connect.errors.ConnectException in project kafka by apache.

The class WorkerConnector, method transitionTo().

public void transitionTo(TargetState targetState, Callback<TargetState> stateChangeCallback) {
    Callback<TargetState> preEmptedStateChangeCallback;
    TargetState preEmptedState;
    synchronized (this) {
        preEmptedStateChangeCallback = pendingStateChangeCallback.getAndSet(stateChangeCallback);
        preEmptedState = pendingTargetStateChange.getAndSet(targetState);
        notify();
    }
    if (preEmptedStateChangeCallback != null) {
        preEmptedStateChangeCallback.onCompletion(new ConnectException("Could not begin changing connector state to " + preEmptedState.name() + " before another request to change state was made;" + " the new request (which is to change the state to " + targetState.name() + ") has pre-empted this one"), null);
    }
}
Also used : ConnectException(org.apache.kafka.connect.errors.ConnectException)
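Callers of transitionTo() supply the Callback<TargetState>, so they decide how to treat the ConnectException delivered when their request is pre-empted by a newer one. A hypothetical callback that logs pre-emption at debug level rather than as an error (the helper class and method are illustrative, not part of WorkerConnector).

import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.runtime.TargetState;
import org.apache.kafka.connect.util.Callback;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class StateChangeCallbacks {

    private static final Logger log = LoggerFactory.getLogger(StateChangeCallbacks.class);

    /**
     * Builds a callback that treats the ConnectException raised for a pre-empted
     * state change as expected (a newer request replaced this one) while still
     * surfacing any other failure.
     */
    public static Callback<TargetState> loggingCallback(String connectorName) {
        return (error, newState) -> {
            if (error instanceof ConnectException) {
                log.debug("State change for connector {} was superseded: {}",
                        connectorName, error.getMessage());
            } else if (error != null) {
                log.error("State change for connector {} failed", connectorName, error);
            } else {
                log.info("Connector {} transitioned to {}", connectorName, newState);
            }
        };
    }
}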

Aggregations

ConnectException (org.apache.kafka.connect.errors.ConnectException): 184
HashMap (java.util.HashMap): 38
IOException (java.io.IOException): 28
Map (java.util.Map): 28
ArrayList (java.util.ArrayList): 23
Test (org.junit.Test): 23
ExecutionException (java.util.concurrent.ExecutionException): 22
TimeoutException (java.util.concurrent.TimeoutException): 17
SQLException (java.sql.SQLException): 16
SourceRecord (org.apache.kafka.connect.source.SourceRecord): 14
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest): 14
Connector (org.apache.kafka.connect.connector.Connector): 12
ConfigException (org.apache.kafka.common.config.ConfigException): 11
TopicPartition (org.apache.kafka.common.TopicPartition): 10
ConnectorTaskId (org.apache.kafka.connect.util.ConnectorTaskId): 10
Collection (java.util.Collection): 8
HashSet (java.util.HashSet): 8
Set (java.util.Set): 8
NotFoundException (org.apache.kafka.connect.errors.NotFoundException): 8
SinkRecord (org.apache.kafka.connect.sink.SinkRecord): 8