Example 81 with ConnectException

use of org.apache.kafka.connect.errors.ConnectException in project kafka by apache.

the class WorkerConnector method doShutdown.

void doShutdown() {
    try {
        TargetState preEmptedState = pendingTargetStateChange.getAndSet(null);
        Callback<TargetState> stateChangeCallback = pendingStateChangeCallback.getAndSet(null);
        if (stateChangeCallback != null) {
            stateChangeCallback.onCompletion(new ConnectException("Could not begin changing connector state to " + preEmptedState.name() + " as the connector has been scheduled for shutdown"), null);
        }
        if (state == State.STARTED)
            connector.stop();
        this.state = State.STOPPED;
        statusListener.onShutdown(connName);
        log.info("Completed shutdown for {}", this);
    } catch (Throwable t) {
        log.error("{} Error while shutting down connector", this, t);
        state = State.FAILED;
        statusListener.onFailure(connName, t);
    } finally {
        ctx.close();
        metrics.close();
    }
}
Also used : ConnectException(org.apache.kafka.connect.errors.ConnectException)
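The shutdown path above preempts any pending state change by atomically swapping it out before stopping the connector. A minimal sketch of that getAndSet preemption pattern using plain JDK types (the class and field names here are illustrative, not from the Kafka code):

import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;

public class PreemptionSketch {
    // Holds the most recently requested state-change callback, if any.
    private final AtomicReference<Consumer<Exception>> pendingCallback = new AtomicReference<>();

    void requestChange(Consumer<Exception> callback) {
        pendingCallback.set(callback);
    }

    void shutdown() {
        // getAndSet(null) claims the pending callback exactly once, even if
        // another thread is racing to install or claim it concurrently.
        Consumer<Exception> preempted = pendingCallback.getAndSet(null);
        if (preempted != null) {
            preempted.accept(new IllegalStateException("shutdown preempted the pending state change"));
        }
        // ... proceed with the actual shutdown work ...
    }
}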

Example 82 with ConnectException

use of org.apache.kafka.connect.errors.ConnectException in project kafka by apache.

the class ConnectorConfig method transformations.

/**
 * Returns the initialized list of {@link Transformation} which are specified in {@link #TRANSFORMS_CONFIG}.
 */
public <R extends ConnectRecord<R>> List<Transformation<R>> transformations() {
    final List<String> transformAliases = getList(TRANSFORMS_CONFIG);
    final List<Transformation<R>> transformations = new ArrayList<>(transformAliases.size());
    for (String alias : transformAliases) {
        final String prefix = TRANSFORMS_CONFIG + "." + alias + ".";
        try {
            @SuppressWarnings("unchecked") final Transformation<R> transformation = Utils.newInstance(getClass(prefix + "type"), Transformation.class);
            Map<String, Object> configs = originalsWithPrefix(prefix);
            Object predicateAlias = configs.remove(PredicatedTransformation.PREDICATE_CONFIG);
            Object negate = configs.remove(PredicatedTransformation.NEGATE_CONFIG);
            transformation.configure(configs);
            if (predicateAlias != null) {
                String predicatePrefix = PREDICATES_PREFIX + predicateAlias + ".";
                @SuppressWarnings("unchecked") Predicate<R> predicate = Utils.newInstance(getClass(predicatePrefix + "type"), Predicate.class);
                predicate.configure(originalsWithPrefix(predicatePrefix));
                transformations.add(new PredicatedTransformation<>(predicate, negate == null ? false : Boolean.parseBoolean(negate.toString()), transformation));
            } else {
                transformations.add(transformation);
            }
        } catch (Exception e) {
            throw new ConnectException(e);
        }
    }
    return transformations;
}
Also used : Transformation(org.apache.kafka.connect.transforms.Transformation) ArrayList(java.util.ArrayList) ConfigException(org.apache.kafka.common.config.ConfigException) ConnectException(org.apache.kafka.connect.errors.ConnectException)
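The per-alias keys consumed here ("type", plus the "predicate" and "negate" entries that are stripped out before configure() is called) follow the standard Kafka Connect transform configuration layout. A minimal sketch of a connector config that would exercise the predicate branch, using stock Kafka transform and predicate classes (the aliases "mask" and "isFoo" are made up):

import java.util.HashMap;
import java.util.Map;

Map<String, String> props = new HashMap<>();
props.put("transforms", "mask");
props.put("transforms.mask.type", "org.apache.kafka.connect.transforms.MaskField$Value");
props.put("transforms.mask.fields", "ssn");
// "predicate" and "negate" are removed from the transform's own configs above,
// then used to wrap it in a PredicatedTransformation.
props.put("transforms.mask.predicate", "isFoo");
props.put("transforms.mask.negate", "false");
props.put("predicates", "isFoo");
props.put("predicates.isFoo.type", "org.apache.kafka.connect.transforms.predicates.TopicNameMatches");
props.put("predicates.isFoo.pattern", "foo.*");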

Example 83 with ConnectException

use of org.apache.kafka.connect.errors.ConnectException in project kafka by apache.

the class ErrorReporterTest method testDlqHeaderIsAppended.

@Test
public void testDlqHeaderIsAppended() {
    Map<String, String> props = new HashMap<>();
    props.put(SinkConnectorConfig.DLQ_TOPIC_NAME_CONFIG, DLQ_TOPIC);
    props.put(SinkConnectorConfig.DLQ_CONTEXT_HEADERS_ENABLE_CONFIG, "true");
    DeadLetterQueueReporter deadLetterQueueReporter = new DeadLetterQueueReporter(producer, config(props), TASK_ID, errorHandlingMetrics);
    ProcessingContext context = new ProcessingContext();
    context.consumerRecord(new ConsumerRecord<>("source-topic", 7, 10, "source-key".getBytes(), "source-value".getBytes()));
    context.currentContext(Stage.TRANSFORMATION, Transformation.class);
    context.error(new ConnectException("Test Exception"));
    ProducerRecord<byte[], byte[]> producerRecord = new ProducerRecord<>(DLQ_TOPIC, "source-key".getBytes(), "source-value".getBytes());
    producerRecord.headers().add(ERROR_HEADER_ORIG_TOPIC, "dummy".getBytes());
    deadLetterQueueReporter.populateContextHeaders(producerRecord, context);
    int appearances = 0;
    for (Header header : producerRecord.headers()) {
        if (ERROR_HEADER_ORIG_TOPIC.equalsIgnoreCase(header.key())) {
            appearances++;
        }
    }
    assertEquals("source-topic", headerValue(producerRecord, ERROR_HEADER_ORIG_TOPIC));
    assertEquals(2, appearances);
}
Also used : Header(org.apache.kafka.common.header.Header) HashMap(java.util.HashMap) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) ConnectException(org.apache.kafka.connect.errors.ConnectException) Test(org.junit.Test)
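The headerValue(...) helper is not shown in this excerpt. A plausible minimal implementation, assuming it decodes the most recently appended header for the key, which would explain why the assertion sees "source-topic" rather than the pre-existing "dummy" value:

import java.nio.charset.StandardCharsets;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.header.Header;

static String headerValue(ProducerRecord<byte[], byte[]> record, String key) {
    // Headers.lastHeader returns the last header added under this key, so an
    // appended context header wins over any value that was already present.
    Header header = record.headers().lastHeader(key);
    return header == null ? null : new String(header.value(), StandardCharsets.UTF_8);
}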

Example 84 with ConnectException

use of org.apache.kafka.connect.errors.ConnectException in project kafka by apache.

the class EmbeddedConnectCluster method restartConnectorAndTasks.

/**
 * Restart an existing connector and its tasks.
 *
 * @param connName  name of the connector to be restarted
 * @param onlyFailed    true if only failed instances should be restarted
 * @param includeTasks  true if tasks should be restarted, or false if only the connector should be restarted
 * @param onlyCallOnEmptyWorker true if the REST API call should be called on a worker not running this connector or its tasks
 * @throws ConnectRestException if the REST API returns an error status
 * @throws ConnectException for any other error.
 */
public ConnectorStateInfo restartConnectorAndTasks(String connName, boolean onlyFailed, boolean includeTasks, boolean onlyCallOnEmptyWorker) {
    ObjectMapper mapper = new ObjectMapper();
    String restartPath = String.format("connectors/%s/restart?onlyFailed=%s&includeTasks=%s", connName, onlyFailed, includeTasks);
    String restartEndpoint;
    if (onlyCallOnEmptyWorker) {
        restartEndpoint = endpointForResourceNotRunningConnector(restartPath, connName);
    } else {
        restartEndpoint = endpointForResource(restartPath);
    }
    Response response = requestPost(restartEndpoint, "", Collections.emptyMap());
    try {
        if (response.getStatus() < Response.Status.BAD_REQUEST.getStatusCode()) {
            // only a 202 (Accepted) response carries a body
            if (response.getStatus() == Response.Status.ACCEPTED.getStatusCode()) {
                return mapper.readerFor(ConnectorStateInfo.class).readValue(responseToString(response));
            }
        }
        return null;
    } catch (IOException e) {
        log.error("Could not read connector state from response: {}", responseToString(response), e);
        throw new ConnectException("Could not not parse connector state", e);
    }
}
Also used : Response(javax.ws.rs.core.Response) IOException(java.io.IOException) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) ConnectorStateInfo(org.apache.kafka.connect.runtime.rest.entities.ConnectorStateInfo) ConnectException(org.apache.kafka.connect.errors.ConnectException)
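A sketch of driving this method from a test and handling the 202 path (the connect variable and connector name are illustrative):

// Restart the connector and all of its tasks, regardless of state,
// issuing the REST call to a worker that hosts the connector.
ConnectorStateInfo stateInfo = connect.restartConnectorAndTasks("my-connector", false, true, false);
if (stateInfo != null) {
    // A 202 response returned the planned restart state for the connector and its tasks.
    System.out.println("connector state: " + stateInfo.connector().state());
    stateInfo.tasks().forEach(task -> System.out.println("task " + task.id() + ": " + task.state()));
}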

Example 85 with ConnectException

use of org.apache.kafka.connect.errors.ConnectException in project kafka by apache.

the class EmbeddedConnectCluster method validateConnectorConfig.

/**
 * Validate a given connector configuration. If the configuration is valid, or invalid only
 * because of configuration errors, an instance of {@link ConfigInfos} is returned. If the
 * validation request itself fails, an exception is thrown.
 *
 * @param connClassName the name of the connector class
 * @param connConfig    the intended configuration
 * @throws ConnectRestException if the REST API returns an error status
 * @throws ConnectException if the configuration fails to serialize/deserialize or if the request failed to send
 */
public ConfigInfos validateConnectorConfig(String connClassName, Map<String, String> connConfig) {
    String url = endpointForResource(String.format("connector-plugins/%s/config/validate", connClassName));
    String response = putConnectorConfig(url, connConfig);
    ConfigInfos configInfos;
    try {
        configInfos = new ObjectMapper().readValue(response, ConfigInfos.class);
    } catch (IOException e) {
        throw new ConnectException("Unable deserialize response into a ConfigInfos object");
    }
    return configInfos;
}
Also used : IOException(java.io.IOException) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) ConfigInfos(org.apache.kafka.connect.runtime.rest.entities.ConfigInfos) ConnectException(org.apache.kafka.connect.errors.ConnectException)
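A sketch of asserting on the result in a test. This assumes ConfigInfos exposes name() and errorCount() as in the Connect REST entities; the connector class is the stock FileStreamSinkConnector:

import java.util.HashMap;
import java.util.Map;

Map<String, String> config = new HashMap<>();
config.put("connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector");
config.put("topics", "test-topic");
// Deliberately omit other required settings to provoke per-field config errors.
ConfigInfos infos = connect.validateConnectorConfig("org.apache.kafka.connect.file.FileStreamSinkConnector", config);
// errorCount() totals the per-field validation errors reported by the REST API.
if (infos.errorCount() > 0) {
    System.out.println("validation reported " + infos.errorCount() + " error(s) for " + infos.name());
}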

Aggregations

ConnectException (org.apache.kafka.connect.errors.ConnectException): 184 uses
HashMap (java.util.HashMap): 38 uses
IOException (java.io.IOException): 28 uses
Map (java.util.Map): 28 uses
ArrayList (java.util.ArrayList): 23 uses
Test (org.junit.Test): 23 uses
ExecutionException (java.util.concurrent.ExecutionException): 22 uses
TimeoutException (java.util.concurrent.TimeoutException): 17 uses
SQLException (java.sql.SQLException): 16 uses
SourceRecord (org.apache.kafka.connect.source.SourceRecord): 14 uses
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest): 14 uses
Connector (org.apache.kafka.connect.connector.Connector): 12 uses
ConfigException (org.apache.kafka.common.config.ConfigException): 11 uses
TopicPartition (org.apache.kafka.common.TopicPartition): 10 uses
ConnectorTaskId (org.apache.kafka.connect.util.ConnectorTaskId): 10 uses
Collection (java.util.Collection): 8 uses
HashSet (java.util.HashSet): 8 uses
Set (java.util.Set): 8 uses
NotFoundException (org.apache.kafka.connect.errors.NotFoundException): 8 uses
SinkRecord (org.apache.kafka.connect.sink.SinkRecord): 8 uses