
Example 16 with ConnectException

Use of org.apache.kafka.connect.errors.ConnectException in project ignite by apache.

From class IgniteSinkTask, method put().

/**
 * Buffers records.
 *
 * @param records Records to inject into grid.
 */
@SuppressWarnings("unchecked")
@Override
public void put(Collection<SinkRecord> records) {
    try {
        for (SinkRecord record : records) {
            // Data is flushed asynchronously when CACHE_PER_NODE_DATA_SIZE is reached.
            if (extractor != null) {
                Map.Entry<Object, Object> entry = extractor.extract(record);
                StreamerContext.getStreamer().addData(entry.getKey(), entry.getValue());
            } else {
                if (record.key() != null) {
                    StreamerContext.getStreamer().addData(record.key(), record.value());
                } else {
                    log.error("Failed to stream a record with null key!");
                }
            }
        }
    } catch (ConnectException e) {
        log.error("Failed adding record", e);
        throw new ConnectException(e);
    }
}
Also used: SinkRecord (org.apache.kafka.connect.sink.SinkRecord), Map (java.util.Map), ConnectException (org.apache.kafka.connect.errors.ConnectException)
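
For context, the extractor field above is a single-tuple extractor that maps a SinkRecord to a cache entry; in current Ignite versions it is declared as StreamSingleTupleExtractor<SinkRecord, Object, Object> (org.apache.ignite.stream). A minimal sketch of one, keying each entry by the record's Kafka coordinates; the class name and key scheme are hypothetical:

import java.util.AbstractMap;
import java.util.Map;
import org.apache.ignite.stream.StreamSingleTupleExtractor;
import org.apache.kafka.connect.sink.SinkRecord;

/** Hypothetical extractor: keys each cache entry by topic-partition-offset. */
public class OffsetKeyExtractor implements StreamSingleTupleExtractor<SinkRecord, Object, Object> {
    @Override public Map.Entry<Object, Object> extract(SinkRecord rec) {
        String key = rec.topic() + "-" + rec.kafkaPartition() + "-" + rec.kafkaOffset();
        // The record value is streamed into the grid unchanged.
        return new AbstractMap.SimpleEntry<>(key, rec.value());
    }
}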

Example 17 with ConnectException

Use of org.apache.kafka.connect.errors.ConnectException in project ignite by apache.

From class IgniteSourceConnector, method start().

/** {@inheritDoc} */
@Override
public void start(Map<String, String> props) {
    try {
        A.notNullOrEmpty(props.get(IgniteSourceConstants.CACHE_NAME), "cache name");
        A.notNullOrEmpty(props.get(IgniteSourceConstants.CACHE_CFG_PATH), "path to cache config file");
        A.notNullOrEmpty(props.get(IgniteSourceConstants.CACHE_EVENTS), "Registered cache events");
        A.notNullOrEmpty(props.get(IgniteSourceConstants.TOPIC_NAMES), "Kafka topics");
    } catch (IllegalArgumentException e) {
        throw new ConnectException("Cannot start IgniteSourceConnector due to configuration error", e);
    }
    configProps = props;
}
Also used: ConnectException (org.apache.kafka.connect.errors.ConnectException)
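
A minimal sketch of starting the connector with a configuration that passes all four checks; the property values (cache name, config path, event list, topics) are placeholder assumptions:

import java.util.HashMap;
import java.util.Map;

Map<String, String> props = new HashMap<>();
props.put(IgniteSourceConstants.CACHE_NAME, "testCache");       // cache name
props.put(IgniteSourceConstants.CACHE_CFG_PATH, "ignite.xml");  // path to cache config file
props.put(IgniteSourceConstants.CACHE_EVENTS, "put");           // registered cache events
props.put(IgniteSourceConstants.TOPIC_NAMES, "ignite-events");  // Kafka topics

IgniteSourceConnector connector = new IgniteSourceConnector();
// Throws ConnectException up front if any of the four values is missing or empty.
connector.start(props);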

Example 18 with ConnectException

Use of org.apache.kafka.connect.errors.ConnectException in project ignite by apache.

From class IgniteSourceTask, method start().

/**
 * Filtering is done remotely. Local listener buffers data for injection into Kafka.
 *
 * @param props Task properties.
 */
@Override
public void start(Map<String, String> props) {
    synchronized (lock) {
        // Nothing to do if the task has been already started.
        if (!stopped)
            return;
        cacheName = props.get(IgniteSourceConstants.CACHE_NAME);
        igniteCfgFile = props.get(IgniteSourceConstants.CACHE_CFG_PATH);
        topics = props.get(IgniteSourceConstants.TOPIC_NAMES).split("\\s*,\\s*");
        if (props.containsKey(IgniteSourceConstants.INTL_BUF_SIZE))
            evtBufSize = Integer.parseInt(props.get(IgniteSourceConstants.INTL_BUF_SIZE));
        if (props.containsKey(IgniteSourceConstants.INTL_BATCH_SIZE))
            evtBatchSize = Integer.parseInt(props.get(IgniteSourceConstants.INTL_BATCH_SIZE));
        if (props.containsKey(IgniteSourceConstants.CACHE_FILTER_CLASS)) {
            String filterCls = props.get(IgniteSourceConstants.CACHE_FILTER_CLASS);
            if (filterCls != null && !filterCls.isEmpty()) {
                try {
                    Class<? extends IgnitePredicate<CacheEvent>> clazz = (Class<? extends IgnitePredicate<CacheEvent>>) Class.forName(filterCls);
                    filter = clazz.newInstance();
                } catch (Exception e) {
                    log.error("Failed to instantiate the provided filter! " + "User-enabled filtering is ignored!", e);
                }
            }
        }
        TaskRemoteFilter rmtLsnr = new TaskRemoteFilter(cacheName);
        try {
            int[] evts = cacheEvents(props.get(IgniteSourceConstants.CACHE_EVENTS));
            rmtLsnrId = IgniteGrid.getIgnite().events(IgniteGrid.getIgnite().cluster().forCacheNodes(cacheName)).remoteListen(locLsnr, rmtLsnr, evts);
        } catch (Exception e) {
            log.error("Failed to register event listener!", e);
            throw new ConnectException(e);
        } finally {
            stopped = false;
        }
    }
}
Also used: IgnitePredicate (org.apache.ignite.lang.IgnitePredicate), CacheEvent (org.apache.ignite.events.CacheEvent), IgniteException (org.apache.ignite.IgniteException), ConnectException (org.apache.kafka.connect.errors.ConnectException)
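
The reflective block above requires the configured filter class to implement IgnitePredicate<CacheEvent> and to expose a public no-arg constructor, or clazz.newInstance() fails and filtering is silently dropped. A minimal sketch of such a filter; the class name and the predicate it applies are hypothetical:

import org.apache.ignite.events.CacheEvent;
import org.apache.ignite.lang.IgnitePredicate;

/** Hypothetical filter: forward only events whose cache key is a String. */
public class StringKeyFilter implements IgnitePredicate<CacheEvent> {
    @Override public boolean apply(CacheEvent evt) {
        return evt.key() instanceof String;
    }
}

It would be wired in via props.put(IgniteSourceConstants.CACHE_FILTER_CLASS, StringKeyFilter.class.getName()).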

Example 19 with ConnectException

Use of org.apache.kafka.connect.errors.ConnectException in project kafka by apache.

From class WorkerSourceTask, method sendRecords().

/**
 * Try to send a batch of records. If a send fails and is retriable, this saves the remainder of the batch so it can
 * be retried after backing off. If a send fails and is not retriable, this will throw a ConnectException.
 * @return true if all messages were sent, false if some need to be retried
 */
private boolean sendRecords() {
    int processed = 0;
    for (final SourceRecord preTransformRecord : toSend) {
        final SourceRecord record = transformationChain.apply(preTransformRecord);
        if (record == null) {
            commitTaskRecord(preTransformRecord);
            continue;
        }
        byte[] key = keyConverter.fromConnectData(record.topic(), record.keySchema(), record.key());
        byte[] value = valueConverter.fromConnectData(record.topic(), record.valueSchema(), record.value());
        final ProducerRecord<byte[], byte[]> producerRecord = new ProducerRecord<>(record.topic(), record.kafkaPartition(), ConnectUtils.checkAndConvertTimestamp(record.timestamp()), key, value);
        log.trace("Appending record with key {}, value {}", record.key(), record.value());
        // Under the lock, track the message as outstanding (or backlog it during a flush)
        // and update the offsets; skip both if the last send failed, since this record was
        // already registered on the first attempt.
        synchronized (this) {
            if (!lastSendFailed) {
                if (!flushing) {
                    outstandingMessages.put(producerRecord, producerRecord);
                } else {
                    outstandingMessagesBacklog.put(producerRecord, producerRecord);
                }
                // Offsets are converted & serialized in the OffsetWriter
                offsetWriter.offset(record.sourcePartition(), record.sourceOffset());
            }
        }
        try {
            final String topic = producerRecord.topic();
            producer.send(producerRecord, new Callback() {

                @Override
                public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                    if (e != null) {
                        // Given the default settings for zero data loss, this should basically never happen --
                        // between "infinite" retries, indefinite blocking on full buffers, and "infinite" request
                        // timeouts, callbacks with exceptions should never be invoked in practice. If the
                        // user overrode these settings, the best we can do is notify them of the failure via
                        // logging.
                        log.error("{} failed to send record to {}: {}", id, topic, e);
                        log.debug("Failed record: {}", preTransformRecord);
                    } else {
                        log.trace("Wrote record successfully: topic {} partition {} offset {}", recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset());
                        commitTaskRecord(preTransformRecord);
                    }
                    recordSent(producerRecord);
                }
            });
            lastSendFailed = false;
        } catch (RetriableException e) {
            log.warn("Failed to send {}, backing off before retrying:", producerRecord, e);
            toSend = toSend.subList(processed, toSend.size());
            lastSendFailed = true;
            return false;
        } catch (KafkaException e) {
            throw new ConnectException("Unrecoverable exception trying to send", e);
        }
        processed++;
    }
    toSend = null;
    return true;
}
Also used: SourceRecord (org.apache.kafka.connect.source.SourceRecord), KafkaException (org.apache.kafka.common.KafkaException), TimeoutException (java.util.concurrent.TimeoutException), RetriableException (org.apache.kafka.common.errors.RetriableException), ExecutionException (java.util.concurrent.ExecutionException), ConnectException (org.apache.kafka.connect.errors.ConnectException), RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata), Callback (org.apache.kafka.clients.producer.Callback), ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord)
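
The boolean contract matters to the task's main loop: on a retriable failure, sendRecords() keeps the unsent remainder in toSend (the subList call above) and returns false, so the caller backs off and retries the same batch. A rough sketch of that driver loop; the helper and constant names below approximate, rather than quote, the real method:

// Sketch of the loop that drives sendRecords():
while (!isStopping()) {
    if (toSend == null)
        toSend = task.poll();                  // fetch a fresh batch from the source task
    if (toSend == null)
        continue;                              // nothing to send this round
    if (!sendRecords())                        // false: retriable failure, remainder stays in toSend
        awaitBackoff(SEND_FAILED_BACKOFF_MS);  // hypothetical backoff helper before retrying
}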

Example 20 with ConnectException

Use of org.apache.kafka.connect.errors.ConnectException in project kafka by apache.

From class DistributedHerder, method restartConnector().

@Override
public void restartConnector(final String connName, final Callback<Void> callback) {
    addRequest(new Callable<Void>() {

        @Override
        public Void call() throws Exception {
            if (checkRebalanceNeeded(callback))
                return null;
            if (!configState.connectors().contains(connName)) {
                callback.onCompletion(new NotFoundException("Unknown connector: " + connName), null);
                return null;
            }
            if (assignment.connectors().contains(connName)) {
                try {
                    worker.stopConnector(connName);
                    if (startConnector(connName))
                        callback.onCompletion(null, null);
                    else
                        callback.onCompletion(new ConnectException("Failed to start connector: " + connName), null);
                } catch (Throwable t) {
                    callback.onCompletion(t, null);
                }
            } else if (isLeader()) {
                callback.onCompletion(new NotAssignedException("Cannot restart connector since it is not assigned to this member", member.ownerUrl(connName)), null);
            } else {
                callback.onCompletion(new NotLeaderException("Cannot restart connector since it is not assigned to this member", leaderUrl()), null);
            }
            return null;
        }
    }, forwardErrorCallback(callback));
}
Also used: NotFoundException (org.apache.kafka.connect.errors.NotFoundException), TimeoutException (java.util.concurrent.TimeoutException), AlreadyExistsException (org.apache.kafka.connect.errors.AlreadyExistsException), WakeupException (org.apache.kafka.common.errors.WakeupException), BadRequestException (org.apache.kafka.connect.runtime.rest.errors.BadRequestException), NoSuchElementException (java.util.NoSuchElementException), ConnectException (org.apache.kafka.connect.errors.ConnectException)
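
Callers turn this asynchronous request into a synchronous answer by blocking on the callback. A minimal sketch using Kafka Connect's FutureCallback (org.apache.kafka.connect.util.FutureCallback), assuming a herder reference in scope; the connector name and timeout are placeholders:

import java.util.concurrent.TimeUnit;
import org.apache.kafka.connect.util.FutureCallback;

FutureCallback<Void> cb = new FutureCallback<>();
herder.restartConnector("my-connector", cb);
// Blocks until call() above completes the callback; a failure surfaces
// as an ExecutionException wrapping the original error.
cb.get(90, TimeUnit.SECONDS);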

Aggregations

ConnectException (org.apache.kafka.connect.errors.ConnectException): 42
HashMap (java.util.HashMap): 7
Map (java.util.Map): 7
ArrayList (java.util.ArrayList): 6
TimeoutException (java.util.concurrent.TimeoutException): 6
IOException (java.io.IOException): 5
Connector (org.apache.kafka.connect.connector.Connector): 5
ExecutionException (java.util.concurrent.ExecutionException): 4
NotFoundException (org.apache.kafka.connect.errors.NotFoundException): 4
ConnectorTaskId (org.apache.kafka.connect.util.ConnectorTaskId): 4
Test (org.junit.Test): 4
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest): 4
ByteBuffer (java.nio.ByteBuffer): 3
AlreadyExistsException (org.apache.kafka.connect.errors.AlreadyExistsException): 3
BadRequestException (org.apache.kafka.connect.runtime.rest.errors.BadRequestException): 3
SinkRecord (org.apache.kafka.connect.sink.SinkRecord): 3
SourceRecord (org.apache.kafka.connect.source.SourceRecord): 3
ThreadedTest (org.apache.kafka.connect.util.ThreadedTest): 3
BufferedReader (java.io.BufferedReader): 2
FileInputStream (java.io.FileInputStream): 2