Use of org.apache.kafka.connect.errors.ConnectException in project ignite by apache.
The class IgniteSinkTask, method put().
/**
 * Buffers records.
 *
 * @param records Records to inject into grid.
 */
@SuppressWarnings("unchecked")
@Override
public void put(Collection<SinkRecord> records) {
    try {
        for (SinkRecord record : records) {
            // Data is flushed asynchronously when CACHE_PER_NODE_DATA_SIZE is reached.
            if (extractor != null) {
                Map.Entry<Object, Object> entry = extractor.extract(record);
                StreamerContext.getStreamer().addData(entry.getKey(), entry.getValue());
            } else {
                if (record.key() != null) {
                    StreamerContext.getStreamer().addData(record.key(), record.value());
                } else {
                    log.error("Failed to stream a record with null key!");
                }
            }
        }
    } catch (ConnectException e) {
        log.error("Failed adding record", e);
        throw new ConnectException(e);
    }
}
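For context, the extractor field used above maps a SinkRecord to a cache key/value pair. Below is a minimal sketch, assuming Ignite's StreamSingleTupleExtractor interface (a single extract() method returning a Map.Entry); the class name and the "topic-partition-offset" key scheme are illustrative, not part of the Ignite sources.

import java.util.AbstractMap;
import java.util.Map;

import org.apache.ignite.stream.StreamSingleTupleExtractor;
import org.apache.kafka.connect.sink.SinkRecord;

/** Illustrative extractor: keys each cache entry by a hypothetical "topic-partition-offset" string. */
public class RecordCoordinatesExtractor implements StreamSingleTupleExtractor<SinkRecord, Object, Object> {
    @Override public Map.Entry<Object, Object> extract(SinkRecord rec) {
        // Derive the cache key from the record coordinates rather than record.key().
        String key = rec.topic() + "-" + rec.kafkaPartition() + "-" + rec.kafkaOffset();

        return new AbstractMap.SimpleEntry<>(key, rec.value());
    }
}

With such an extractor configured, records with a null Kafka key can still be streamed, since put() takes the extractor branch instead of the record.key() branch.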
Use of org.apache.kafka.connect.errors.ConnectException in project ignite by apache.
The class IgniteSourceConnector, method start().
/** {@inheritDoc} */
@Override
public void start(Map<String, String> props) {
    try {
        A.notNullOrEmpty(props.get(IgniteSourceConstants.CACHE_NAME), "cache name");
        A.notNullOrEmpty(props.get(IgniteSourceConstants.CACHE_CFG_PATH), "path to cache config file");
        A.notNullOrEmpty(props.get(IgniteSourceConstants.CACHE_EVENTS), "Registered cache events");
        A.notNullOrEmpty(props.get(IgniteSourceConstants.TOPIC_NAMES), "Kafka topics");
    } catch (IllegalArgumentException e) {
        throw new ConnectException("Cannot start IgniteSourceConnector due to configuration error", e);
    }

    configProps = props;
}
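A minimal sketch of a property map that would pass the checks above. All literal values are placeholders, the import paths assume the classes live in org.apache.ignite.stream.kafka.connect as in the ignite-kafka module, and which event names are accepted depends on the task's cacheEvents() parsing (not shown here).

import java.util.HashMap;
import java.util.Map;

import org.apache.ignite.stream.kafka.connect.IgniteSourceConnector;
import org.apache.ignite.stream.kafka.connect.IgniteSourceConstants;

/** Illustrative configuration for IgniteSourceConnector; all values are placeholders. */
public class SourceConnectorConfigExample {
    public static void main(String[] args) {
        Map<String, String> props = new HashMap<>();

        props.put(IgniteSourceConstants.CACHE_NAME, "someCache");
        props.put(IgniteSourceConstants.CACHE_CFG_PATH, "example-ignite.xml");
        props.put(IgniteSourceConstants.CACHE_EVENTS, "put");
        props.put(IgniteSourceConstants.TOPIC_NAMES, "someTopic1,someTopic2");

        IgniteSourceConnector connector = new IgniteSourceConnector();

        // Throws ConnectException if any of the four required properties is missing or empty.
        connector.start(props);
    }
}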
Use of org.apache.kafka.connect.errors.ConnectException in project ignite by apache.
The class IgniteSourceTask, method start().
/**
 * Filtering is done remotely. Local listener buffers data for injection into Kafka.
 *
 * @param props Task properties.
 */
@Override
public void start(Map<String, String> props) {
    synchronized (lock) {
        // Nothing to do if the task has already been started.
        if (!stopped)
            return;

        cacheName = props.get(IgniteSourceConstants.CACHE_NAME);
        igniteCfgFile = props.get(IgniteSourceConstants.CACHE_CFG_PATH);
        topics = props.get(IgniteSourceConstants.TOPIC_NAMES).split("\\s*,\\s*");

        if (props.containsKey(IgniteSourceConstants.INTL_BUF_SIZE))
            evtBufSize = Integer.parseInt(props.get(IgniteSourceConstants.INTL_BUF_SIZE));

        if (props.containsKey(IgniteSourceConstants.INTL_BATCH_SIZE))
            evtBatchSize = Integer.parseInt(props.get(IgniteSourceConstants.INTL_BATCH_SIZE));

        if (props.containsKey(IgniteSourceConstants.CACHE_FILTER_CLASS)) {
            String filterCls = props.get(IgniteSourceConstants.CACHE_FILTER_CLASS);

            if (filterCls != null && !filterCls.isEmpty()) {
                try {
                    Class<? extends IgnitePredicate<CacheEvent>> clazz =
                        (Class<? extends IgnitePredicate<CacheEvent>>)Class.forName(filterCls);

                    filter = clazz.newInstance();
                } catch (Exception e) {
                    log.error("Failed to instantiate the provided filter! User-enabled filtering is ignored!", e);
                }
            }
        }

        TaskRemoteFilter rmtLsnr = new TaskRemoteFilter(cacheName);

        try {
            int[] evts = cacheEvents(props.get(IgniteSourceConstants.CACHE_EVENTS));

            rmtLsnrId = IgniteGrid.getIgnite().events(IgniteGrid.getIgnite().cluster().forCacheNodes(cacheName))
                .remoteListen(locLsnr, rmtLsnr, evts);
        } catch (Exception e) {
            log.error("Failed to register event listener!", e);

            throw new ConnectException(e);
        } finally {
            stopped = false;
        }
    }
}
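Because the CACHE_FILTER_CLASS property above is resolved via Class.forName() and newInstance(), the supplied filter must be a public IgnitePredicate<CacheEvent> with a no-arg constructor. A minimal sketch follows; the class name and the cache name it matches are illustrative.

import org.apache.ignite.events.CacheEvent;
import org.apache.ignite.lang.IgnitePredicate;

/** Illustrative remote filter: forwards only events from one cache ("someCache" is a placeholder). */
public class SingleCacheEventFilter implements IgnitePredicate<CacheEvent> {
    @Override public boolean apply(CacheEvent evt) {
        return "someCache".equals(evt.cacheName());
    }
}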
Use of org.apache.kafka.connect.errors.ConnectException in project kafka by apache.
The class WorkerSourceTask, method sendRecords().
/**
 * Try to send a batch of records. If a send fails and is retriable, this saves the remainder of the batch so it can
 * be retried after backing off. If a send fails and is not retriable, this will throw a ConnectException.
 * @return true if all messages were sent, false if some need to be retried
 */
private boolean sendRecords() {
    int processed = 0;

    for (final SourceRecord preTransformRecord : toSend) {
        final SourceRecord record = transformationChain.apply(preTransformRecord);

        if (record == null) {
            commitTaskRecord(preTransformRecord);
            continue;
        }

        byte[] key = keyConverter.fromConnectData(record.topic(), record.keySchema(), record.key());
        byte[] value = valueConverter.fromConnectData(record.topic(), record.valueSchema(), record.value());

        final ProducerRecord<byte[], byte[]> producerRecord = new ProducerRecord<>(record.topic(), record.kafkaPartition(),
            ConnectUtils.checkAndConvertTimestamp(record.timestamp()), key, value);

        log.trace("Appending record with key {}, value {}", record.key(), record.value());

        // Register the record before sending: the producer callback may fire synchronously, so it must already be
        // able to find the message among the outstanding (or backlogged, while flushing) messages and update the offsets.
        synchronized (this) {
            if (!lastSendFailed) {
                if (!flushing) {
                    outstandingMessages.put(producerRecord, producerRecord);
                } else {
                    outstandingMessagesBacklog.put(producerRecord, producerRecord);
                }

                // Offsets are converted & serialized in the OffsetWriter
                offsetWriter.offset(record.sourcePartition(), record.sourceOffset());
            }
        }

        try {
            final String topic = producerRecord.topic();

            producer.send(producerRecord, new Callback() {
                @Override
                public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                    if (e != null) {
                        // Given the default settings for zero data loss, this should basically never happen --
                        // between "infinite" retries, indefinite blocking on full buffers, and "infinite" request
                        // timeouts, callbacks with exceptions should never be invoked in practice. If the
                        // user overrode these settings, the best we can do is notify them of the failure via
                        // logging.
                        log.error("{} failed to send record to {}: {}", id, topic, e);
                        log.debug("Failed record: {}", preTransformRecord);
                    } else {
                        log.trace("Wrote record successfully: topic {} partition {} offset {}",
                            recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset());
                        commitTaskRecord(preTransformRecord);
                    }

                    recordSent(producerRecord);
                }
            });

            lastSendFailed = false;
        } catch (RetriableException e) {
            log.warn("Failed to send {}, backing off before retrying:", producerRecord, e);
            toSend = toSend.subList(processed, toSend.size());
            lastSendFailed = true;
            return false;
        } catch (KafkaException e) {
            throw new ConnectException("Unrecoverable exception trying to send", e);
        }

        processed++;
    }

    toSend = null;

    return true;
}
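The record == null branch above is how filtering transformations drop records: the original record is still committed so its offset is not lost. Below is a minimal sketch of such a single message transform, assuming Kafka Connect's Transformation interface; the class name and the drop condition (null values) are illustrative.

import java.util.Map;

import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.transforms.Transformation;

/** Illustrative SMT: returning null makes sendRecords() commit the record without producing it. */
public class DropTombstones implements Transformation<SourceRecord> {
    @Override public SourceRecord apply(SourceRecord rec) {
        // Drop records with a null value; pass everything else through unchanged.
        return rec.value() == null ? null : rec;
    }

    @Override public ConfigDef config() {
        return new ConfigDef();
    }

    @Override public void configure(Map<String, ?> configs) {
        // No configuration needed for this sketch.
    }

    @Override public void close() {
        // Nothing to release.
    }
}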
Use of org.apache.kafka.connect.errors.ConnectException in project kafka by apache.
The class DistributedHerder, method restartConnector().
@Override
public void restartConnector(final String connName, final Callback<Void> callback) {
    addRequest(new Callable<Void>() {
        @Override
        public Void call() throws Exception {
            if (checkRebalanceNeeded(callback))
                return null;

            if (!configState.connectors().contains(connName)) {
                callback.onCompletion(new NotFoundException("Unknown connector: " + connName), null);
                return null;
            }

            if (assignment.connectors().contains(connName)) {
                try {
                    worker.stopConnector(connName);

                    if (startConnector(connName))
                        callback.onCompletion(null, null);
                    else
                        callback.onCompletion(new ConnectException("Failed to start connector: " + connName), null);
                } catch (Throwable t) {
                    callback.onCompletion(t, null);
                }
            } else if (isLeader()) {
                callback.onCompletion(new NotAssignedException("Cannot restart connector since it is not assigned to this member", member.ownerUrl(connName)), null);
            } else {
                callback.onCompletion(new NotLeaderException("Cannot restart connector since it is not assigned to this member", leaderUrl()), null);
            }

            return null;
        }
    }, forwardErrorCallback(callback));
}
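For completeness, a sketch of how a caller might invoke restartConnector() and observe the ConnectException (or other failure) through the callback. Obtaining the Herder instance is outside the scope of this sketch, and the logger setup and helper class are illustrative.

import org.apache.kafka.connect.runtime.Herder;
import org.apache.kafka.connect.util.Callback;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/** Illustrative caller: logs the outcome reported via the asynchronous callback. */
public class RestartExample {
    private static final Logger log = LoggerFactory.getLogger(RestartExample.class);

    public static void restart(Herder herder, final String connName) {
        herder.restartConnector(connName, new Callback<Void>() {
            @Override public void onCompletion(Throwable error, Void result) {
                if (error != null)
                    log.error("Restart of {} failed", connName, error);
                else
                    log.info("Restart of {} completed", connName);
            }
        });
    }
}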