Use of org.apache.kafka.connect.errors.ConnectException in project kafka by apache.
In the class WorkerSinkTaskTest, the method testPreCommitFailureAfterPartialRevocationAndAssignment:
@Test
public void testPreCommitFailureAfterPartialRevocationAndAssignment() throws Exception {
    createTask(initialState);

    // First poll; assignment is [TP1, TP2]
    expectInitializeTask();
    expectTaskGetTopic(true);
    expectPollInitialAssignment();

    // Second poll; a single record is delivered from TP1
    expectConsumerPoll(1);
    expectConversionAndTransformation(1);
    sinkTask.put(EasyMock.anyObject());
    EasyMock.expectLastCall();

    // Third poll; assignment changes to [TP2]
    EasyMock.expect(consumer.poll(Duration.ofMillis(EasyMock.anyLong()))).andAnswer(() -> {
        rebalanceListener.getValue().onPartitionsRevoked(Collections.singleton(TOPIC_PARTITION));
        rebalanceListener.getValue().onPartitionsAssigned(Collections.emptySet());
        return ConsumerRecords.empty();
    });
    EasyMock.expect(consumer.assignment()).andReturn(Collections.singleton(TOPIC_PARTITION)).times(2);
    final Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    offsets.put(TOPIC_PARTITION, new OffsetAndMetadata(FIRST_OFFSET + 1));
    sinkTask.preCommit(offsets);
    EasyMock.expectLastCall().andReturn(offsets);
    consumer.commitSync(offsets);
    EasyMock.expectLastCall();
    sinkTask.close(Collections.singleton(TOPIC_PARTITION));
    EasyMock.expectLastCall();
    sinkTask.put(Collections.emptyList());
    EasyMock.expectLastCall();

    // Fourth poll; assignment changes to [TP2, TP3]
    EasyMock.expect(consumer.poll(Duration.ofMillis(EasyMock.anyLong()))).andAnswer(() -> {
        rebalanceListener.getValue().onPartitionsRevoked(Collections.emptySet());
        rebalanceListener.getValue().onPartitionsAssigned(Collections.singleton(TOPIC_PARTITION3));
        return ConsumerRecords.empty();
    });
    EasyMock.expect(consumer.assignment()).andReturn(new HashSet<>(Arrays.asList(TOPIC_PARTITION2, TOPIC_PARTITION3))).times(2);
    EasyMock.expect(consumer.position(TOPIC_PARTITION3)).andReturn(FIRST_OFFSET);
    sinkTask.open(Collections.singleton(TOPIC_PARTITION3));
    EasyMock.expectLastCall();
    sinkTask.put(Collections.emptyList());
    EasyMock.expectLastCall();

    // Fifth poll; an offset commit takes place
    EasyMock.expect(consumer.assignment()).andReturn(new HashSet<>(Arrays.asList(TOPIC_PARTITION2, TOPIC_PARTITION3))).times(2);
    final Map<TopicPartition, OffsetAndMetadata> workerCurrentOffsets = new HashMap<>();
    workerCurrentOffsets.put(TOPIC_PARTITION2, new OffsetAndMetadata(FIRST_OFFSET));
    workerCurrentOffsets.put(TOPIC_PARTITION3, new OffsetAndMetadata(FIRST_OFFSET));
    sinkTask.preCommit(workerCurrentOffsets);
    EasyMock.expectLastCall().andThrow(new ConnectException("Failed to flush"));
    consumer.seek(TOPIC_PARTITION2, FIRST_OFFSET);
    EasyMock.expectLastCall();
    consumer.seek(TOPIC_PARTITION3, FIRST_OFFSET);
    EasyMock.expectLastCall();
    expectConsumerPoll(0);
    sinkTask.put(EasyMock.eq(Collections.emptyList()));
    EasyMock.expectLastCall();

    PowerMock.replayAll();

    workerTask.initialize(TASK_CONFIG);
    workerTask.initializeAndStart();
    // First iteration--first call to poll, first consumer assignment
    workerTask.iteration();
    // Second iteration--second call to poll, delivery of one record
    workerTask.iteration();
    // Third iteration--third call to poll, partial consumer revocation
    workerTask.iteration();
    // Fourth iteration--fourth call to poll, partial consumer assignment
    workerTask.iteration();
    // Fifth iteration--task-requested offset commit with failure in SinkTask::preCommit
    sinkTaskContext.getValue().requestCommit();
    workerTask.iteration();

    PowerMock.verifyAll();
}
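The test drives WorkerSinkTask through a partial rebalance and then verifies that a ConnectException thrown from SinkTask.preCommit aborts the offset commit and rewinds the consumer with seek. The sketch below shows the same contract from the connector author's side; the task class and its flushBufferedRecords helper are hypothetical, not part of the Kafka code above.

import java.util.Collection;
import java.util.Map;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;

// Hypothetical sink task illustrating the preCommit contract exercised above:
// throwing ConnectException from preCommit makes the worker skip the commit
// and seek the consumer back to the last committed offsets.
public class FlushingSinkTask extends SinkTask {
    @Override
    public String version() {
        return "0.0.1";
    }

    @Override
    public void start(Map<String, String> props) {
        // open connections to the external system here
    }

    @Override
    public void put(Collection<SinkRecord> records) {
        // buffer records destined for the external system
    }

    @Override
    public Map<TopicPartition, OffsetAndMetadata> preCommit(Map<TopicPartition, OffsetAndMetadata> currentOffsets) {
        try {
            flushBufferedRecords(); // hypothetical helper
        } catch (Exception e) {
            // Mirrors the expectation in the test: the worker catches this,
            // aborts the commit, and rewinds the consumer.
            throw new ConnectException("Failed to flush", e);
        }
        return currentOffsets;
    }

    @Override
    public void stop() {
    }

    private void flushBufferedRecords() {
        // hypothetical flush to the external system
    }
}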
Use of org.apache.kafka.connect.errors.ConnectException in project kafka by apache.
In the class DistributedHerder, the method restartConnector:
@Override
public void restartConnector(final String connName, final Callback<Void> callback) {
    addRequest(new Callable<Void>() {
        @Override
        public Void call() throws Exception {
            if (checkRebalanceNeeded(callback))
                return null;
            if (!configState.connectors().contains(connName)) {
                callback.onCompletion(new NotFoundException("Unknown connector: " + connName), null);
                return null;
            }
            if (assignment.connectors().contains(connName)) {
                try {
                    worker.stopConnector(connName);
                    if (startConnector(connName))
                        callback.onCompletion(null, null);
                    else
                        callback.onCompletion(new ConnectException("Failed to start connector: " + connName), null);
                } catch (Throwable t) {
                    callback.onCompletion(t, null);
                }
            } else if (isLeader()) {
                callback.onCompletion(new NotAssignedException("Cannot restart connector since it is not assigned to this member", member.ownerUrl(connName)), null);
            } else {
                callback.onCompletion(new NotLeaderException("Cannot restart connector since it is not assigned to this member", leaderUrl()), null);
            }
            return null;
        }
    }, forwardErrorCallback(callback));
}
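Callers reach restartConnector through a Callback<Void>; in the Connect runtime the REST layer uses org.apache.kafka.connect.util.FutureCallback, which implements both Callback and Future, to block on the asynchronous request. A minimal sketch of such a caller, assuming 'herder' is an already-started DistributedHerder and the 90-second timeout is an arbitrary choice:

import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.util.FutureCallback;

// Hypothetical helper; 'herder' is assumed to be a started DistributedHerder.
void restartAndWait(DistributedHerder herder, String connName) throws InterruptedException, TimeoutException {
    FutureCallback<Void> cb = new FutureCallback<>();
    herder.restartConnector(connName, cb);
    try {
        cb.get(90, TimeUnit.SECONDS); // completes normally when the restart succeeded
    } catch (ExecutionException e) {
        // Carries the NotFoundException, NotAssignedException, NotLeaderException,
        // or ConnectException("Failed to start connector: ...") raised above.
        throw new ConnectException("Restart of " + connName + " failed", e.getCause());
    }
}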
Use of org.apache.kafka.connect.errors.ConnectException in project kafka by apache.
In the class DistributedHerder, the method restartTask:
@Override
public void restartTask(final ConnectorTaskId id, final Callback<Void> callback) {
    addRequest(new Callable<Void>() {
        @Override
        public Void call() throws Exception {
            if (checkRebalanceNeeded(callback))
                return null;
            if (!configState.connectors().contains(id.connector())) {
                callback.onCompletion(new NotFoundException("Unknown connector: " + id.connector()), null);
                return null;
            }
            if (configState.taskConfig(id) == null) {
                callback.onCompletion(new NotFoundException("Unknown task: " + id), null);
                return null;
            }
            if (assignment.tasks().contains(id)) {
                try {
                    worker.stopAndAwaitTask(id);
                    if (startTask(id))
                        callback.onCompletion(null, null);
                    else
                        callback.onCompletion(new ConnectException("Failed to start task: " + id), null);
                } catch (Throwable t) {
                    callback.onCompletion(t, null);
                }
            } else if (isLeader()) {
                callback.onCompletion(new NotAssignedException("Cannot restart task since it is not assigned to this member", member.ownerUrl(id)), null);
            } else {
                callback.onCompletion(new NotLeaderException("Cannot restart task since it is not assigned to this member", leaderUrl()), null);
            }
            return null;
        }
    }, forwardErrorCallback(callback));
}
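restartTask follows the same pattern, keyed by a ConnectorTaskId instead of a connector name. A hypothetical invocation, reusing the FutureCallback approach from the previous sketch:

// ConnectorTaskId (org.apache.kafka.connect.util) pairs a connector name
// with a zero-based task index; 'herder' is the same assumed instance.
ConnectorTaskId taskId = new ConnectorTaskId("my-connector", 0);
FutureCallback<Void> taskCb = new FutureCallback<>();
herder.restartTask(taskId, taskCb);
taskCb.get(90, TimeUnit.SECONDS);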
Use of org.apache.kafka.connect.errors.ConnectException in project kafka by apache.
In the class StandaloneHerder, the method putConnectorConfig:
@Override
public synchronized void putConnectorConfig(String connName, final Map<String, String> config, boolean allowReplace, final Callback<Created<ConnectorInfo>> callback) {
    try {
        ConfigInfos validatedConfig = validateConnectorConfig(config);
        if (validatedConfig.errorCount() > 0) {
            callback.onCompletion(new BadRequestException("Connector configuration is invalid "
                    + "(use the endpoint `/{connectorType}/config/validate` to get a full list of errors)"), null);
            return;
        }
        boolean created = false;
        if (configState.contains(connName)) {
            if (!allowReplace) {
                callback.onCompletion(new AlreadyExistsException("Connector " + connName + " already exists"), null);
                return;
            }
            worker.stopConnector(connName);
        } else {
            created = true;
        }
        if (!startConnector(config)) {
            callback.onCompletion(new ConnectException("Failed to start connector: " + connName), null);
            return;
        }
        updateConnectorTasks(connName);
        callback.onCompletion(null, new Created<>(created, createConnectorInfo(connName)));
    } catch (ConnectException e) {
        callback.onCompletion(e, null);
    }
}
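The standalone herder validates the configuration first, then either rejects the request (AlreadyExistsException when allowReplace is false) or stops and restarts the connector. A minimal sketch of a caller, assuming 'herder' is a started StandaloneHerder; the connector name, topic, and file path are placeholder values:

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.connect.runtime.Herder;
import org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo;
import org.apache.kafka.connect.util.FutureCallback;

// Hypothetical caller using the standard connector config keys.
Map<String, String> config = new HashMap<>();
config.put("name", "local-file-sink");
config.put("connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector");
config.put("tasks.max", "1");
config.put("topics", "connect-test");
config.put("file", "/tmp/connect-test.txt");

FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>();
herder.putConnectorConfig("local-file-sink", config, false, cb); // allowReplace = false
Herder.Created<ConnectorInfo> result = cb.get(); // throws if validation or startup failed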
Use of org.apache.kafka.connect.errors.ConnectException in project kafka by apache.
In the class Worker, the method buildWorkerTask:
private WorkerTask buildWorkerTask(ConnectorConfig connConfig, ConnectorTaskId id, Task task, TaskStatus.Listener statusListener, TargetState initialState, Converter keyConverter, Converter valueConverter) {
    // Decide which type of worker task we need based on the type of task.
    if (task instanceof SourceTask) {
        TransformationChain<SourceRecord> transformationChain = new TransformationChain<>(connConfig.<SourceRecord>transformations());
        OffsetStorageReader offsetReader = new OffsetStorageReaderImpl(offsetBackingStore, id.connector(), internalKeyConverter, internalValueConverter);
        OffsetStorageWriter offsetWriter = new OffsetStorageWriter(offsetBackingStore, id.connector(), internalKeyConverter, internalValueConverter);
        KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(producerProps);
        return new WorkerSourceTask(id, (SourceTask) task, statusListener, initialState, keyConverter, valueConverter, transformationChain, producer, offsetReader, offsetWriter, config, time);
    } else if (task instanceof SinkTask) {
        TransformationChain<SinkRecord> transformationChain = new TransformationChain<>(connConfig.<SinkRecord>transformations());
        return new WorkerSinkTask(id, (SinkTask) task, statusListener, initialState, config, keyConverter, valueConverter, transformationChain, time);
    } else {
        log.error("Tasks must be a subclass of either SourceTask or SinkTask: {}", task);
        throw new ConnectException("Tasks must be a subclass of either SourceTask or SinkTask");
    }
}
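The final else branch is where the ConnectException is raised: it fires only for a Task implementation that bypasses both abstract base classes. A hypothetical class that would trip it:

import java.util.Map;
import org.apache.kafka.connect.connector.Task;

// Hypothetical task implementing Task directly rather than extending
// SourceTask or SinkTask; passing it to buildWorkerTask would reach the
// else branch above and raise the ConnectException.
public class RawTask implements Task {
    @Override
    public String version() {
        return "0.0.1";
    }

    @Override
    public void start(Map<String, String> props) {
    }

    @Override
    public void stop() {
    }
}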