use of org.apache.kafka.connect.storage.OffsetStorageReader in project kafka-connect-cdc-mssql by jcustenborder.
the class QueryServiceIT method queryTable.
private void queryTable(ChangeKey input) throws SQLException, IOException {
    List<Change> expectedChanges;
    String fileName = String.format("%s.%s.json", input.schemaName, input.tableName);
    String resourceName = String.format("query/table/%s/%s", input.databaseName, fileName);
    long timestamp = 0L;
    try (InputStream stream = this.getClass().getResourceAsStream(resourceName)) {
        Preconditions.checkNotNull(stream, "Could not find resource %s.", resourceName);
        log.info("Loading expected changes from {}", resourceName);
        expectedChanges = loadChanges(stream);
        // Use the timestamp of the first expected change.
        for (Change change : expectedChanges) {
            timestamp = change.timestamp();
            break;
        }
    }
    OffsetStorageReader offsetStorageReader = mock(OffsetStorageReader.class);
    TableMetadataProvider tableMetadataProvider = new MsSqlTableMetadataProvider(config, offsetStorageReader);
    Time time = mock(Time.class);
    ChangeWriter changeWriter = mock(ChangeWriter.class);
    List<Change> actualChanges = new ArrayList<>(1000);
    doAnswer(invocationOnMock -> {
        Change change = invocationOnMock.getArgument(0);
        actualChanges.add(change);
        return null;
    }).when(changeWriter).addChange(any());
    QueryService queryService = new QueryService(time, tableMetadataProvider, config, changeWriter);
    when(time.milliseconds()).thenReturn(timestamp);
    queryService.queryTable(changeWriter, input);
    verify(offsetStorageReader, only()).offset(anyMap());
    verify(time, atLeastOnce()).milliseconds();
    if (log.isTraceEnabled()) {
        log.trace("Found {} change(s).", actualChanges.size());
    }
    assertFalse(actualChanges.isEmpty(), "Changes should have been returned.");
    assertEquals(expectedChanges.size(), actualChanges.size(), "The number of actualChanges returned is not the expected count.");
    for (int i = 0; i < expectedChanges.size(); i++) {
        Change expectedChange = expectedChanges.get(i);
        Change actualChange = actualChanges.get(i);
        assertChange(expectedChange, actualChange);
    }
}
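The mocked OffsetStorageReader above is never stubbed, so MsSqlTableMetadataProvider sees null for every offset lookup and treats each table as having no previously committed offset. A minimal sketch, not taken from the project, of how a stored offset could be simulated instead; the class name and the partition/offset keys are illustrative only:

import static org.mockito.ArgumentMatchers.anyMap;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.Collections;
import java.util.Map;
import org.apache.kafka.connect.storage.OffsetStorageReader;

class StubbedOffsetReaderSketch {
    static OffsetStorageReader readerWithStoredOffset() {
        // Illustrative keys only; the real connector defines its own partition/offset schema.
        Map<String, Object> storedOffset = Collections.singletonMap("timestamp", 1484000000000L);
        OffsetStorageReader reader = mock(OffsetStorageReader.class);
        // Any partition lookup now resolves to the stored offset instead of null.
        when(reader.offset(anyMap())).thenReturn(storedOffset);
        return reader;
    }
}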
use of org.apache.kafka.connect.storage.OffsetStorageReader in project apache-kafka-on-k8s by banzaicloud.
the class Worker method buildWorkerTask.
private WorkerTask buildWorkerTask(ConnectorConfig connConfig, ConnectorTaskId id, Task task, TaskStatus.Listener statusListener, TargetState initialState, Converter keyConverter, Converter valueConverter, HeaderConverter headerConverter, ClassLoader loader) {
    // Decide which type of worker task we need based on the type of task.
    if (task instanceof SourceTask) {
        TransformationChain<SourceRecord> transformationChain = new TransformationChain<>(connConfig.<SourceRecord>transformations());
        OffsetStorageReader offsetReader = new OffsetStorageReaderImpl(offsetBackingStore, id.connector(), internalKeyConverter, internalValueConverter);
        OffsetStorageWriter offsetWriter = new OffsetStorageWriter(offsetBackingStore, id.connector(), internalKeyConverter, internalValueConverter);
        KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(producerProps);
        return new WorkerSourceTask(id, (SourceTask) task, statusListener, initialState, keyConverter, valueConverter, headerConverter, transformationChain, producer, offsetReader, offsetWriter, config, metrics, loader, time);
    } else if (task instanceof SinkTask) {
        TransformationChain<SinkRecord> transformationChain = new TransformationChain<>(connConfig.<SinkRecord>transformations());
        return new WorkerSinkTask(id, (SinkTask) task, statusListener, initialState, config, metrics, keyConverter, valueConverter, headerConverter, transformationChain, loader, time);
    } else {
        log.error("Tasks must be a subclass of either SourceTask or SinkTask: {}", task);
        throw new ConnectException("Tasks must be a subclass of either SourceTask or SinkTask");
    }
}
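The offsetBackingStore, internalKeyConverter, and internalValueConverter fields used above come from the worker's own setup. As a rough illustration, and an assumption about typical wiring rather than this project's exact code, the internal converters that serialize offsets are usually JsonConverter instances configured without schemas:

import java.util.Collections;
import org.apache.kafka.connect.json.JsonConverter;
import org.apache.kafka.connect.json.JsonConverterConfig;

class InternalConverterSketch {
    static JsonConverter internalConverter(boolean isKey) {
        // Schema-less JSON keeps the serialized offsets compact and human-readable.
        JsonConverter converter = new JsonConverter();
        converter.configure(Collections.singletonMap(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"), isKey);
        return converter;
    }
}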
use of org.apache.kafka.connect.storage.OffsetStorageReader in project debezium by debezium.
the class EmbeddedEngine method run.
/**
* Run this embedded connector and deliver database changes to the registered {@link Consumer}. This method blocks until
* the connector is stopped.
* <p>
* First, the method checks to see if this instance is currently {@link #run() running}, and if so immediately returns.
* <p>
* If the configuration is valid, this method starts the connector and starts polling the connector for change events.
* All messages are delivered in batches to the {@link Consumer} registered with this embedded connector. The batch size,
* polling
* frequency, and other parameters are controlled via configuration settings. This continues until this connector is
* {@link #stop() stopped}.
* <p>
* Note that there are two ways to stop a connector running on a thread: calling {@link #stop()} from another thread, or
* interrupting the thread (e.g., via {@link ExecutorService#shutdownNow()}).
* <p>
* This method can be called repeatedly as needed.
*/
@Override
public void run() {
    if (runningThread.compareAndSet(null, Thread.currentThread())) {
        final String engineName = config.getString(ENGINE_NAME);
        final String connectorClassName = config.getString(CONNECTOR_CLASS);
        final Optional<ConnectorCallback> connectorCallback = Optional.ofNullable(this.connectorCallback);
        // Only one thread can be in this part of the method at a time ...
        latch.countUp();
        try {
            if (!config.validateAndRecord(CONNECTOR_FIELDS, logger::error)) {
                fail("Failed to start connector with invalid configuration (see logs for actual errors)");
                return;
            }
            // Instantiate the connector ...
            SourceConnector connector = null;
            try {
                @SuppressWarnings("unchecked")
                Class<? extends SourceConnector> connectorClass = (Class<SourceConnector>) classLoader.loadClass(connectorClassName);
                connector = connectorClass.newInstance();
            } catch (Throwable t) {
                fail("Unable to instantiate connector class '" + connectorClassName + "'", t);
                return;
            }
            // Instantiate the offset store ...
            final String offsetStoreClassName = config.getString(OFFSET_STORAGE);
            OffsetBackingStore offsetStore = null;
            try {
                @SuppressWarnings("unchecked")
                Class<? extends OffsetBackingStore> offsetStoreClass = (Class<OffsetBackingStore>) classLoader.loadClass(offsetStoreClassName);
                offsetStore = offsetStoreClass.newInstance();
            } catch (Throwable t) {
                fail("Unable to instantiate OffsetBackingStore class '" + offsetStoreClassName + "'", t);
                return;
            }
            // Initialize the offset store ...
            try {
                offsetStore.configure(workerConfig);
                offsetStore.start();
            } catch (Throwable t) {
                fail("Unable to configure and start the '" + offsetStoreClassName + "' offset backing store", t);
                return;
            }
            // Set up the offset commit policy ...
            if (offsetCommitPolicy == null) {
                offsetCommitPolicy = config.getInstance(EmbeddedEngine.OFFSET_COMMIT_POLICY, OffsetCommitPolicy.class, config);
            }
            // Initialize the connector using a context that does NOT respond to requests to reconfigure tasks ...
            ConnectorContext context = new ConnectorContext() {

                @Override
                public void requestTaskReconfiguration() {
                    // Do nothing ...
                }

                @Override
                public void raiseError(Exception e) {
                    fail(e.getMessage(), e);
                }
            };
            connector.initialize(context);
            OffsetStorageWriter offsetWriter = new OffsetStorageWriter(offsetStore, engineName, keyConverter, valueConverter);
            OffsetStorageReader offsetReader = new OffsetStorageReaderImpl(offsetStore, engineName, keyConverter, valueConverter);
            long commitTimeoutMs = config.getLong(OFFSET_COMMIT_TIMEOUT_MS);
            try {
                // Start the connector with the given properties and get the task configurations ...
                connector.start(config.asMap());
                connectorCallback.ifPresent(ConnectorCallback::connectorStarted);
                List<Map<String, String>> taskConfigs = connector.taskConfigs(1);
                Class<? extends Task> taskClass = connector.taskClass();
                SourceTask task = null;
                try {
                    task = (SourceTask) taskClass.newInstance();
                } catch (IllegalAccessException | InstantiationException t) {
                    fail("Unable to instantiate connector's task class '" + taskClass.getName() + "'", t);
                    return;
                }
                try {
                    SourceTaskContext taskContext = () -> offsetReader;
                    task.initialize(taskContext);
                    task.start(taskConfigs.get(0));
                    connectorCallback.ifPresent(ConnectorCallback::taskStarted);
                } catch (Throwable t) {
                    // Mask the passwords ...
                    Configuration config = Configuration.from(taskConfigs.get(0)).withMaskedPasswords();
                    String msg = "Unable to initialize and start connector's task class '" + taskClass.getName() + "' with config: " + config;
                    fail(msg, t);
                    return;
                }
                recordsSinceLastCommit = 0;
                Throwable handlerError = null;
                try {
                    timeOfLastCommitMillis = clock.currentTimeInMillis();
                    boolean keepProcessing = true;
                    List<SourceRecord> changeRecords = null;
                    while (runningThread.get() != null && handlerError == null && keepProcessing) {
                        try {
                            try {
                                logger.debug("Embedded engine is polling task for records on thread " + runningThread.get());
                                // blocks until there are values ...
                                changeRecords = task.poll();
                                logger.debug("Embedded engine returned from polling task for records");
                            } catch (InterruptedException e) {
                                // Interrupted while polling ...
                                logger.debug("Embedded engine interrupted on thread " + runningThread.get() + " while polling the task for records");
                                Thread.interrupted();
                                break;
                            }
                            try {
                                if (changeRecords != null && !changeRecords.isEmpty()) {
                                    logger.debug("Received {} records from the task", changeRecords.size());
                                    // First forward the records to the connector's consumer ...
                                    for (SourceRecord record : changeRecords) {
                                        try {
                                            consumer.accept(record);
                                            task.commitRecord(record);
                                        } catch (StopConnectorException e) {
                                            keepProcessing = false;
                                            // Stop processing any more, but first record the offset for this record's partition
                                            offsetWriter.offset(record.sourcePartition(), record.sourceOffset());
                                            recordsSinceLastCommit += 1;
                                            break;
                                        } catch (Throwable t) {
                                            handlerError = t;
                                            break;
                                        }
                                        // Record the offset for this record's partition
                                        offsetWriter.offset(record.sourcePartition(), record.sourceOffset());
                                        recordsSinceLastCommit += 1;
                                    }
                                    // Flush the offsets to storage if necessary ...
                                    maybeFlush(offsetWriter, offsetCommitPolicy, commitTimeoutMs, task);
                                } else {
                                    logger.debug("Received no records from the task");
                                }
                            } catch (Throwable t) {
                                // There was some sort of unexpected exception, so we should stop work
                                if (handlerError == null) {
                                    // make sure we capture the error first so that we can report it later
                                    handlerError = t;
                                }
                                break;
                            }
                        } finally {
                            // then try to commit the offsets, since we record them only after the records were handled
                            // by the consumer ...
                            maybeFlush(offsetWriter, offsetCommitPolicy, commitTimeoutMs, task);
                        }
                    }
                } finally {
                    if (handlerError != null) {
                        // There was an error in the handler so make sure it's always captured...
                        fail("Stopping connector after error in the application's handler method: " + handlerError.getMessage(), handlerError);
                    }
                    try {
                        // First stop the task ...
                        logger.debug("Stopping the task and engine");
                        task.stop();
                        connectorCallback.ifPresent(ConnectorCallback::taskStopped);
                        // Always commit offsets that were captured from the source records we actually processed ...
                        commitOffsets(offsetWriter, commitTimeoutMs, task);
                        if (handlerError == null) {
                            // We stopped normally ...
                            succeed("Connector '" + connectorClassName + "' completed normally.");
                        }
                    } catch (Throwable t) {
                        fail("Error while trying to stop the task and commit the offsets", t);
                    }
                }
            } catch (Throwable t) {
                fail("Error while trying to run connector class '" + connectorClassName + "'", t);
            } finally {
                // Close the offset storage and finally the connector ...
                try {
                    offsetStore.stop();
                } catch (Throwable t) {
                    fail("Error while trying to stop the offset store", t);
                } finally {
                    try {
                        connector.stop();
                        connectorCallback.ifPresent(ConnectorCallback::connectorStopped);
                    } catch (Throwable t) {
                        fail("Error while trying to stop connector class '" + connectorClassName + "'", t);
                    }
                }
            }
        } finally {
            latch.countDown();
            runningThread.set(null);
            // after we've "shut down" the engine, fire the completion callback based on the results we collected
            completionCallback.handle(completionResult.success(), completionResult.message(), completionResult.error());
        }
    }
}
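For context, this is roughly how an application would construct and drive the engine whose run() method appears above. It is a minimal usage sketch assuming Debezium's classic EmbeddedEngine builder API (create(), using(Configuration), notifying(Consumer), build()); the connector class, offset file path, and the fixed sleep are placeholders, and the connector-specific settings are omitted:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import io.debezium.config.Configuration;
import io.debezium.embedded.EmbeddedEngine;

public class EmbeddedEngineSketch {
    public static void main(String[] args) throws Exception {
        Configuration config = Configuration.create()
                .with(EmbeddedEngine.ENGINE_NAME, "example-engine")
                .with(EmbeddedEngine.CONNECTOR_CLASS, "io.debezium.connector.mysql.MySqlConnector")
                .with(EmbeddedEngine.OFFSET_STORAGE_FILE_FILENAME, "/tmp/example-offsets.dat")
                // ... connector-specific settings (hostnames, credentials, include lists) go here ...
                .build();
        EmbeddedEngine engine = EmbeddedEngine.create()
                .using(config)
                .notifying(record -> System.out.println(record))
                .build();
        ExecutorService executor = Executors.newSingleThreadExecutor();
        // run() blocks the submitted thread until stop() is called or the thread is interrupted.
        executor.execute(engine);
        Thread.sleep(60_000);
        engine.stop();
        executor.shutdown();
    }
}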
use of org.apache.kafka.connect.storage.OffsetStorageReader in project kafka by apache.
the class Worker method startConnector.
/**
* Start a connector managed by this worker.
*
* @param connName the connector name.
* @param connProps the properties of the connector.
* @param ctx the connector runtime context.
* @param statusListener a listener for the runtime status transitions of the connector.
* @param initialState the initial state of the connector.
* @param onConnectorStateChange invoked when the initial state change of the connector is completed
*/
public void startConnector(String connName, Map<String, String> connProps, CloseableConnectorContext ctx, ConnectorStatus.Listener statusListener, TargetState initialState, Callback<TargetState> onConnectorStateChange) {
    final ConnectorStatus.Listener connectorStatusListener = workerMetricsGroup.wrapStatusListener(statusListener);
    try (LoggingContext loggingContext = LoggingContext.forConnector(connName)) {
        if (connectors.containsKey(connName)) {
            onConnectorStateChange.onCompletion(new ConnectException("Connector with name " + connName + " already exists"), null);
            return;
        }
        final WorkerConnector workerConnector;
        ClassLoader savedLoader = plugins.currentThreadLoader();
        try {
            // By the time we arrive here, CONNECTOR_CLASS_CONFIG has been validated already
            // Getting this value from the unparsed map will allow us to instantiate the
            // right config (source or sink)
            final String connClass = connProps.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG);
            ClassLoader connectorLoader = plugins.delegatingLoader().connectorLoader(connClass);
            savedLoader = Plugins.compareAndSwapLoaders(connectorLoader);
            log.info("Creating connector {} of type {}", connName, connClass);
            final Connector connector = plugins.newConnector(connClass);
            final ConnectorConfig connConfig = ConnectUtils.isSinkConnector(connector) ? new SinkConnectorConfig(plugins, connProps) : new SourceConnectorConfig(plugins, connProps, config.topicCreationEnable());
            final OffsetStorageReader offsetReader = new OffsetStorageReaderImpl(offsetBackingStore, connName, internalKeyConverter, internalValueConverter);
            workerConnector = new WorkerConnector(connName, connector, connConfig, ctx, metrics, connectorStatusListener, offsetReader, connectorLoader);
            log.info("Instantiated connector {} with version {} of type {}", connName, connector.version(), connector.getClass());
            workerConnector.transitionTo(initialState, onConnectorStateChange);
            Plugins.compareAndSwapLoaders(savedLoader);
        } catch (Throwable t) {
            log.error("Failed to start connector {}", connName, t);
            // Can't be put in a finally block because it needs to be swapped before the call on
            // statusListener
            Plugins.compareAndSwapLoaders(savedLoader);
            connectorStatusListener.onFailure(connName, t);
            onConnectorStateChange.onCompletion(t, null);
            return;
        }
        WorkerConnector existing = connectors.putIfAbsent(connName, workerConnector);
        if (existing != null) {
            onConnectorStateChange.onCompletion(new ConnectException("Connector with name " + connName + " already exists"), null);
            // No cleanup of the WorkerConnector (such as calling shutdown() on it) is needed
            // here because it hasn't actually started running yet
            return;
        }
        executor.submit(workerConnector);
        log.info("Finished creating connector {}", connName);
    }
}
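The offsetReader built here is what the source connector ultimately consults to resume where it left off; tasks reach the same backing store through their SourceTaskContext. A hypothetical task fragment showing that read path; the class name and the "table"/"position" keys are made up for illustration:

import java.util.Collections;
import java.util.Map;
import org.apache.kafka.connect.source.SourceTask;
import org.apache.kafka.connect.storage.OffsetStorageReader;

public abstract class ResumingSourceTask extends SourceTask {

    private long resumePosition;

    @Override
    public void start(Map<String, String> props) {
        // The framework injects the context before start() is called.
        OffsetStorageReader reader = context.offsetStorageReader();
        Map<String, Object> partition = Collections.singletonMap("table", props.get("table"));
        Map<String, Object> offset = reader.offset(partition);
        // offset is null the very first time this connector runs.
        resumePosition = offset == null ? 0L : (Long) offset.get("position");
    }
}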
use of org.apache.kafka.connect.storage.OffsetStorageReader in project kafka by apache.
the class Worker method buildWorkerTask.
private WorkerTask buildWorkerTask(ConnectorConfig connConfig, ConnectorTaskId id, Task task, TaskStatus.Listener statusListener, TargetState initialState, Converter keyConverter, Converter valueConverter) {
    // Decide which type of worker task we need based on the type of task.
    if (task instanceof SourceTask) {
        TransformationChain<SourceRecord> transformationChain = new TransformationChain<>(connConfig.<SourceRecord>transformations());
        OffsetStorageReader offsetReader = new OffsetStorageReaderImpl(offsetBackingStore, id.connector(), internalKeyConverter, internalValueConverter);
        OffsetStorageWriter offsetWriter = new OffsetStorageWriter(offsetBackingStore, id.connector(), internalKeyConverter, internalValueConverter);
        KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(producerProps);
        return new WorkerSourceTask(id, (SourceTask) task, statusListener, initialState, keyConverter, valueConverter, transformationChain, producer, offsetReader, offsetWriter, config, time);
    } else if (task instanceof SinkTask) {
        TransformationChain<SinkRecord> transformationChain = new TransformationChain<>(connConfig.<SinkRecord>transformations());
        return new WorkerSinkTask(id, (SinkTask) task, statusListener, initialState, config, keyConverter, valueConverter, transformationChain, time);
    } else {
        log.error("Tasks must be a subclass of either SourceTask or SinkTask: {}", task);
        throw new ConnectException("Tasks must be a subclass of either SourceTask or SinkTask");
    }
}