Example usage of org.apache.kafka.connect.util.FutureCallback in the apache/kafka project: class ConnectStandalone, method main.
/**
 * Entry point for standalone Kafka Connect. Loads the worker configuration,
 * starts an embedded REST server and worker, then synchronously creates one
 * connector per supplied connector properties file.
 *
 * @param args worker.properties followed by one or more connector property files
 * @throws Exception if a properties file cannot be loaded
 */
public static void main(String[] args) throws Exception {
    if (args.length < 2) {
        log.info("Usage: ConnectStandalone worker.properties connector1.properties [connector2.properties ...]");
        Exit.exit(1);
    }

    String workerPropsFile = args[0];
    // An empty path yields an empty config; StandaloneConfig supplies defaults.
    Map<String, String> workerProps = !workerPropsFile.isEmpty() ? Utils.propsToStringMap(Utils.loadProps(workerPropsFile)) : Collections.<String, String>emptyMap();

    Time time = Time.SYSTEM;
    ConnectorFactory connectorFactory = new ConnectorFactory();
    StandaloneConfig config = new StandaloneConfig(workerProps);

    RestServer rest = new RestServer(config);
    URI advertisedUrl = rest.advertisedUrl();
    String workerId = advertisedUrl.getHost() + ":" + advertisedUrl.getPort();

    Worker worker = new Worker(workerId, time, connectorFactory, config, new FileOffsetBackingStore());
    Herder herder = new StandaloneHerder(worker);
    final Connect connect = new Connect(herder, rest);
    try {
        connect.start();
        for (final String connectorPropsFile : Arrays.copyOfRange(args, 1, args.length)) {
            Map<String, String> connectorProps = Utils.propsToStringMap(Utils.loadProps(connectorPropsFile));
            FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>(new Callback<Herder.Created<ConnectorInfo>>() {
                @Override
                public void onCompletion(Throwable error, Herder.Created<ConnectorInfo> info) {
                    if (error != null)
                        // Fix: pass the throwable as the final argument so SLF4J logs
                        // the failure cause; previously it was silently dropped.
                        log.error("Failed to create job for {}", connectorPropsFile, error);
                    else
                        log.info("Created connector {}", info.result().name());
                }
            });
            herder.putConnectorConfig(connectorProps.get(ConnectorConfig.NAME_CONFIG), connectorProps, false, cb);
            // Block until this connector is created (or fails) before starting the next.
            cb.get();
        }
    } catch (Throwable t) {
        log.error("Stopping after connector error", t);
        connect.stop();
    }

    // Shutdown will be triggered by Ctrl-C or via HTTP shutdown request
    connect.awaitStop();
}
Example usage of org.apache.kafka.connect.util.FutureCallback in the apache/kafka project: class ConnectorsResource, method restartTask.
@POST
@Path("/{connector}/tasks/{task}/restart")
public void restartTask(@PathParam("connector") final String connector, @PathParam("task") final Integer task, @QueryParam("forward") final Boolean forward) throws Throwable {
    // Ask the herder to restart the given task, then either complete the
    // request locally or forward it to the owning worker.
    final ConnectorTaskId id = new ConnectorTaskId(connector, task);
    final FutureCallback<Void> restartCallback = new FutureCallback<>();
    herder.restartTask(id, restartCallback);
    completeOrForwardRequest(restartCallback, "/connectors/" + connector + "/tasks/" + task + "/restart", "POST", null, forward);
}
Example usage of org.apache.kafka.connect.util.FutureCallback in the apache/kafka project: class ConnectorsResource, method putConnectorConfig.
/**
 * Creates or updates the configuration of the named connector.
 * Responds 201 Created when a new connector was created, 200 OK on update.
 */
@PUT
@Path("/{connector}/config")
public Response putConnectorConfig(@PathParam("connector") final String connector, @QueryParam("forward") final Boolean forward, final Map<String, String> connectorConfig) throws Throwable {
    FutureCallback<Herder.Created<ConnectorInfo>> createCallback = new FutureCallback<>();
    String nameFromConfig = connectorConfig.get(ConnectorConfig.NAME_CONFIG);
    if (nameFromConfig == null) {
        // No explicit name in the body: default it from the URL.
        connectorConfig.put(ConnectorConfig.NAME_CONFIG, connector);
    } else if (!nameFromConfig.equals(connector)) {
        throw new BadRequestException("Connector name configuration (" + nameFromConfig + ") doesn't match connector name in the URL (" + connector + ")");
    }
    herder.putConnectorConfig(connector, connectorConfig, true, createCallback);
    Herder.Created<ConnectorInfo> createdInfo = completeOrForwardRequest(createCallback, "/connectors/" + connector + "/config", "PUT", connectorConfig, new TypeReference<ConnectorInfo>() {
    }, new CreatedConnectorInfoTranslator(), forward);
    Response.ResponseBuilder response = createdInfo.created() ? Response.created(URI.create("/connectors/" + connector)) : Response.ok();
    return response.entity(createdInfo.result()).build();
}
Example usage of org.apache.kafka.connect.util.FutureCallback in the apache/ignite project: class IgniteSinkConnectorTest, method testSinkPuts.
/**
 * Tests the whole data flow from injecting data to Kafka to transferring it to the grid. It reads from two
 * specified Kafka topics, because a sink task can read from multiple topics.
 *
 * @param sinkProps Sink properties.
 * @param keyless Tests on Kafka stream with null keys if true.
 * @throws Exception Thrown in case of the failure.
 */
private void testSinkPuts(Map<String, String> sinkProps, boolean keyless) throws Exception {
    FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>(new Callback<Herder.Created<ConnectorInfo>>() {
        @Override
        public void onCompletion(Throwable error, Herder.Created<ConnectorInfo> info) {
            if (error != null)
                // Fix: chain the original failure as the cause (matches the sibling
                // IgniteSourceConnectorTest) instead of discarding it.
                throw new RuntimeException("Failed to create a job!", error);
        }
    });

    herder.putConnectorConfig(sinkProps.get(ConnectorConfig.NAME_CONFIG), sinkProps, false, cb);
    // Wait for connector creation to complete before producing events.
    cb.get();

    // One latch count per expected cache PUT across all topics.
    final CountDownLatch latch = new CountDownLatch(EVENT_CNT * TOPICS.length);

    final IgnitePredicate<Event> putLsnr = new IgnitePredicate<Event>() {
        @Override
        public boolean apply(Event evt) {
            assert evt != null;
            latch.countDown();
            return true;
        }
    };

    grid.events(grid.cluster().forCacheNodes(CACHE_NAME)).localListen(putLsnr, EVT_CACHE_OBJECT_PUT);

    IgniteCache<String, String> cache = grid.cache(CACHE_NAME);
    assertEquals(0, cache.size(CachePeekMode.PRIMARY));

    Map<String, String> keyValMap = new HashMap<>(EVENT_CNT * TOPICS.length);

    // Produces events for the specified number of topics
    for (String topic : TOPICS)
        keyValMap.putAll(produceStream(topic, keyless));

    // Checks all events successfully processed in 10 seconds.
    assertTrue(latch.await(10, TimeUnit.SECONDS));

    grid.events(grid.cluster().forCacheNodes(CACHE_NAME)).stopLocalListen(putLsnr);

    // Checks that each event was processed properly.
    for (Map.Entry<String, String> entry : keyValMap.entrySet())
        assertEquals(entry.getValue(), cache.get(entry.getKey()));

    assertEquals(EVENT_CNT * TOPICS.length, cache.size(CachePeekMode.PRIMARY));
}
Example usage of org.apache.kafka.connect.util.FutureCallback in the apache/ignite project: class IgniteSourceConnectorTest, method doTest.
/**
 * Tests the source with the specified source configurations.
 *
 * @param srcProps Source properties.
 * @param conditioned Flag indicating whether filtering is enabled.
 * @throws Exception Fails if error.
 */
private void doTest(Map<String, String> srcProps, boolean conditioned) throws Exception {
    FutureCallback<Herder.Created<ConnectorInfo>> createCb = new FutureCallback<>(new Callback<Herder.Created<ConnectorInfo>>() {
        @Override
        public void onCompletion(Throwable error, Herder.Created<ConnectorInfo> info) {
            if (error != null)
                throw new RuntimeException("Failed to create a job!", error);
        }
    });

    herder.putConnectorConfig(srcProps.get(ConnectorConfig.NAME_CONFIG), srcProps, true, createCb);
    createCb.get();

    // Ugh! To be sure Kafka Connect's worker thread is properly started...
    Thread.sleep(5000);

    // One latch count per expected cache PUT event.
    final CountDownLatch latch = new CountDownLatch(EVENT_CNT);

    final IgnitePredicate<CacheEvent> cachePutLsnr = new IgnitePredicate<CacheEvent>() {
        @Override
        public boolean apply(CacheEvent evt) {
            assert evt != null;
            latch.countDown();
            return true;
        }
    };

    grid.events(grid.cluster().forCacheNodes(CACHE_NAME)).localListen(cachePutLsnr, EVT_CACHE_OBJECT_PUT);

    IgniteCache<String, String> cache = grid.cache(CACHE_NAME);
    assertEquals(0, cache.size(CachePeekMode.PRIMARY));

    // Inject test data and remember what was sent for later verification.
    Map<String, String> keyValMap = new HashMap<>(sendData());

    // Checks all events are processed.
    assertTrue(latch.await(10, TimeUnit.SECONDS));

    grid.events(grid.cluster().forCacheNodes(CACHE_NAME)).stopLocalListen(cachePutLsnr);

    assertEquals(EVENT_CNT, cache.size(CachePeekMode.PRIMARY));

    // Checks the events are transferred to Kafka broker; filtering halves the count.
    checkDataDelivered(conditioned ? EVENT_CNT * TOPICS.length / 2 : EVENT_CNT * TOPICS.length);
}
End of aggregated usage examples.