Use of org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo in project ignite by apache.
From the class IgniteSinkConnectorTest, the method testSinkPuts.
/**
 * Tests the whole data flow from injecting data into Kafka to transferring it to the grid. It reads from two
 * specified Kafka topics, because a sink task can read from multiple topics.
 *
 * @param sinkProps Sink properties.
 * @param keyless If true, tests a Kafka stream with null keys.
 * @throws Exception Thrown in case of failure.
 */
private void testSinkPuts(Map<String, String> sinkProps, boolean keyless) throws Exception {
    FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>(new Callback<Herder.Created<ConnectorInfo>>() {
        @Override
        public void onCompletion(Throwable error, Herder.Created<ConnectorInfo> info) {
            if (error != null)
                throw new RuntimeException("Failed to create a job!");
        }
    });

    herder.putConnectorConfig(sinkProps.get(ConnectorConfig.NAME_CONFIG), sinkProps, false, cb);

    cb.get();

    final CountDownLatch latch = new CountDownLatch(EVENT_CNT * TOPICS.length);

    final IgnitePredicate<Event> putLsnr = new IgnitePredicate<Event>() {
        @Override
        public boolean apply(Event evt) {
            assert evt != null;

            latch.countDown();

            return true;
        }
    };

    grid.events(grid.cluster().forCacheNodes(CACHE_NAME)).localListen(putLsnr, EVT_CACHE_OBJECT_PUT);

    IgniteCache<String, String> cache = grid.cache(CACHE_NAME);

    assertEquals(0, cache.size(CachePeekMode.PRIMARY));

    Map<String, String> keyValMap = new HashMap<>(EVENT_CNT * TOPICS.length);

    // Produces events for the specified number of topics.
    for (String topic : TOPICS)
        keyValMap.putAll(produceStream(topic, keyless));

    // Checks that all events are successfully processed within 10 seconds.
    assertTrue(latch.await(10, TimeUnit.SECONDS));

    grid.events(grid.cluster().forCacheNodes(CACHE_NAME)).stopLocalListen(putLsnr);

    // Checks that each event was processed properly.
    for (Map.Entry<String, String> entry : keyValMap.entrySet())
        assertEquals(entry.getValue(), cache.get(entry.getKey()));

    assertEquals(EVENT_CNT * TOPICS.length, cache.size(CachePeekMode.PRIMARY));
}
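For context, a minimal sketch of how a sinkProps map for such a test could be assembled. The standard Kafka Connect keys come from ConnectorConfig and SinkConnector; the Ignite-specific keys ("cacheName", "igniteCfg") and all values are assumptions for illustration, not taken from the test fixture above.

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.connect.runtime.ConnectorConfig;
import org.apache.kafka.connect.sink.SinkConnector;

// Hypothetical helper: builds a sink connector configuration similar to the
// sinkProps map passed into testSinkPuts(). The Ignite-specific keys and
// values are assumed for illustration only.
public final class SinkPropsExample {
    static Map<String, String> makeSinkProperties() {
        Map<String, String> props = new HashMap<>();

        // Standard Kafka Connect keys.
        props.put(ConnectorConfig.NAME_CONFIG, "ignite-sink-test");
        props.put(ConnectorConfig.CONNECTOR_CLASS_CONFIG, "org.apache.ignite.stream.kafka.connect.IgniteSinkConnector");
        props.put(ConnectorConfig.TASKS_MAX_CONFIG, "2");
        props.put(SinkConnector.TOPICS_CONFIG, "test1,test2");

        // Ignite-specific keys (assumed names).
        props.put("cacheName", "testCache");
        props.put("igniteCfg", "example-ignite.xml");

        return props;
    }
}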
Use of org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo in project ignite by apache.
From the class IgniteSourceConnectorTest, the method doTest.
/**
 * Tests the source with the specified source configurations.
 *
 * @param srcProps Source properties.
 * @param conditioned Flag indicating whether filtering is enabled.
 * @throws Exception Thrown in case of failure.
 */
private void doTest(Map<String, String> srcProps, boolean conditioned) throws Exception {
    FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>(new Callback<Herder.Created<ConnectorInfo>>() {
        @Override
        public void onCompletion(Throwable error, Herder.Created<ConnectorInfo> info) {
            if (error != null)
                throw new RuntimeException("Failed to create a job!", error);
        }
    });

    herder.putConnectorConfig(srcProps.get(ConnectorConfig.NAME_CONFIG), srcProps, true, cb);

    cb.get();

    // Ugh! To be sure Kafka Connect's worker thread is properly started...
    Thread.sleep(5000);

    final CountDownLatch latch = new CountDownLatch(EVENT_CNT);

    final IgnitePredicate<CacheEvent> locLsnr = new IgnitePredicate<CacheEvent>() {
        @Override
        public boolean apply(CacheEvent evt) {
            assert evt != null;

            latch.countDown();

            return true;
        }
    };

    grid.events(grid.cluster().forCacheNodes(CACHE_NAME)).localListen(locLsnr, EVT_CACHE_OBJECT_PUT);

    IgniteCache<String, String> cache = grid.cache(CACHE_NAME);

    assertEquals(0, cache.size(CachePeekMode.PRIMARY));

    Map<String, String> keyValMap = new HashMap<>(EVENT_CNT);

    keyValMap.putAll(sendData());

    // Checks that all events are processed.
    assertTrue(latch.await(10, TimeUnit.SECONDS));

    grid.events(grid.cluster().forCacheNodes(CACHE_NAME)).stopLocalListen(locLsnr);

    assertEquals(EVENT_CNT, cache.size(CachePeekMode.PRIMARY));

    // Checks that the events are delivered to the Kafka broker.
    if (conditioned)
        checkDataDelivered(EVENT_CNT * TOPICS.length / 2);
    else
        checkDataDelivered(EVENT_CNT * TOPICS.length);
}
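The checkDataDelivered helper is not shown here. Below is a minimal sketch of how such a delivery check could be written against a plain KafkaConsumer (Kafka 2.0+ consumer API; the broker address, group id, and timeout are assumptions, not values from the test above).

import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

// Hypothetical helper: polls the test topics and counts the records produced
// by the source connector, failing if fewer than expCnt arrive in time.
final class DeliveryCheckExample {
    static void checkDataDelivered(String[] topics, int expCnt) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed embedded broker address
        props.put("group.id", "delivery-check");
        props.put("auto.offset.reset", "earliest");
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());

        int received = 0;

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Arrays.asList(topics));

            long deadline = System.currentTimeMillis() + 10_000;

            while (received < expCnt && System.currentTimeMillis() < deadline) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));

                received += records.count();
            }
        }

        if (received < expCnt)
            throw new AssertionError("Expected " + expCnt + " records, got " + received);
    }
}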
Use of org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo in project apache-kafka-on-k8s by banzaicloud.
From the class DistributedHerderTest, the method testCreateConnector.
@Test
public void testCreateConnector() throws Exception {
    EasyMock.expect(member.memberId()).andStubReturn("leader");
    expectRebalance(1, Collections.<String>emptyList(), Collections.<ConnectorTaskId>emptyList());
    expectPostRebalanceCatchup(SNAPSHOT);

    member.wakeup();
    PowerMock.expectLastCall();

    // config validation
    Connector connectorMock = PowerMock.createMock(SourceConnector.class);
    EasyMock.expect(worker.getPlugins()).andReturn(plugins).times(3);
    EasyMock.expect(plugins.compareAndSwapLoaders(connectorMock)).andReturn(delegatingLoader);
    EasyMock.expect(plugins.newConnector(EasyMock.anyString())).andReturn(connectorMock);
    EasyMock.expect(connectorMock.config()).andReturn(new ConfigDef());
    EasyMock.expect(connectorMock.validate(CONN2_CONFIG)).andReturn(new Config(Collections.<ConfigValue>emptyList()));
    EasyMock.expect(Plugins.compareAndSwapLoaders(delegatingLoader)).andReturn(pluginLoader);

    // CONN2 is new, should succeed
    configBackingStore.putConnectorConfig(CONN2, CONN2_CONFIG);
    PowerMock.expectLastCall();

    ConnectorInfo info = new ConnectorInfo(CONN2, CONN2_CONFIG, Collections.<ConnectorTaskId>emptyList(), ConnectorType.SOURCE);
    putConnectorCallback.onCompletion(null, new Herder.Created<>(true, info));
    PowerMock.expectLastCall();

    member.poll(EasyMock.anyInt());
    PowerMock.expectLastCall();

    // No immediate action besides this -- change will be picked up via the config log
    PowerMock.replayAll();

    herder.putConnectorConfig(CONN2, CONN2_CONFIG, false, putConnectorCallback);
    herder.tick();

    time.sleep(1000L);
    assertStatistics(3, 1, 100, 1000L);

    PowerMock.verifyAll();
}
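The putConnectorCallback mock above stands in for a real caller. A minimal sketch of how a caller might submit a connector config and read the resulting ConnectorInfo back through a FutureCallback follows (the helper class and its name are hypothetical).

import java.util.Map;

import org.apache.kafka.connect.runtime.Herder;
import org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo;
import org.apache.kafka.connect.util.FutureCallback;

// Hypothetical usage: submit a connector config and read back the ConnectorInfo.
final class CreateConnectorExample {
    static ConnectorInfo createAndDescribe(Herder herder, String name, Map<String, String> config) throws Exception {
        FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>();

        // false: do not replace an existing connector with the same name.
        herder.putConnectorConfig(name, config, false, cb);

        Herder.Created<ConnectorInfo> created = cb.get();

        // created() tells whether the connector was newly created; result() carries
        // the connector's name, config, and assigned task ids.
        assert created.created();

        return created.result();
    }
}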
Use of org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo in project apache-kafka-on-k8s by banzaicloud.
From the class DistributedHerderTest, the method testCreateConnectorFailedBasicValidation.
@Test
public void testCreateConnectorFailedBasicValidation() throws Exception {
    EasyMock.expect(member.memberId()).andStubReturn("leader");
    expectRebalance(1, Collections.<String>emptyList(), Collections.<ConnectorTaskId>emptyList());
    expectPostRebalanceCatchup(SNAPSHOT);

    HashMap<String, String> config = new HashMap<>(CONN2_CONFIG);
    config.remove(ConnectorConfig.NAME_CONFIG);

    member.wakeup();
    PowerMock.expectLastCall();

    // config validation
    Connector connectorMock = PowerMock.createMock(SourceConnector.class);
    EasyMock.expect(worker.getPlugins()).andReturn(plugins).times(3);
    EasyMock.expect(plugins.compareAndSwapLoaders(connectorMock)).andReturn(delegatingLoader);
    EasyMock.expect(plugins.newConnector(EasyMock.anyString())).andReturn(connectorMock);
    EasyMock.expect(connectorMock.config()).andStubReturn(new ConfigDef());

    ConfigValue validatedValue = new ConfigValue("foo.bar");
    EasyMock.expect(connectorMock.validate(config)).andReturn(new Config(singletonList(validatedValue)));
    EasyMock.expect(Plugins.compareAndSwapLoaders(delegatingLoader)).andReturn(pluginLoader);

    // CONN2 creation should fail
    Capture<Throwable> error = EasyMock.newCapture();
    putConnectorCallback.onCompletion(EasyMock.capture(error), EasyMock.<Herder.Created<ConnectorInfo>>isNull());
    PowerMock.expectLastCall();

    member.poll(EasyMock.anyInt());
    PowerMock.expectLastCall();

    // No immediate action besides this -- change will be picked up via the config log
    PowerMock.replayAll();

    herder.putConnectorConfig(CONN2, config, false, putConnectorCallback);
    herder.tick();

    assertTrue(error.hasCaptured());
    assertTrue(error.getValue() instanceof BadRequestException);

    time.sleep(1000L);
    assertStatistics(3, 1, 100, 1000L);

    PowerMock.verifyAll();
}
Use of org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo in project apache-kafka-on-k8s by banzaicloud.
From the class DistributedHerderTest, the method testCreateConnectorFailedCustomValidation.
@Test
public void testCreateConnectorFailedCustomValidation() throws Exception {
    EasyMock.expect(member.memberId()).andStubReturn("leader");
    expectRebalance(1, Collections.<String>emptyList(), Collections.<ConnectorTaskId>emptyList());
    expectPostRebalanceCatchup(SNAPSHOT);

    member.wakeup();
    PowerMock.expectLastCall();

    // config validation
    Connector connectorMock = PowerMock.createMock(SourceConnector.class);
    EasyMock.expect(worker.getPlugins()).andReturn(plugins).times(3);
    EasyMock.expect(plugins.compareAndSwapLoaders(connectorMock)).andReturn(delegatingLoader);
    EasyMock.expect(plugins.newConnector(EasyMock.anyString())).andReturn(connectorMock);

    ConfigDef configDef = new ConfigDef();
    configDef.define("foo.bar", ConfigDef.Type.STRING, ConfigDef.Importance.HIGH, "foo.bar doc");
    EasyMock.expect(connectorMock.config()).andReturn(configDef);

    ConfigValue validatedValue = new ConfigValue("foo.bar");
    validatedValue.addErrorMessage("Failed foo.bar validation");
    EasyMock.expect(connectorMock.validate(CONN2_CONFIG)).andReturn(new Config(singletonList(validatedValue)));
    EasyMock.expect(Plugins.compareAndSwapLoaders(delegatingLoader)).andReturn(pluginLoader);

    // CONN2 creation should fail
    Capture<Throwable> error = EasyMock.newCapture();
    putConnectorCallback.onCompletion(EasyMock.capture(error), EasyMock.<Herder.Created<ConnectorInfo>>isNull());
    PowerMock.expectLastCall();

    member.poll(EasyMock.anyInt());
    PowerMock.expectLastCall();

    // No immediate action besides this -- change will be picked up via the config log
    PowerMock.replayAll();

    herder.putConnectorConfig(CONN2, CONN2_CONFIG, false, putConnectorCallback);
    herder.tick();

    assertTrue(error.hasCaptured());
    assertTrue(error.getValue() instanceof BadRequestException);

    time.sleep(1000L);
    assertStatistics(3, 1, 100, 1000L);

    PowerMock.verifyAll();
}
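The mocked connector in this test simulates a connector whose custom validation rejects the foo.bar setting. A rough sketch of what such a connector could look like is below; the class name and the stubbed lifecycle methods are assumptions for illustration, not code from the test suite.

import java.util.List;
import java.util.Map;

import org.apache.kafka.common.config.Config;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigValue;
import org.apache.kafka.connect.connector.Task;
import org.apache.kafka.connect.source.SourceConnector;

// Hypothetical connector mirroring what connectorMock simulates above: it defines
// a "foo.bar" setting and marks it invalid during custom validation, which the
// herder then surfaces to the caller as a BadRequestException.
public class FooBarValidatingConnector extends SourceConnector {
    @Override public ConfigDef config() {
        return new ConfigDef().define("foo.bar", ConfigDef.Type.STRING, ConfigDef.Importance.HIGH, "foo.bar doc");
    }

    @Override public Config validate(Map<String, String> connectorConfigs) {
        Config config = super.validate(connectorConfigs);

        for (ConfigValue value : config.configValues()) {
            if ("foo.bar".equals(value.name()))
                value.addErrorMessage("Failed foo.bar validation");
        }

        return config;
    }

    // Remaining Connector methods are minimal stubs for brevity.
    @Override public void start(Map<String, String> props) { }
    @Override public Class<? extends Task> taskClass() { return null; }
    @Override public List<Map<String, String>> taskConfigs(int maxTasks) { return null; }
    @Override public void stop() { }
    @Override public String version() { return "1.0"; }
}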