Use of org.apache.kafka.connect.util.clusters.EmbeddedConnectCluster in project kafka by apache.
The class ConnectorClientPolicyIntegrationTest, method connectClusterWithPolicy.
private EmbeddedConnectCluster connectClusterWithPolicy(String policy) throws InterruptedException {
    // set up Connect worker properties
    Map<String, String> workerProps = new HashMap<>();
    workerProps.put(OFFSET_COMMIT_INTERVAL_MS_CONFIG, String.valueOf(5_000));
    if (policy != null) {
        workerProps.put(WorkerConfig.CONNECTOR_CLIENT_POLICY_CLASS_CONFIG, policy);
    }
    // set up Kafka broker properties
    Properties exampleBrokerProps = new Properties();
    exampleBrokerProps.put("auto.create.topics.enable", "false");
    // build a Connect cluster backed by Kafka and ZooKeeper
    EmbeddedConnectCluster connect = new EmbeddedConnectCluster.Builder()
            .name("connect-cluster")
            .numWorkers(NUM_WORKERS)
            .numBrokers(1)
            .workerProps(workerProps)
            .brokerProps(exampleBrokerProps)
            .build();
    // start the clusters
    connect.start();
    connect.assertions().assertAtLeastNumWorkersAreUp(NUM_WORKERS, "Initial group of workers did not start in time.");
    return connect;
}
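For orientation, here is a hedged sketch of how a test in this class might use the helper; the test name and body are illustrative, while "Principal" is one of the short aliases Connect accepts for its built-in override policies (alongside "None" and "All"):

@Test
public void testCreateConnectorWithPrincipalPolicy() throws InterruptedException {
    // spin up a Connect cluster whose workers apply the Principal override policy
    EmbeddedConnectCluster connect = connectClusterWithPolicy("Principal");
    try {
        // ... create connectors and assert on the policy's behavior here ...
    } finally {
        // always tear the cluster down so later tests start from a clean state
        connect.stop();
    }
}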
Use of org.apache.kafka.connect.util.clusters.EmbeddedConnectCluster in project kafka by apache.
The class MirrorConnectorsIntegrationBaseTest, method waitForConsumerGroupOffsetSync.
/*
 * Given a consumer group, a list of topics, and an expected number of records, wait until the
 * consumer group's offsets have been synced to the expected totals.
 */
protected static <T> void waitForConsumerGroupOffsetSync(EmbeddedConnectCluster connect, Consumer<T, T> consumer,
        List<String> topics, String consumerGroupId, int numRecords) throws InterruptedException {
    try (Admin adminClient = connect.kafka().createAdminClient()) {
        // enumerate every partition of every topic that should be synced
        List<TopicPartition> tps = new ArrayList<>(NUM_PARTITIONS * topics.size());
        for (int partitionIndex = 0; partitionIndex < NUM_PARTITIONS; partitionIndex++) {
            for (String topic : topics) {
                tps.add(new TopicPartition(topic, partitionIndex));
            }
        }
        long expectedTotalOffsets = numRecords * topics.size();
        waitForCondition(() -> {
            Map<TopicPartition, OffsetAndMetadata> consumerGroupOffsets =
                    adminClient.listConsumerGroupOffsets(consumerGroupId).partitionsToOffsetAndMetadata().get();
            long consumerGroupOffsetTotal = consumerGroupOffsets.values().stream()
                    .mapToLong(OffsetAndMetadata::offset).sum();
            Map<TopicPartition, Long> offsets = consumer.endOffsets(tps, CONSUMER_POLL_TIMEOUT_MS);
            long totalOffsets = offsets.values().stream().mapToLong(l -> l).sum();
            // make sure the end offsets have reached the expected total and some group offsets have been synced
            return totalOffsets == expectedTotalOffsets && consumerGroupOffsetTotal > 0;
        }, OFFSET_SYNC_DURATION_MS, "Consumer group offset sync is not complete in time");
    }
}
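A sketch of a plausible call site, in the spirit of the MirrorMaker 2 integration tests: the cluster handle, topic name, group id, and record count below are illustrative assumptions, not values taken from the test itself.

// wait until MirrorCheckpointConnector has synced "consumer-group-1" offsets for the
// replicated topic on the backup cluster (all names here are illustrative)
try (Consumer<byte[], byte[]> backupConsumer = backup.kafka().createConsumer(
        Collections.singletonMap(ConsumerConfig.GROUP_ID_CONFIG, "consumer-group-1"))) {
    waitForConsumerGroupOffsetSync(backup, backupConsumer,
            Collections.singletonList("primary.test-topic-1"), "consumer-group-1", NUM_RECORDS_PRODUCED);
}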
Use of org.apache.kafka.connect.util.clusters.EmbeddedConnectCluster in project kafka by apache.
The class ConnectorClientPolicyIntegrationTest, method assertFailCreateConnector.
private void assertFailCreateConnector(String policy, Map<String, String> props) throws InterruptedException {
    EmbeddedConnectCluster connect = connectClusterWithPolicy(policy);
    try {
        connect.configureConnector(CONNECTOR_NAME, props);
        fail("Shouldn't be able to create connector");
    } catch (ConnectRestException e) {
        // expected value first, per JUnit's assertEquals contract
        assertEquals(400, e.statusCode());
    } finally {
        connect.stop();
    }
}
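A sketch of how a test might drive this assertion: under the "None" policy, any client config override in the connector config should be rejected with a 400. The basicConnectorConfig() helper and the overridden key are illustrative assumptions, though the "consumer.override." prefix is Connect's real convention for consumer client overrides:

@Test
public void testCreateWithOverridesForNonePolicy() throws InterruptedException {
    Map<String, String> props = basicConnectorConfig(); // hypothetical helper returning a minimal connector config
    // a consumer client override that the None policy should refuse
    props.put("consumer.override.client.id", "none-allowed");
    assertFailCreateConnector("None", props);
}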
Use of org.apache.kafka.connect.util.clusters.EmbeddedConnectCluster in project kafka by apache.
The class ConnectorClientPolicyIntegrationTest, method assertPassCreateConnector.
private void assertPassCreateConnector(String policy, Map<String, String> props) throws InterruptedException {
    EmbeddedConnectCluster connect = connectClusterWithPolicy(policy);
    try {
        connect.configureConnector(CONNECTOR_NAME, props);
        connect.assertions().assertConnectorAndAtLeastNumTasksAreRunning(CONNECTOR_NAME, NUM_TASKS, "Connector tasks did not start in time.");
    } catch (ConnectRestException e) {
        fail("Should be able to create connector");
    } finally {
        connect.stop();
    }
}
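And the mirror-image positive case: under the "All" policy the same kind of override should be accepted and the connector should start. Again, basicConnectorConfig() is a hypothetical helper:

@Test
public void testCreateWithOverridesForAllPolicy() throws InterruptedException {
    Map<String, String> props = basicConnectorConfig(); // hypothetical helper, as above
    props.put("consumer.override.client.id", "all-allowed");
    assertPassCreateConnector("All", props);
}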
Use of org.apache.kafka.connect.util.clusters.EmbeddedConnectCluster in project kafka by apache.
The class ConnectorRestartApiIntegrationTest, method startOrReuseConnectWithNumWorkers.
private void startOrReuseConnectWithNumWorkers(int numWorkers) throws Exception {
    connect = connectClusterMap.computeIfAbsent(numWorkers, n -> {
        // set up Connect worker properties
        Map<String, String> workerProps = new HashMap<>();
        workerProps.put(OFFSET_COMMIT_INTERVAL_MS_CONFIG, String.valueOf(OFFSET_COMMIT_INTERVAL_MS));
        workerProps.put(CONNECTOR_CLIENT_POLICY_CLASS_CONFIG, "All");
        // set up Kafka broker properties
        Properties brokerProps = new Properties();
        brokerProps.put("auto.create.topics.enable", String.valueOf(false));
        EmbeddedConnectCluster.Builder connectBuilder = new EmbeddedConnectCluster.Builder()
                .name("connect-cluster")
                .numWorkers(numWorkers)
                .workerProps(workerProps)
                .brokerProps(brokerProps)
                .maskExitProcedures(true);
        EmbeddedConnectCluster connect = connectBuilder.build();
        // start the clusters
        connect.start();
        return connect;
    });
    connect.assertions().assertExactlyNumWorkersAreUp(numWorkers, "Initial group of workers did not start in time.");
}
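A sketch of a test that leans on this helper to obtain (or reuse) a single-worker cluster before exercising it; the defaultSourceConnectorProps() helper is an illustrative assumption, while the assertion mirrors the one used elsewhere on this page:

@Test
public void testConnectorStartsOnSingleWorker() throws Exception {
    // reuse (or create) the one-worker cluster cached in connectClusterMap
    startOrReuseConnectWithNumWorkers(1);
    Map<String, String> props = defaultSourceConnectorProps(); // hypothetical helper building a source connector config
    connect.configureConnector(CONNECTOR_NAME, props);
    connect.assertions().assertConnectorAndAtLeastNumTasksAreRunning(CONNECTOR_NAME, NUM_TASKS, "Connector tasks did not start in time.");
}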