Search in sources:

Example 1 with EmbeddedConnectCluster

Use of org.apache.kafka.connect.util.clusters.EmbeddedConnectCluster in the Apache Kafka project.

From class ConnectorClientPolicyIntegrationTest, method connectClusterWithPolicy.

/**
 * Builds and starts an embedded Connect cluster, optionally configured with the
 * given connector client config override policy.
 *
 * @param policy alias of the policy class to set via
 *               {@code WorkerConfig.CONNECTOR_CLIENT_POLICY_CLASS_CONFIG}, or
 *               {@code null} to keep the worker's default policy
 * @return a running {@link EmbeddedConnectCluster} with at least {@code NUM_WORKERS} workers up
 * @throws InterruptedException if interrupted while waiting for the workers to start
 */
private EmbeddedConnectCluster connectClusterWithPolicy(String policy) throws InterruptedException {
    // Worker configuration: commit offsets every 5 seconds so tests observe progress quickly.
    Map<String, String> workerProps = new HashMap<>();
    workerProps.put(OFFSET_COMMIT_INTERVAL_MS_CONFIG, String.valueOf(5_000));
    if (policy != null) {
        workerProps.put(WorkerConfig.CONNECTOR_CLIENT_POLICY_CLASS_CONFIG, policy);
    }

    // Broker configuration: disable topic auto-creation so the test controls topics explicitly.
    Properties brokerOverrides = new Properties();
    brokerOverrides.put("auto.create.topics.enable", "false");

    // Assemble a Connect cluster backed by a single embedded Kafka broker.
    EmbeddedConnectCluster cluster = new EmbeddedConnectCluster.Builder()
            .name("connect-cluster")
            .numWorkers(NUM_WORKERS)
            .numBrokers(1)
            .workerProps(workerProps)
            .brokerProps(brokerOverrides)
            .build();

    // Bring the cluster up and block until the worker group has formed.
    cluster.start();
    cluster.assertions().assertAtLeastNumWorkersAreUp(NUM_WORKERS, "Initial group of workers did not start in time.");
    return cluster;
}
Also used : HashMap(java.util.HashMap) EmbeddedConnectCluster(org.apache.kafka.connect.util.clusters.EmbeddedConnectCluster) Properties(java.util.Properties)

Example 2 with EmbeddedConnectCluster

Use of org.apache.kafka.connect.util.clusters.EmbeddedConnectCluster in the Apache Kafka project.

From class MirrorConnectorsIntegrationBaseTest, method waitForConsumerGroupOffsetSync.

/**
 * Given a consumer group, a set of topics, and an expected number of records per
 * topic, waits until the consumer group's committed offsets have been synced to
 * the expected totals.
 *
 * @param connect         the Connect cluster whose backing Kafka is queried
 * @param consumer        consumer used to read end offsets for the checked partitions
 * @param topics          topics whose partitions should be covered by the sync
 * @param consumerGroupId the consumer group whose committed offsets are checked
 * @param numRecords      expected number of records per topic
 * @throws InterruptedException if the condition is not met within OFFSET_SYNC_DURATION_MS
 */
protected static <T> void waitForConsumerGroupOffsetSync(EmbeddedConnectCluster connect, Consumer<T, T> consumer, List<String> topics, String consumerGroupId, int numRecords) throws InterruptedException {
    try (Admin adminClient = connect.kafka().createAdminClient()) {
        // Enumerate every partition of every topic under test.
        List<TopicPartition> tps = new ArrayList<>(NUM_PARTITIONS * topics.size());
        for (int partitionIndex = 0; partitionIndex < NUM_PARTITIONS; partitionIndex++) {
            for (String topic : topics) {
                tps.add(new TopicPartition(topic, partitionIndex));
            }
        }
        // Widen before multiplying: numRecords * topics.size() would otherwise be
        // computed in 32-bit int arithmetic and could silently overflow.
        long expectedTotalOffsets = (long) numRecords * topics.size();
        waitForCondition(() -> {
            // Sum the group's committed offsets across all partitions.
            Map<TopicPartition, OffsetAndMetadata> consumerGroupOffsets = adminClient.listConsumerGroupOffsets(consumerGroupId).partitionsToOffsetAndMetadata().get();
            long consumerGroupOffsetTotal = consumerGroupOffsets.values().stream().mapToLong(OffsetAndMetadata::offset).sum();
            // Sum the log end offsets for the same partitions.
            Map<TopicPartition, Long> offsets = consumer.endOffsets(tps, CONSUMER_POLL_TIMEOUT_MS);
            long totalOffsets = offsets.values().stream().mapToLong(l -> l).sum();
            // make sure the consumer group offsets are synced to expected number
            return totalOffsets == expectedTotalOffsets && consumerGroupOffsetTotal > 0;
        }, OFFSET_SYNC_DURATION_MS, "Consumer group offset sync is not complete in time");
    }
}
Also used : BeforeEach(org.junit.jupiter.api.BeforeEach) Arrays(java.util.Arrays) DescribeConfigsResult(org.apache.kafka.clients.admin.DescribeConfigsResult) MirrorSourceConnector(org.apache.kafka.connect.mirror.MirrorSourceConnector) LoggerFactory(org.slf4j.LoggerFactory) Assertions.assertNotEquals(org.junit.jupiter.api.Assertions.assertNotEquals) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) Assertions.assertFalse(org.junit.jupiter.api.Assertions.assertFalse) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Duration(java.time.Duration) Map(java.util.Map) Tag(org.junit.jupiter.api.Tag) ReplicationPolicy(org.apache.kafka.connect.mirror.ReplicationPolicy) TopicConfig(org.apache.kafka.common.config.TopicConfig) EmbeddedConnectCluster(org.apache.kafka.connect.util.clusters.EmbeddedConnectCluster) Consumer(org.apache.kafka.clients.consumer.Consumer) TopicPartition(org.apache.kafka.common.TopicPartition) TestUtils.generateRecords(org.apache.kafka.connect.mirror.TestUtils.generateRecords) MirrorMakerConfig(org.apache.kafka.connect.mirror.MirrorMakerConfig) Collection(java.util.Collection) Set(java.util.Set) MirrorCheckpointConnector(org.apache.kafka.connect.mirror.MirrorCheckpointConnector) Test(org.junit.jupiter.api.Test) List(java.util.List) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) Config(org.apache.kafka.clients.admin.Config) Assertions.assertNotNull(org.junit.jupiter.api.Assertions.assertNotNull) Connector(org.apache.kafka.connect.connector.Connector) Exit(org.apache.kafka.common.utils.Exit) UngracefulShutdownException(org.apache.kafka.connect.util.clusters.UngracefulShutdownException) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) DefaultConfigPropertyFilter(org.apache.kafka.connect.mirror.DefaultConfigPropertyFilter) 
ConfigResource(org.apache.kafka.common.config.ConfigResource) Checkpoint(org.apache.kafka.connect.mirror.Checkpoint) SourceAndTarget(org.apache.kafka.connect.mirror.SourceAndTarget) Admin(org.apache.kafka.clients.admin.Admin) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) EmbeddedKafkaCluster(org.apache.kafka.connect.util.clusters.EmbeddedKafkaCluster) Properties(java.util.Properties) Logger(org.slf4j.Logger) TestUtils.waitForCondition(org.apache.kafka.test.TestUtils.waitForCondition) AdminClientConfig(org.apache.kafka.clients.admin.AdminClientConfig) MirrorClient(org.apache.kafka.connect.mirror.MirrorClient) TimeUnit(java.util.concurrent.TimeUnit) AfterEach(org.junit.jupiter.api.AfterEach) MirrorHeartbeatConnector(org.apache.kafka.connect.mirror.MirrorHeartbeatConnector) Collections(java.util.Collections) TopicPartition(org.apache.kafka.common.TopicPartition) ArrayList(java.util.ArrayList) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) Admin(org.apache.kafka.clients.admin.Admin) Checkpoint(org.apache.kafka.connect.mirror.Checkpoint)

Example 3 with EmbeddedConnectCluster

Use of org.apache.kafka.connect.util.clusters.EmbeddedConnectCluster in the Apache Kafka project.

From class ConnectorClientPolicyIntegrationTest, method assertFailCreateConnector.

/**
 * Asserts that creating a connector is rejected with HTTP 400 when the cluster
 * is configured with the given client config override policy.
 *
 * @param policy the policy class alias to start the cluster with (may be null)
 * @param props  the connector configuration expected to be rejected
 * @throws InterruptedException if interrupted while starting the cluster
 */
private void assertFailCreateConnector(String policy, Map<String, String> props) throws InterruptedException {
    EmbeddedConnectCluster connect = connectClusterWithPolicy(policy);
    try {
        connect.configureConnector(CONNECTOR_NAME, props);
        fail("Shouldn't be able to create connector");
    } catch (ConnectRestException e) {
        // JUnit's assertEquals takes (expected, actual); the original call had the
        // arguments swapped, which produces a misleading message on failure.
        assertEquals(400, e.statusCode());
    } finally {
        // Always tear down the cluster, even when the assertion fails.
        connect.stop();
    }
}
Also used : ConnectRestException(org.apache.kafka.connect.runtime.rest.errors.ConnectRestException) EmbeddedConnectCluster(org.apache.kafka.connect.util.clusters.EmbeddedConnectCluster)

Example 4 with EmbeddedConnectCluster

Use of org.apache.kafka.connect.util.clusters.EmbeddedConnectCluster in the Apache Kafka project.

From class ConnectorClientPolicyIntegrationTest, method assertPassCreateConnector.

/**
 * Asserts that creating a connector succeeds and its tasks come up when the
 * cluster is configured with the given client config override policy.
 *
 * @param policy the policy class alias to start the cluster with (may be null)
 * @param props  the connector configuration expected to be accepted
 * @throws InterruptedException if interrupted while starting the cluster
 */
private void assertPassCreateConnector(String policy, Map<String, String> props) throws InterruptedException {
    EmbeddedConnectCluster connect = connectClusterWithPolicy(policy);
    try {
        connect.configureConnector(CONNECTOR_NAME, props);
        connect.assertions().assertConnectorAndAtLeastNumTasksAreRunning(CONNECTOR_NAME, NUM_TASKS, "Connector tasks did not start in time.");
    } catch (ConnectRestException e) {
        // Include the rejection detail: the original message discarded the
        // exception entirely, hiding why connector creation failed.
        fail("Should be able to create connector: " + e.getMessage());
    } finally {
        // Always tear down the cluster, even when the assertion fails.
        connect.stop();
    }
}
Also used : ConnectRestException(org.apache.kafka.connect.runtime.rest.errors.ConnectRestException) EmbeddedConnectCluster(org.apache.kafka.connect.util.clusters.EmbeddedConnectCluster)

Example 5 with EmbeddedConnectCluster

Use of org.apache.kafka.connect.util.clusters.EmbeddedConnectCluster in the Apache Kafka project.

From class ConnectorRestartApiIntegrationTest, method startOrReuseConnectWithNumWorkers.

/**
 * Lazily creates (or reuses) an embedded Connect cluster with the requested
 * number of workers, caching instances in {@code connectClusterMap} keyed by
 * worker count, then waits until exactly that many workers are up.
 *
 * @param numWorkers number of Connect workers the cluster must run
 * @throws Exception if the cluster fails to start or workers do not come up in time
 */
private void startOrReuseConnectWithNumWorkers(int numWorkers) throws Exception {
    connect = connectClusterMap.computeIfAbsent(numWorkers, workerCount -> {
        // Worker configuration shared by every worker in this cluster.
        Map<String, String> workerProps = new HashMap<>();
        workerProps.put(OFFSET_COMMIT_INTERVAL_MS_CONFIG, String.valueOf(OFFSET_COMMIT_INTERVAL_MS));
        workerProps.put(CONNECTOR_CLIENT_POLICY_CLASS_CONFIG, "All");

        // Broker configuration: topics are created explicitly by the test.
        Properties brokerProps = new Properties();
        brokerProps.put("auto.create.topics.enable", String.valueOf(false));

        // Build and start the cluster; System.exit is masked so a worker crash
        // cannot kill the test JVM.
        EmbeddedConnectCluster cluster = new EmbeddedConnectCluster.Builder()
                .name("connect-cluster")
                .numWorkers(workerCount)
                .workerProps(workerProps)
                .brokerProps(brokerProps)
                .maskExitProcedures(true)
                .build();
        cluster.start();
        return cluster;
    });
    connect.assertions().assertExactlyNumWorkersAreUp(numWorkers, "Initial group of workers did not start in time.");
}
Also used : PARTITIONS_CONFIG(org.apache.kafka.connect.runtime.TopicCreationConfig.PARTITIONS_CONFIG) TestRule(org.junit.rules.TestRule) LoggerFactory(org.slf4j.LoggerFactory) IntegrationTest(org.apache.kafka.test.IntegrationTest) HashMap(java.util.HashMap) TOPIC_CONFIG(org.apache.kafka.connect.integration.MonitorableSourceConnector.TOPIC_CONFIG) HashSet(java.util.HashSet) TestName(org.junit.rules.TestName) After(org.junit.After) Map(java.util.Map) OFFSET_COMMIT_INTERVAL_MS_CONFIG(org.apache.kafka.connect.runtime.WorkerConfig.OFFSET_COMMIT_INTERVAL_MS_CONFIG) StringConverter(org.apache.kafka.connect.storage.StringConverter) EmbeddedConnectCluster(org.apache.kafka.connect.util.clusters.EmbeddedConnectCluster) CONNECTOR_CLASS_CONFIG(org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG) VALUE_CONVERTER_CLASS_CONFIG(org.apache.kafka.connect.runtime.ConnectorConfig.VALUE_CONVERTER_CLASS_CONFIG) Before(org.junit.Before) AfterClass(org.junit.AfterClass) Logger(org.slf4j.Logger) Properties(java.util.Properties) AbstractStatus(org.apache.kafka.connect.runtime.AbstractStatus) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Set(java.util.Set) Assert.assertTrue(org.junit.Assert.assertTrue) Test(org.junit.Test) Category(org.junit.experimental.categories.Category) Collectors(java.util.stream.Collectors) DEFAULT_TOPIC_CREATION_PREFIX(org.apache.kafka.connect.runtime.TopicCreationConfig.DEFAULT_TOPIC_CREATION_PREFIX) Assert.assertNotEquals(org.junit.Assert.assertNotEquals) KEY_CONVERTER_CLASS_CONFIG(org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG) TimeUnit(java.util.concurrent.TimeUnit) ConnectorStateInfo(org.apache.kafka.connect.runtime.rest.entities.ConnectorStateInfo) TASKS_MAX_CONFIG(org.apache.kafka.connect.runtime.ConnectorConfig.TASKS_MAX_CONFIG) Rule(org.junit.Rule) Response(javax.ws.rs.core.Response) REPLICATION_FACTOR_CONFIG(org.apache.kafka.connect.runtime.TopicCreationConfig.REPLICATION_FACTOR_CONFIG) 
CONNECTOR_SETUP_DURATION_MS(org.apache.kafka.connect.util.clusters.EmbeddedConnectClusterAssertions.CONNECTOR_SETUP_DURATION_MS) CONNECTOR_CLIENT_POLICY_CLASS_CONFIG(org.apache.kafka.connect.runtime.WorkerConfig.CONNECTOR_CLIENT_POLICY_CLASS_CONFIG) Collections(java.util.Collections) Assert.assertEquals(org.junit.Assert.assertEquals) EmbeddedConnectCluster(org.apache.kafka.connect.util.clusters.EmbeddedConnectCluster) Properties(java.util.Properties) HashMap(java.util.HashMap) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap)

Aggregations

EmbeddedConnectCluster (org.apache.kafka.connect.util.clusters.EmbeddedConnectCluster)5 HashMap (java.util.HashMap)3 Properties (java.util.Properties)3 Collections (java.util.Collections)2 Map (java.util.Map)2 Set (java.util.Set)2 TimeUnit (java.util.concurrent.TimeUnit)2 ConnectRestException (org.apache.kafka.connect.runtime.rest.errors.ConnectRestException)2 Logger (org.slf4j.Logger)2 LoggerFactory (org.slf4j.LoggerFactory)2 Duration (java.time.Duration)1 ArrayList (java.util.ArrayList)1 Arrays (java.util.Arrays)1 Collection (java.util.Collection)1 HashSet (java.util.HashSet)1 List (java.util.List)1 ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap)1 AtomicInteger (java.util.concurrent.atomic.AtomicInteger)1 Collectors (java.util.stream.Collectors)1 Response (javax.ws.rs.core.Response)1