
Example 6 with MirrorMakerConfig

use of org.apache.kafka.connect.mirror.MirrorMakerConfig in project kafka by apache.

the class MirrorConnectorsIntegrationBaseTest method startClusters.

public void startClusters(Map<String, String> additionalMM2Config) throws Exception {
    shuttingDown = false;
    exitProcedure = (code, message) -> {
        if (shuttingDown) {
            // ignore this since we're shutting down Connect and Kafka and timing isn't always great
            return;
        }
        if (code != 0) {
            String exitMessage = "Abrupt service exit with code " + code + " and message " + message;
            log.warn(exitMessage);
            throw new UngracefulShutdownException(exitMessage);
        }
    };
    haltProcedure = (code, message) -> {
        if (shuttingDown) {
            // ignore this since we're shutting down Connect and Kafka and timing isn't always great
            return;
        }
        if (code != 0) {
            String haltMessage = "Abrupt service halt with code " + code + " and message " + message;
            log.warn(haltMessage);
            throw new UngracefulShutdownException(haltMessage);
        }
    };
    // Override the exit and halt procedure that Connect and Kafka will use. For these integration tests,
    // we don't want to exit the JVM and instead simply want to fail the test
    Exit.setExitProcedure(exitProcedure);
    Exit.setHaltProcedure(haltProcedure);
    primaryBrokerProps.put("auto.create.topics.enable", "false");
    backupBrokerProps.put("auto.create.topics.enable", "false");
    mm2Props.putAll(basicMM2Config());
    mm2Props.putAll(additionalMM2Config);
    // exclude delete.retention.* topic configs from being synced to the target cluster:
    mm2Props.put(DefaultConfigPropertyFilter.CONFIG_PROPERTIES_EXCLUDE_CONFIG, "delete\\.retention\\..*");
    mm2Config = new MirrorMakerConfig(mm2Props);
    primaryWorkerProps = mm2Config.workerConfig(new SourceAndTarget(BACKUP_CLUSTER_ALIAS, PRIMARY_CLUSTER_ALIAS));
    backupWorkerProps.putAll(mm2Config.workerConfig(new SourceAndTarget(PRIMARY_CLUSTER_ALIAS, BACKUP_CLUSTER_ALIAS)));
    primary = new EmbeddedConnectCluster.Builder()
        .name(PRIMARY_CLUSTER_ALIAS + "-connect-cluster")
        .numWorkers(NUM_WORKERS)
        .numBrokers(1)
        .brokerProps(primaryBrokerProps)
        .workerProps(primaryWorkerProps)
        .maskExitProcedures(false)
        .build();
    backup = new EmbeddedConnectCluster.Builder()
        .name(BACKUP_CLUSTER_ALIAS + "-connect-cluster")
        .numWorkers(NUM_WORKERS)
        .numBrokers(1)
        .brokerProps(backupBrokerProps)
        .workerProps(backupWorkerProps)
        .maskExitProcedures(false)
        .build();
    primary.start();
    primary.assertions().assertAtLeastNumWorkersAreUp(NUM_WORKERS, "Workers of " + PRIMARY_CLUSTER_ALIAS + "-connect-cluster did not start in time.");
    waitForTopicCreated(primary, "mm2-status.backup.internal");
    waitForTopicCreated(primary, "mm2-offsets.backup.internal");
    waitForTopicCreated(primary, "mm2-configs.backup.internal");
    backup.start();
    backup.assertions().assertAtLeastNumWorkersAreUp(NUM_WORKERS, "Workers of " + BACKUP_CLUSTER_ALIAS + "-connect-cluster did not start in time.");
    waitForTopicCreated(backup, "mm2-status.primary.internal");
    waitForTopicCreated(backup, "mm2-offsets.primary.internal");
    waitForTopicCreated(backup, "mm2-configs.primary.internal");
    createTopics();
    warmUpConsumer(Collections.singletonMap("group.id", "consumer-group-dummy"));
    log.info(PRIMARY_CLUSTER_ALIAS + " REST service: {}", primary.endpointForResource("connectors"));
    log.info(BACKUP_CLUSTER_ALIAS + " REST service: {}", backup.endpointForResource("connectors"));
    log.info(PRIMARY_CLUSTER_ALIAS + " brokers: {}", primary.kafka().bootstrapServers());
    log.info(BACKUP_CLUSTER_ALIAS + " brokers: {}", backup.kafka().bootstrapServers());
    // now that the brokers are running, we can finish setting up the Connectors
    mm2Props.put(PRIMARY_CLUSTER_ALIAS + ".bootstrap.servers", primary.kafka().bootstrapServers());
    mm2Props.put(BACKUP_CLUSTER_ALIAS + ".bootstrap.servers", backup.kafka().bootstrapServers());
}
Also used : UngracefulShutdownException(org.apache.kafka.connect.util.clusters.UngracefulShutdownException) SourceAndTarget(org.apache.kafka.connect.mirror.SourceAndTarget) MirrorMakerConfig(org.apache.kafka.connect.mirror.MirrorMakerConfig)
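
For context, basicMM2Config() is not shown in this snippet. A minimal sketch of the kind of property map it might build, and of how MirrorMakerConfig turns that map into per-flow worker configs, could look like the following; the cluster aliases match the test, but the property values are illustrative assumptions rather than the test's actual defaults.

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.connect.mirror.MirrorMakerConfig;
import org.apache.kafka.connect.mirror.SourceAndTarget;

public class Mm2ConfigSketch {
    public static void main(String[] args) {
        // Hypothetical MM2 properties; the real test appends the embedded brokers'
        // bootstrap servers only after they are running (see the end of startClusters above).
        Map<String, String> props = new HashMap<>();
        props.put("clusters", "primary, backup");
        // placeholder addresses
        props.put("primary.bootstrap.servers", "localhost:9092");
        props.put("backup.bootstrap.servers", "localhost:9093");
        props.put("primary->backup.enabled", "true");
        props.put("backup->primary.enabled", "true");
        props.put("topics", "test-topic-.*");
        MirrorMakerConfig config = new MirrorMakerConfig(props);
        // one worker config per replication flow; startClusters feeds these to the
        // embedded Connect cluster it builds for each alias
        Map<String, String> primaryToBackup = config.workerConfig(new SourceAndTarget("primary", "backup"));
        Map<String, String> backupToPrimary = config.workerConfig(new SourceAndTarget("backup", "primary"));
        System.out.println(primaryToBackup);
        System.out.println(backupToPrimary);
    }
}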

Example 7 with MirrorMakerConfig

use of org.apache.kafka.connect.mirror.MirrorMakerConfig in project kafka by apache.

the class MirrorConnectorsIntegrationBaseTest method testReplicationWithEmptyPartition.

@Test
public void testReplicationWithEmptyPartition() throws Exception {
    String consumerGroupName = "consumer-group-testReplicationWithEmptyPartition";
    Map<String, Object> consumerProps = Collections.singletonMap("group.id", consumerGroupName);
    // create topic
    String topic = "test-topic-with-empty-partition";
    primary.kafka().createTopic(topic, NUM_PARTITIONS);
    // produce to every partition of the topic except the last one
    produceMessages(primary, topic, NUM_PARTITIONS - 1);
    // consume before starting the connectors so we don't need to wait for discovery
    int expectedRecords = NUM_RECORDS_PER_PARTITION * (NUM_PARTITIONS - 1);
    try (Consumer<byte[], byte[]> primaryConsumer = primary.kafka().createConsumerAndSubscribeTo(consumerProps, topic)) {
        waitForConsumingAllRecords(primaryConsumer, expectedRecords);
    }
    // one way replication from primary to backup
    mm2Props.put(BACKUP_CLUSTER_ALIAS + "->" + PRIMARY_CLUSTER_ALIAS + ".enabled", "false");
    mm2Config = new MirrorMakerConfig(mm2Props);
    waitUntilMirrorMakerIsRunning(backup, CONNECTOR_LIST, mm2Config, PRIMARY_CLUSTER_ALIAS, BACKUP_CLUSTER_ALIAS);
    // sleep a few seconds so that MM2 can finish replicating and the downstream consumer has records to consume
    Thread.sleep(TimeUnit.SECONDS.toMillis(3));
    String backupTopic = PRIMARY_CLUSTER_ALIAS + "." + topic;
    // consume all records from backup cluster
    try (Consumer<byte[], byte[]> backupConsumer = backup.kafka().createConsumerAndSubscribeTo(consumerProps, backupTopic)) {
        waitForConsumingAllRecords(backupConsumer, expectedRecords);
    }
    try (Admin backupClient = backup.kafka().createAdminClient()) {
        // retrieve the consumer group offset from backup cluster
        Map<TopicPartition, OffsetAndMetadata> remoteOffsets = backupClient.listConsumerGroupOffsets(consumerGroupName).partitionsToOffsetAndMetadata().get();
        // look up the offset of the last partition, which never received any records
        OffsetAndMetadata offset = remoteOffsets.get(new TopicPartition(backupTopic, NUM_PARTITIONS - 1));
        // offset of the last partition should exist, but its value should be 0
        assertNotNull(offset, "Offset of last partition was not replicated");
        assertEquals(0, offset.offset(), "Offset of last partition is not zero");
    }
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) Admin(org.apache.kafka.clients.admin.Admin) Checkpoint(org.apache.kafka.connect.mirror.Checkpoint) MirrorMakerConfig(org.apache.kafka.connect.mirror.MirrorMakerConfig) Test(org.junit.jupiter.api.Test)
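
The checkpoints verified above are what make a failover possible: a consumer started against the backup cluster can resume from the translated group offsets. A minimal sketch of that step, assuming a placeholder bootstrap address and reusing the group name from the test, might look like this; it uses only the Admin and consumer APIs already shown above.

import java.time.Duration;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class FailoverFromTranslatedOffsets {
    public static void main(String[] args) throws Exception {
        // placeholder address for the backup cluster
        String backupBootstrap = "localhost:9093";
        String group = "consumer-group-testReplicationWithEmptyPartition";

        Properties adminProps = new Properties();
        adminProps.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, backupBootstrap);
        Properties consumerProps = new Properties();
        consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, backupBootstrap);
        consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, group);
        consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

        try (Admin admin = Admin.create(adminProps);
             KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(consumerProps)) {
            // offsets that MM2's checkpoint connector committed for this group on the backup cluster
            Map<TopicPartition, OffsetAndMetadata> translated =
                admin.listConsumerGroupOffsets(group).partitionsToOffsetAndMetadata().get();
            // resume consumption on the backup cluster from the translated positions
            consumer.assign(translated.keySet());
            translated.forEach((tp, offset) -> {
                if (offset != null) {
                    consumer.seek(tp, offset.offset());
                }
            });
            consumer.poll(Duration.ofSeconds(1));
        }
    }
}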

Example 8 with MirrorMakerConfig

use of org.apache.kafka.connect.mirror.MirrorMakerConfig in project kafka by apache.

the class MirrorConnectorsIntegrationBaseTest method testOneWayReplicationWithAutoOffsetSync.

@Test
public void testOneWayReplicationWithAutoOffsetSync() throws InterruptedException {
    produceMessages(primary, "test-topic-1");
    String consumerGroupName = "consumer-group-testOneWayReplicationWithAutoOffsetSync";
    Map<String, Object> consumerProps = new HashMap<String, Object>() {

        {
            put("group.id", consumerGroupName);
            put("auto.offset.reset", "earliest");
        }
    };
    // create consumers before starting the connectors so we don't need to wait for discovery
    try (Consumer<byte[], byte[]> primaryConsumer = primary.kafka().createConsumerAndSubscribeTo(consumerProps, "test-topic-1")) {
        // wait until the consumer group has consumed all records so that MM2 can replicate the expected committed offsets
        waitForConsumingAllRecords(primaryConsumer, NUM_RECORDS_PRODUCED);
    }
    // enable automated consumer group offset sync
    mm2Props.put("sync.group.offsets.enabled", "true");
    mm2Props.put("sync.group.offsets.interval.seconds", "1");
    // one way replication from primary to backup
    mm2Props.put(BACKUP_CLUSTER_ALIAS + "->" + PRIMARY_CLUSTER_ALIAS + ".enabled", "false");
    mm2Config = new MirrorMakerConfig(mm2Props);
    waitUntilMirrorMakerIsRunning(backup, CONNECTOR_LIST, mm2Config, PRIMARY_CLUSTER_ALIAS, BACKUP_CLUSTER_ALIAS);
    // make sure the topic is created in the other cluster
    waitForTopicCreated(primary, "backup.test-topic-1");
    waitForTopicCreated(backup, "primary.test-topic-1");
    // create a consumer on the backup cluster with the same consumer group id to consume the replicated topic
    Consumer<byte[], byte[]> backupConsumer = backup.kafka().createConsumerAndSubscribeTo(consumerProps, "primary.test-topic-1");
    waitForConsumerGroupOffsetSync(backup, backupConsumer, Collections.singletonList("primary.test-topic-1"), consumerGroupName, NUM_RECORDS_PRODUCED);
    ConsumerRecords<byte[], byte[]> records = backupConsumer.poll(CONSUMER_POLL_TIMEOUT_MS);
    // the poll should return zero records because this consumer group's offsets have already been
    // synchronized from primary to backup by the background sync job, so there is nothing left to
    // consume from the replicated topic on the backup cluster
    assertEquals(0, records.count(), "consumer record size is not zero");
    // now create a new topic in primary cluster
    primary.kafka().createTopic("test-topic-2", NUM_PARTITIONS);
    // make sure the topic is created in backup cluster
    waitForTopicCreated(backup, "primary.test-topic-2");
    // produce some records to the new topic in primary cluster
    produceMessages(primary, "test-topic-2");
    // create a consumer at primary cluster to consume the new topic
    try (Consumer<byte[], byte[]> consumer1 = primary.kafka().createConsumerAndSubscribeTo(Collections.singletonMap("group.id", "consumer-group-1"), "test-topic-2")) {
        // wait until the consumer group has consumed all records so that MM2 can replicate the expected committed offsets
        waitForConsumingAllRecords(consumer1, NUM_RECORDS_PRODUCED);
    }
    // create a consumer on the backup cluster with the same consumer group id to consume both the old and the new topic
    backupConsumer = backup.kafka().createConsumerAndSubscribeTo(Collections.singletonMap("group.id", consumerGroupName), "primary.test-topic-1", "primary.test-topic-2");
    waitForConsumerGroupOffsetSync(backup, backupConsumer, Arrays.asList("primary.test-topic-1", "primary.test-topic-2"), consumerGroupName, NUM_RECORDS_PRODUCED);
    records = backupConsumer.poll(CONSUMER_POLL_TIMEOUT_MS);
    // as above, there should be no more records for this consumer group to consume on the backup cluster
    assertEquals(0, records.count(), "consumer record size is not zero");
    backupConsumer.close();
}
Also used : HashMap(java.util.HashMap) MirrorMakerConfig(org.apache.kafka.connect.mirror.MirrorMakerConfig) Test(org.junit.jupiter.api.Test)
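
waitUntilMirrorMakerIsRunning is also not shown in these snippets. A plausible sketch of its central step, deriving each MM2 connector's configuration from MirrorMakerConfig and submitting it to the backup cluster's Connect workers, is below; it assumes the connectorBaseConfig and configureConnector methods from Kafka's mirror and test-cluster utilities and is not the test's exact implementation.

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import org.apache.kafka.connect.connector.Connector;
import org.apache.kafka.connect.mirror.MirrorCheckpointConnector;
import org.apache.kafka.connect.mirror.MirrorHeartbeatConnector;
import org.apache.kafka.connect.mirror.MirrorMakerConfig;
import org.apache.kafka.connect.mirror.MirrorSourceConnector;
import org.apache.kafka.connect.mirror.SourceAndTarget;
import org.apache.kafka.connect.util.clusters.EmbeddedConnectCluster;

public class StartMirrorConnectorsSketch {

    // configure the three MM2 connectors on the target cluster's Connect workers
    static void startMirrorConnectors(EmbeddedConnectCluster connectCluster, MirrorMakerConfig mm2Config,
            String sourceAlias, String targetAlias) {
        List<Class<? extends Connector>> connectors = Arrays.asList(
            MirrorSourceConnector.class, MirrorCheckpointConnector.class, MirrorHeartbeatConnector.class);
        SourceAndTarget flow = new SourceAndTarget(sourceAlias, targetAlias);
        for (Class<? extends Connector> connector : connectors) {
            // connectorBaseConfig resolves the per-flow settings (topics, groups,
            // replication policy, sync.group.offsets.*, ...) for this connector class
            Map<String, String> config = mm2Config.connectorBaseConfig(flow, connector);
            connectCluster.configureConnector(connector.getSimpleName(), config);
        }
    }
}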

Aggregations

MirrorMakerConfig (org.apache.kafka.connect.mirror.MirrorMakerConfig): 8 uses
Test (org.junit.jupiter.api.Test): 7 uses
HashMap (java.util.HashMap): 4 uses
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 4 uses
TopicPartition (org.apache.kafka.common.TopicPartition): 4 uses
MirrorClient (org.apache.kafka.connect.mirror.MirrorClient): 3 uses
Admin (org.apache.kafka.clients.admin.Admin): 2 uses
Checkpoint (org.apache.kafka.connect.mirror.Checkpoint): 2 uses
MirrorHeartbeatConnector (org.apache.kafka.connect.mirror.MirrorHeartbeatConnector): 1 use
ReplicationPolicy (org.apache.kafka.connect.mirror.ReplicationPolicy): 1 use
SourceAndTarget (org.apache.kafka.connect.mirror.SourceAndTarget): 1 use
UngracefulShutdownException (org.apache.kafka.connect.util.clusters.UngracefulShutdownException): 1 use