Example 1 with MirrorClient

Use of org.apache.kafka.connect.mirror.MirrorClient in the Apache Kafka project.

From the class MirrorConnectorsIntegrationBaseTest, the method testReplication:

@Test
public void testReplication() throws Exception {
    produceMessages(primary, "test-topic-1");
    produceMessages(backup, "test-topic-1");
    String consumerGroupName = "consumer-group-testReplication";
    Map<String, Object> consumerProps = new HashMap<String, Object>() {{
        put("group.id", consumerGroupName);
        put("auto.offset.reset", "latest");
    }};
    // warm up consumers before starting the connectors so we don't need to wait for discovery
    warmUpConsumer(consumerProps);
    mm2Config = new MirrorMakerConfig(mm2Props);
    waitUntilMirrorMakerIsRunning(backup, CONNECTOR_LIST, mm2Config, PRIMARY_CLUSTER_ALIAS, BACKUP_CLUSTER_ALIAS);
    waitUntilMirrorMakerIsRunning(primary, CONNECTOR_LIST, mm2Config, BACKUP_CLUSTER_ALIAS, PRIMARY_CLUSTER_ALIAS);
    MirrorClient primaryClient = new MirrorClient(mm2Config.clientConfig(PRIMARY_CLUSTER_ALIAS));
    MirrorClient backupClient = new MirrorClient(mm2Config.clientConfig(BACKUP_CLUSTER_ALIAS));
    // make sure the topic is auto-created in the other cluster
    waitForTopicCreated(primary, "backup.test-topic-1");
    waitForTopicCreated(backup, "primary.test-topic-1");
    waitForTopicCreated(primary, "mm2-offset-syncs.backup.internal");
    assertEquals(TopicConfig.CLEANUP_POLICY_COMPACT, getTopicConfig(backup.kafka(), "primary.test-topic-1", TopicConfig.CLEANUP_POLICY_CONFIG), "topic config was not synced");
    createAndTestNewTopicWithConfigFilter();
    assertEquals(NUM_RECORDS_PRODUCED, primary.kafka().consume(NUM_RECORDS_PRODUCED, RECORD_TRANSFER_DURATION_MS, "test-topic-1").count(), "Records were not produced to primary cluster.");
    assertEquals(NUM_RECORDS_PRODUCED, backup.kafka().consume(NUM_RECORDS_PRODUCED, RECORD_TRANSFER_DURATION_MS, "primary.test-topic-1").count(), "Records were not replicated to backup cluster.");
    assertEquals(NUM_RECORDS_PRODUCED, backup.kafka().consume(NUM_RECORDS_PRODUCED, RECORD_TRANSFER_DURATION_MS, "test-topic-1").count(), "Records were not produced to backup cluster.");
    assertEquals(NUM_RECORDS_PRODUCED, primary.kafka().consume(NUM_RECORDS_PRODUCED, RECORD_TRANSFER_DURATION_MS, "backup.test-topic-1").count(), "Records were not replicated to primary cluster.");
    assertEquals(NUM_RECORDS_PRODUCED * 2, primary.kafka().consume(NUM_RECORDS_PRODUCED * 2, RECORD_TRANSFER_DURATION_MS, "backup.test-topic-1", "test-topic-1").count(), "Primary cluster doesn't have all records from both clusters.");
    assertEquals(NUM_RECORDS_PRODUCED * 2, backup.kafka().consume(NUM_RECORDS_PRODUCED * 2, RECORD_TRANSFER_DURATION_MS, "primary.test-topic-1", "test-topic-1").count(), "Backup cluster doesn't have all records from both clusters.");
    assertTrue(primary.kafka().consume(1, RECORD_TRANSFER_DURATION_MS, "heartbeats").count() > 0, "Heartbeats were not emitted to primary cluster.");
    assertTrue(backup.kafka().consume(1, RECORD_TRANSFER_DURATION_MS, "heartbeats").count() > 0, "Heartbeats were not emitted to backup cluster.");
    assertTrue(backup.kafka().consume(1, RECORD_TRANSFER_DURATION_MS, "primary.heartbeats").count() > 0, "Heartbeats were not replicated downstream to backup cluster.");
    assertTrue(primary.kafka().consume(1, RECORD_TRANSFER_DURATION_MS, "backup.heartbeats").count() > 0, "Heartbeats were not replicated downstream to primary cluster.");
    assertTrue(backupClient.upstreamClusters().contains(PRIMARY_CLUSTER_ALIAS), "Did not find upstream primary cluster.");
    assertEquals(1, backupClient.replicationHops(PRIMARY_CLUSTER_ALIAS), "Did not calculate replication hops correctly.");
    assertTrue(primaryClient.upstreamClusters().contains(BACKUP_CLUSTER_ALIAS), "Did not find upstream backup cluster.");
    assertEquals(1, primaryClient.replicationHops(BACKUP_CLUSTER_ALIAS), "Did not calculate replication hops correctly.");
    assertTrue(backup.kafka().consume(1, CHECKPOINT_DURATION_MS, "primary.checkpoints.internal").count() > 0, "Checkpoints were not emitted downstream to backup cluster.");
    Map<TopicPartition, OffsetAndMetadata> backupOffsets = backupClient.remoteConsumerOffsets(consumerGroupName, PRIMARY_CLUSTER_ALIAS, Duration.ofMillis(CHECKPOINT_DURATION_MS));
    assertTrue(backupOffsets.containsKey(new TopicPartition("primary.test-topic-1", 0)), "Offsets not translated downstream to backup cluster. Found: " + backupOffsets);
    // Failover consumer group to backup cluster.
    try (Consumer<byte[], byte[]> primaryConsumer = backup.kafka().createConsumer(Collections.singletonMap("group.id", consumerGroupName))) {
        primaryConsumer.assign(backupOffsets.keySet());
        backupOffsets.forEach(primaryConsumer::seek);
        primaryConsumer.poll(CONSUMER_POLL_TIMEOUT_MS);
        primaryConsumer.commitAsync();
        assertTrue(primaryConsumer.position(new TopicPartition("primary.test-topic-1", 0)) > 0, "Consumer failedover to zero offset.");
        assertTrue(primaryConsumer.position(new TopicPartition("primary.test-topic-1", 0)) <= NUM_RECORDS_PRODUCED, "Consumer failedover beyond expected offset.");
        assertTrue(primary.kafka().consume(1, CHECKPOINT_DURATION_MS, "backup.checkpoints.internal").count() > 0, "Checkpoints were not emitted upstream to primary cluster.");
    }
    waitForCondition(() -> primaryClient.remoteConsumerOffsets(consumerGroupName, BACKUP_CLUSTER_ALIAS, Duration.ofMillis(CHECKPOINT_DURATION_MS)).containsKey(new TopicPartition("backup.test-topic-1", 0)), CHECKPOINT_DURATION_MS, "Offsets not translated downstream to primary cluster.");
    waitForCondition(() -> primaryClient.remoteConsumerOffsets(consumerGroupName, BACKUP_CLUSTER_ALIAS, Duration.ofMillis(CHECKPOINT_DURATION_MS)).containsKey(new TopicPartition("test-topic-1", 0)), CHECKPOINT_DURATION_MS, "Offsets not translated upstream to primary cluster.");
    Map<TopicPartition, OffsetAndMetadata> primaryOffsets = primaryClient.remoteConsumerOffsets(consumerGroupName, BACKUP_CLUSTER_ALIAS, Duration.ofMillis(CHECKPOINT_DURATION_MS));
    primaryClient.close();
    backupClient.close();
    // Failback consumer group to primary cluster
    try (Consumer<byte[], byte[]> backupConsumer = primary.kafka().createConsumer(Collections.singletonMap("group.id", consumerGroupName))) {
        backupConsumer.assign(primaryOffsets.keySet());
        primaryOffsets.forEach(backupConsumer::seek);
        backupConsumer.poll(CONSUMER_POLL_TIMEOUT_MS);
        backupConsumer.commitAsync();
        assertTrue(backupConsumer.position(new TopicPartition("test-topic-1", 0)) > 0, "Consumer failedback to zero upstream offset.");
        assertTrue(backupConsumer.position(new TopicPartition("backup.test-topic-1", 0)) > 0, "Consumer failedback to zero downstream offset.");
        assertTrue(backupConsumer.position(new TopicPartition("test-topic-1", 0)) <= NUM_RECORDS_PRODUCED, "Consumer failedback beyond expected upstream offset.");
        assertTrue(backupConsumer.position(new TopicPartition("backup.test-topic-1", 0)) <= NUM_RECORDS_PRODUCED, "Consumer failedback beyond expected downstream offset.");
    }
    // create more matching topics
    primary.kafka().createTopic("test-topic-2", NUM_PARTITIONS);
    backup.kafka().createTopic("test-topic-3", NUM_PARTITIONS);
    // make sure the topic is auto-created in the other cluster
    waitForTopicCreated(backup, "primary.test-topic-2");
    waitForTopicCreated(primary, "backup.test-topic-3");
    // only produce messages to the first partition
    produceMessages(primary, "test-topic-2", 1);
    produceMessages(backup, "test-topic-3", 1);
    // expect the total number of consumed messages to equal NUM_RECORDS_PER_PARTITION
    assertEquals(NUM_RECORDS_PER_PARTITION, primary.kafka().consume(NUM_RECORDS_PER_PARTITION, RECORD_TRANSFER_DURATION_MS, "test-topic-2").count(), "Records were not produced to primary cluster.");
    assertEquals(NUM_RECORDS_PER_PARTITION, backup.kafka().consume(NUM_RECORDS_PER_PARTITION, RECORD_TRANSFER_DURATION_MS, "test-topic-3").count(), "Records were not produced to backup cluster.");
    assertEquals(NUM_RECORDS_PER_PARTITION, primary.kafka().consume(NUM_RECORDS_PER_PARTITION, 2 * RECORD_TRANSFER_DURATION_MS, "backup.test-topic-3").count(), "New topic was not replicated to primary cluster.");
    assertEquals(NUM_RECORDS_PER_PARTITION, backup.kafka().consume(NUM_RECORDS_PER_PARTITION, 2 * RECORD_TRANSFER_DURATION_MS, "primary.test-topic-2").count(), "New topic was not replicated to backup cluster.");
}
Also used: MirrorClient (org.apache.kafka.connect.mirror.MirrorClient), HashMap (java.util.HashMap), TopicPartition (org.apache.kafka.common.TopicPartition), OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata), MirrorMakerConfig (org.apache.kafka.connect.mirror.MirrorMakerConfig), Test (org.junit.jupiter.api.Test)
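
For orientation, here is a minimal standalone sketch of the failover step exercised above: MirrorClient.remoteConsumerOffsets translates a consumer group's committed offsets from the primary cluster into the backup cluster's coordinates, and a plain consumer is then sought to the translated positions. This is a sketch under stated assumptions, not the test's code: the class name FailoverSketch, the broker addresses, and the reuse of the test's group name are placeholders.

import java.time.Duration;
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.mirror.MirrorClient;
import org.apache.kafka.connect.mirror.MirrorMakerConfig;

public class FailoverSketch {
    public static void main(String[] args) {
        // Placeholder MM2 properties; in the test above these come from mm2Props.
        Map<String, String> mm2Props = new HashMap<>();
        mm2Props.put("clusters", "primary, backup");
        mm2Props.put("primary.bootstrap.servers", "primary-broker:9092"); // placeholder address
        mm2Props.put("backup.bootstrap.servers", "backup-broker:9092");   // placeholder address
        MirrorMakerConfig mm2Config = new MirrorMakerConfig(mm2Props);

        // clientConfig(alias) yields a MirrorClientConfig targeting that cluster.
        MirrorClient backupClient = new MirrorClient(mm2Config.clientConfig("backup"));

        // Translate the group's committed offsets from primary into backup's coordinates.
        Map<TopicPartition, OffsetAndMetadata> offsets = backupClient.remoteConsumerOffsets(
                "consumer-group-testReplication", "primary", Duration.ofMinutes(1));

        Map<String, Object> consumerProps = new HashMap<>();
        consumerProps.put("bootstrap.servers", "backup-broker:9092"); // placeholder address
        consumerProps.put("group.id", "consumer-group-testReplication");
        consumerProps.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        consumerProps.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        try (Consumer<byte[], byte[]> consumer = new KafkaConsumer<>(consumerProps)) {
            consumer.assign(offsets.keySet());
            offsets.forEach(consumer::seek); // resume on backup where the group left off on primary
            consumer.commitSync();           // persist the translated positions for the group
        }
        backupClient.close();
    }
}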

Example 2 with MirrorClient

Use of org.apache.kafka.connect.mirror.MirrorClient in the Apache Kafka project.

From the class MirrorConnectorsIntegrationBaseTest, the method testOffsetSyncsTopicsOnTarget:

@Test
public void testOffsetSyncsTopicsOnTarget() throws Exception {
    // move offset-syncs topics to target
    mm2Props.put(PRIMARY_CLUSTER_ALIAS + "->" + BACKUP_CLUSTER_ALIAS + ".offset-syncs.topic.location", "target");
    // one way replication from primary to backup
    mm2Props.put(BACKUP_CLUSTER_ALIAS + "->" + PRIMARY_CLUSTER_ALIAS + ".enabled", "false");
    mm2Config = new MirrorMakerConfig(mm2Props);
    waitUntilMirrorMakerIsRunning(backup, CONNECTOR_LIST, mm2Config, PRIMARY_CLUSTER_ALIAS, BACKUP_CLUSTER_ALIAS);
    // Ensure the offset syncs topic is created in the target cluster
    waitForTopicCreated(backup.kafka(), "mm2-offset-syncs." + PRIMARY_CLUSTER_ALIAS + ".internal");
    produceMessages(primary, "test-topic-1");
    ReplicationPolicy replicationPolicy = new MirrorClient(mm2Config.clientConfig(BACKUP_CLUSTER_ALIAS)).replicationPolicy();
    String remoteTopic = replicationPolicy.formatRemoteTopic(PRIMARY_CLUSTER_ALIAS, "test-topic-1");
    // Check offsets are pushed to the checkpoint topic
    Consumer<byte[], byte[]> backupConsumer = backup.kafka().createConsumerAndSubscribeTo(Collections.singletonMap("auto.offset.reset", "earliest"), PRIMARY_CLUSTER_ALIAS + ".checkpoints.internal");
    waitForCondition(() -> {
        ConsumerRecords<byte[], byte[]> records = backupConsumer.poll(Duration.ofSeconds(1L));
        for (ConsumerRecord<byte[], byte[]> record : records) {
            Checkpoint checkpoint = Checkpoint.deserializeRecord(record);
            if (remoteTopic.equals(checkpoint.topicPartition().topic())) {
                return true;
            }
        }
        return false;
    }, 30_000, "Unable to find checkpoints for " + PRIMARY_CLUSTER_ALIAS + ".test-topic-1");
    // Ensure no offset-syncs topics have been created on the primary cluster
    Set<String> primaryTopics = primary.kafka().createAdminClient().listTopics().names().get();
    assertFalse(primaryTopics.contains("mm2-offset-syncs." + PRIMARY_CLUSTER_ALIAS + ".internal"));
    assertFalse(primaryTopics.contains("mm2-offset-syncs." + BACKUP_CLUSTER_ALIAS + ".internal"));
}
Also used: MirrorClient (org.apache.kafka.connect.mirror.MirrorClient), Checkpoint (org.apache.kafka.connect.mirror.Checkpoint), ReplicationPolicy (org.apache.kafka.connect.mirror.ReplicationPolicy), MirrorMakerConfig (org.apache.kafka.connect.mirror.MirrorMakerConfig), Test (org.junit.jupiter.api.Test)
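
The remote topic name in this example comes from the replication policy that MirrorClient resolves from its client config. As a small illustration of that naming logic in isolation, the sketch below applies DefaultReplicationPolicy (the policy MirrorClient uses unless configured otherwise) directly; the class name ReplicationPolicySketch and the alias/topic values are placeholders.

import org.apache.kafka.connect.mirror.DefaultReplicationPolicy;
import org.apache.kafka.connect.mirror.ReplicationPolicy;

public class ReplicationPolicySketch {
    public static void main(String[] args) {
        ReplicationPolicy policy = new DefaultReplicationPolicy();

        // Rename a source topic for the target cluster: source alias + separator + topic.
        String remote = policy.formatRemoteTopic("primary", "test-topic-1");
        System.out.println(remote); // primary.test-topic-1

        // Recover the source cluster alias and the upstream topic name from a remote topic.
        System.out.println(policy.topicSource(remote));   // primary
        System.out.println(policy.upstreamTopic(remote)); // test-topic-1
    }
}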

Example 3 with MirrorClient

Use of org.apache.kafka.connect.mirror.MirrorClient in the Apache Kafka project.

From the class IdentityReplicationIntegrationTest, the method testReplication:

@Test
public void testReplication() throws Exception {
    produceMessages(primary, "test-topic-1");
    String consumerGroupName = "consumer-group-testReplication";
    Map<String, Object> consumerProps = new HashMap<String, Object>() {{
        put("group.id", consumerGroupName);
        put("auto.offset.reset", "latest");
    }};
    // warm up consumers before starting the connectors so we don't need to wait for discovery
    warmUpConsumer(consumerProps);
    mm2Config = new MirrorMakerConfig(mm2Props);
    waitUntilMirrorMakerIsRunning(backup, CONNECTOR_LIST, mm2Config, PRIMARY_CLUSTER_ALIAS, BACKUP_CLUSTER_ALIAS);
    waitUntilMirrorMakerIsRunning(primary, Collections.singletonList(MirrorHeartbeatConnector.class), mm2Config, BACKUP_CLUSTER_ALIAS, PRIMARY_CLUSTER_ALIAS);
    MirrorClient primaryClient = new MirrorClient(mm2Config.clientConfig(PRIMARY_CLUSTER_ALIAS));
    MirrorClient backupClient = new MirrorClient(mm2Config.clientConfig(BACKUP_CLUSTER_ALIAS));
    // make sure the topic is auto-created in the other cluster
    waitForTopicCreated(primary, "test-topic-1");
    waitForTopicCreated(backup, "test-topic-1");
    assertEquals(TopicConfig.CLEANUP_POLICY_COMPACT, getTopicConfig(backup.kafka(), "test-topic-1", TopicConfig.CLEANUP_POLICY_CONFIG), "topic config was not synced");
    createAndTestNewTopicWithConfigFilter();
    assertEquals(NUM_RECORDS_PRODUCED, primary.kafka().consume(NUM_RECORDS_PRODUCED, RECORD_TRANSFER_DURATION_MS, "test-topic-1").count(), "Records were not produced to primary cluster.");
    assertEquals(NUM_RECORDS_PRODUCED, backup.kafka().consume(NUM_RECORDS_PRODUCED, RECORD_TRANSFER_DURATION_MS, "test-topic-1").count(), "Records were not replicated to backup cluster.");
    assertTrue(primary.kafka().consume(1, RECORD_TRANSFER_DURATION_MS, "heartbeats").count() > 0, "Heartbeats were not emitted to primary cluster.");
    assertTrue(backup.kafka().consume(1, RECORD_TRANSFER_DURATION_MS, "heartbeats").count() > 0, "Heartbeats were not emitted to backup cluster.");
    assertTrue(backup.kafka().consume(1, RECORD_TRANSFER_DURATION_MS, "primary.heartbeats").count() > 0, "Heartbeats were not replicated downstream to backup cluster.");
    assertTrue(primary.kafka().consume(1, RECORD_TRANSFER_DURATION_MS, "heartbeats").count() > 0, "Heartbeats were not replicated downstream to primary cluster.");
    assertTrue(backupClient.upstreamClusters().contains(PRIMARY_CLUSTER_ALIAS), "Did not find upstream primary cluster.");
    assertEquals(1, backupClient.replicationHops(PRIMARY_CLUSTER_ALIAS), "Did not calculate replication hops correctly.");
    assertTrue(backup.kafka().consume(1, CHECKPOINT_DURATION_MS, "primary.checkpoints.internal").count() > 0, "Checkpoints were not emitted downstream to backup cluster.");
    Map<TopicPartition, OffsetAndMetadata> backupOffsets = backupClient.remoteConsumerOffsets(consumerGroupName, PRIMARY_CLUSTER_ALIAS, Duration.ofMillis(CHECKPOINT_DURATION_MS));
    assertTrue(backupOffsets.containsKey(new TopicPartition("test-topic-1", 0)), "Offsets not translated downstream to backup cluster. Found: " + backupOffsets);
    // Failover consumer group to backup cluster.
    try (Consumer<byte[], byte[]> primaryConsumer = backup.kafka().createConsumer(Collections.singletonMap("group.id", consumerGroupName))) {
        primaryConsumer.assign(backupOffsets.keySet());
        backupOffsets.forEach(primaryConsumer::seek);
        primaryConsumer.poll(CONSUMER_POLL_TIMEOUT_MS);
        primaryConsumer.commitAsync();
        assertTrue(primaryConsumer.position(new TopicPartition("test-topic-1", 0)) > 0, "Consumer failedover to zero offset.");
        assertTrue(primaryConsumer.position(new TopicPartition("test-topic-1", 0)) <= NUM_RECORDS_PRODUCED, "Consumer failedover beyond expected offset.");
    }
    primaryClient.close();
    backupClient.close();
    // create more matching topics
    primary.kafka().createTopic("test-topic-2", NUM_PARTITIONS);
    // make sure the topic is auto-created in the other cluster
    waitForTopicCreated(backup, "test-topic-2");
    // only produce messages to the first partition
    produceMessages(primary, "test-topic-2", 1);
    // expect the total number of consumed messages to equal NUM_RECORDS_PER_PARTITION
    assertEquals(NUM_RECORDS_PER_PARTITION, primary.kafka().consume(NUM_RECORDS_PER_PARTITION, RECORD_TRANSFER_DURATION_MS, "test-topic-2").count(), "Records were not produced to primary cluster.");
    assertEquals(NUM_RECORDS_PER_PARTITION, backup.kafka().consume(NUM_RECORDS_PER_PARTITION, 2 * RECORD_TRANSFER_DURATION_MS, "test-topic-2").count(), "New topic was not replicated to backup cluster.");
}
Also used: MirrorClient (org.apache.kafka.connect.mirror.MirrorClient), HashMap (java.util.HashMap), MirrorHeartbeatConnector (org.apache.kafka.connect.mirror.MirrorHeartbeatConnector), TopicPartition (org.apache.kafka.common.TopicPartition), OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata), MirrorMakerConfig (org.apache.kafka.connect.mirror.MirrorMakerConfig), Test (org.junit.jupiter.api.Test)
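
Unlike the base test, this test replicates topics without renaming them. A minimal sketch of how that behavior is selected, assuming placeholder aliases and broker addresses (and a hypothetical class name IdentityPolicySketch): the replication.policy.class property points MirrorMaker 2, and thus MirrorClient, at IdentityReplicationPolicy, under which a replicated data topic keeps its upstream name.

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.connect.mirror.MirrorClient;
import org.apache.kafka.connect.mirror.MirrorMakerConfig;

public class IdentityPolicySketch {
    public static void main(String[] args) {
        Map<String, String> mm2Props = new HashMap<>();
        mm2Props.put("clusters", "primary, backup");
        mm2Props.put("primary.bootstrap.servers", "primary-broker:9092"); // placeholder address
        mm2Props.put("backup.bootstrap.servers", "backup-broker:9092");   // placeholder address
        mm2Props.put("primary->backup.enabled", "true");
        // Keep topic names unchanged on the target cluster.
        mm2Props.put("replication.policy.class",
                "org.apache.kafka.connect.mirror.IdentityReplicationPolicy");

        MirrorMakerConfig mm2Config = new MirrorMakerConfig(mm2Props);
        MirrorClient backupClient = new MirrorClient(mm2Config.clientConfig("backup"));
        // With the identity policy, a replicated data topic keeps its upstream name.
        System.out.println(backupClient.replicationPolicy()
                .formatRemoteTopic("primary", "test-topic-1")); // test-topic-1
        backupClient.close();
    }
}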

Aggregations

MirrorClient (org.apache.kafka.connect.mirror.MirrorClient): 3 uses
MirrorMakerConfig (org.apache.kafka.connect.mirror.MirrorMakerConfig): 3 uses
Test (org.junit.jupiter.api.Test): 3 uses
HashMap (java.util.HashMap): 2 uses
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 2 uses
TopicPartition (org.apache.kafka.common.TopicPartition): 2 uses
Checkpoint (org.apache.kafka.connect.mirror.Checkpoint): 1 use
MirrorHeartbeatConnector (org.apache.kafka.connect.mirror.MirrorHeartbeatConnector): 1 use
ReplicationPolicy (org.apache.kafka.connect.mirror.ReplicationPolicy): 1 use