
Example 16 with Time

use of org.apache.kafka.common.utils.Time in project apache-kafka-on-k8s by banzaicloud.

the class KafkaConsumerTest method testResetToCommittedOffset.

@Test
public void testResetToCommittedOffset() {
    Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(topic, 1);
    Node node = cluster.nodes().get(0);
    Metadata metadata = createMetadata();
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RoundRobinAssignor();
    KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, OffsetResetStrategy.NONE, true);
    consumer.assign(singletonList(tp0));
    // lookup coordinator
    client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
    // the coordinator is tracked on a separate connection whose node id is Integer.MAX_VALUE - broker id
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    // return a committed offset of 539 for tp0, which position() should then report
    client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, 539L), Errors.NONE), coordinator);
    consumer.poll(0);
    assertEquals(539L, consumer.position(tp0));
}
Also used : FindCoordinatorResponse(org.apache.kafka.common.requests.FindCoordinatorResponse) Node(org.apache.kafka.common.Node) Metadata(org.apache.kafka.clients.Metadata) Cluster(org.apache.kafka.common.Cluster) MockTime(org.apache.kafka.common.utils.MockTime) Time(org.apache.kafka.common.utils.Time) PartitionAssignor(org.apache.kafka.clients.consumer.internals.PartitionAssignor) MockClient(org.apache.kafka.clients.MockClient) Test(org.junit.Test)
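
For context, a minimal standalone sketch of the Time/MockTime contract the test depends on (not part of the test file; the class name TimeSketch is made up for illustration):

import org.apache.kafka.common.utils.MockTime;
import org.apache.kafka.common.utils.Time;

public class TimeSketch {
    public static void main(String[] args) {
        // Production code reads the system clock through the Time abstraction...
        Time systemTime = Time.SYSTEM;
        long wallClock = systemTime.milliseconds();

        // ...while tests use MockTime, whose clock only moves when told to.
        MockTime mockTime = new MockTime();
        long before = mockTime.milliseconds();
        mockTime.sleep(5_000);                // advances the mock clock instantly, no real waiting
        long after = mockTime.milliseconds();
        System.out.println(after - before);   // prints 5000
    }
}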

Example 17 with Time

use of org.apache.kafka.common.utils.Time in project apache-kafka-on-k8s by banzaicloud.

the class KafkaConsumerTest method testMissingOffsetNoResetPolicy.

@Test(expected = NoOffsetForPartitionException.class)
public void testMissingOffsetNoResetPolicy() {
    Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(topic, 1);
    Node node = cluster.nodes().get(0);
    Metadata metadata = createMetadata();
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RoundRobinAssignor();
    KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, OffsetResetStrategy.NONE, true);
    consumer.assign(singletonList(tp0));
    // lookup coordinator
    client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    // lookup committed offset and find nothing
    client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, -1L), Errors.NONE), coordinator);
    consumer.poll(0);
}
Also used : FindCoordinatorResponse(org.apache.kafka.common.requests.FindCoordinatorResponse) Node(org.apache.kafka.common.Node) Metadata(org.apache.kafka.clients.Metadata) Cluster(org.apache.kafka.common.Cluster) MockTime(org.apache.kafka.common.utils.MockTime) Time(org.apache.kafka.common.utils.Time) PartitionAssignor(org.apache.kafka.clients.consumer.internals.PartitionAssignor) MockClient(org.apache.kafka.clients.MockClient) Test(org.junit.Test)
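
Outside the mock harness, the same failure mode is produced by setting auto.offset.reset=none on a real consumer; a hedged sketch, where the broker address, group id, and topic name are placeholders:

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.NoOffsetForPartitionException;
import org.apache.kafka.common.serialization.StringDeserializer;

public class NoResetPolicySketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "sketch-group");            // placeholder group
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // "none" corresponds to OffsetResetStrategy.NONE in the test above
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("sketch-topic"));    // placeholder topic
            consumer.poll(100);
        } catch (NoOffsetForPartitionException e) {
            // thrown when the group has no committed offset and no reset policy is configured
            System.err.println("No committed offset and no reset policy: " + e.getMessage());
        }
    }
}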

Example 18 with Time

use of org.apache.kafka.common.utils.Time in project apache-kafka-on-k8s by banzaicloud.

the class KafkaConsumerTest method testRegexSubscription.

@Test
public void testRegexSubscription() {
    String unmatchedTopic = "unmatched";
    Time time = new MockTime();
    Map<String, Integer> topicMetadata = new HashMap<>();
    topicMetadata.put(topic, 1);
    topicMetadata.put(unmatchedTopic, 1);
    Cluster cluster = TestUtils.clusterWith(1, topicMetadata);
    Metadata metadata = createMetadata();
    Node node = cluster.nodes().get(0);
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RoundRobinAssignor();
    KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, true);
    // mock a rebalance that assigns tp0 once the matching topic is discovered
    prepareRebalance(client, node, singleton(topic), assignor, singletonList(tp0), null);
    consumer.subscribe(Pattern.compile(topic), getConsumerRebalanceListener(consumer));
    // the metadata update lets the pattern be evaluated against the cluster's topics
    client.prepareMetadataUpdate(cluster, Collections.<String>emptySet());
    consumer.poll(0);
    assertEquals(singleton(topic), consumer.subscription());
    assertEquals(singleton(tp0), consumer.assignment());
    consumer.close(0, TimeUnit.MILLISECONDS);
}
Also used : HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) Node(org.apache.kafka.common.Node) Metadata(org.apache.kafka.clients.Metadata) Cluster(org.apache.kafka.common.Cluster) MockTime(org.apache.kafka.common.utils.MockTime) Time(org.apache.kafka.common.utils.Time) PartitionAssignor(org.apache.kafka.clients.consumer.internals.PartitionAssignor) MockClient(org.apache.kafka.clients.MockClient) Test(org.junit.Test)
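
A hedged sketch of the same pattern-based subscription against a real consumer (the broker address, group id, and the "orders-.*" pattern are placeholders, not taken from the test):

import java.util.Collection;
import java.util.Properties;
import java.util.regex.Pattern;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

public class RegexSubscriptionSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "regex-sketch-group");      // placeholder group
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        // only topics matching the pattern are subscribed; others are ignored,
        // mirroring the unmatchedTopic assertion in the test above
        consumer.subscribe(Pattern.compile("orders-.*"), new ConsumerRebalanceListener() {
            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                System.out.println("Revoked: " + partitions);
            }
            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                System.out.println("Assigned: " + partitions);
            }
        });
        consumer.poll(0);
        System.out.println("Current subscription: " + consumer.subscription());
        consumer.close();
    }
}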

Example 19 with Time

use of org.apache.kafka.common.utils.Time in project apache-kafka-on-k8s by banzaicloud.

the class KafkaConsumerTest method testSubscriptionChangesWithAutoCommitDisabled.

/**
 * Verify that when a consumer changes its topic subscription its assigned partitions
 * do not immediately change, and the consumed offsets of its to-be-revoked partitions
 * are not committed (when auto-commit is disabled).
 * Upon unsubscribing from subscribed topics, the assigned partitions immediately
 * change but if auto-commit is disabled the consumer offsets are not committed.
 */
@Test
public void testSubscriptionChangesWithAutoCommitDisabled() {
    Time time = new MockTime();
    Map<String, Integer> tpCounts = new HashMap<>();
    tpCounts.put(topic, 1);
    tpCounts.put(topic2, 1);
    Cluster cluster = TestUtils.singletonCluster(tpCounts);
    Node node = cluster.nodes().get(0);
    Metadata metadata = createMetadata();
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RangeAssignor();
    KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, false);
    // initial subscription
    consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer));
    // verify that subscription has changed but assignment is still unchanged
    assertTrue(consumer.subscription().equals(singleton(topic)));
    assertTrue(consumer.assignment().isEmpty());
    // mock rebalance responses
    prepareRebalance(client, node, assignor, singletonList(tp0), null);
    consumer.poll(0);
    // verify that subscription is still the same, and now assignment has caught up
    assertTrue(consumer.subscription().equals(singleton(topic)));
    assertTrue(consumer.assignment().equals(singleton(tp0)));
    consumer.poll(0);
    // subscription change
    consumer.subscribe(singleton(topic2), getConsumerRebalanceListener(consumer));
    // verify that subscription has changed but assignment is still unchanged
    assertTrue(consumer.subscription().equals(singleton(topic2)));
    assertTrue(consumer.assignment().equals(singleton(tp0)));
    // the auto commit is disabled, so no offset commit request should be sent
    for (ClientRequest req : client.requests()) assertTrue(req.requestBuilder().apiKey() != ApiKeys.OFFSET_COMMIT);
    // subscription change
    consumer.unsubscribe();
    // verify that subscription and assignment are both updated
    assertTrue(consumer.subscription().isEmpty());
    assertTrue(consumer.assignment().isEmpty());
    // the auto commit is disabled, so no offset commit request should be sent
    for (ClientRequest req : client.requests()) assertTrue(req.requestBuilder().apiKey() != ApiKeys.OFFSET_COMMIT);
    client.requests().clear();
    consumer.close();
}
Also used : HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) Node(org.apache.kafka.common.Node) Metadata(org.apache.kafka.clients.Metadata) Cluster(org.apache.kafka.common.Cluster) MockTime(org.apache.kafka.common.utils.MockTime) Time(org.apache.kafka.common.utils.Time) PartitionAssignor(org.apache.kafka.clients.consumer.internals.PartitionAssignor) ClientRequest(org.apache.kafka.clients.ClientRequest) MockClient(org.apache.kafka.clients.MockClient) Test(org.junit.Test)
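
The production-side counterpart of the behaviour asserted here is disabling auto-commit and committing explicitly; a minimal sketch, with the broker address, group id, and topic name as placeholders:

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ManualCommitSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");  // placeholder broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "manual-commit-sketch");     // placeholder group
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // with auto-commit disabled, no OFFSET_COMMIT requests are sent unless we ask for them,
        // which is what the test above verifies by inspecting client.requests()
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("sketch-topic"));     // placeholder topic
            ConsumerRecords<String, String> records = consumer.poll(1000);
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("offset=%d value=%s%n", record.offset(), record.value());
            }
            consumer.commitSync();  // offsets are committed only when requested explicitly
        }
    }
}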

Example 20 with Time

use of org.apache.kafka.common.utils.Time in project apache-kafka-on-k8s by banzaicloud.

the class ConnectStandalone method main.

public static void main(String[] args) throws Exception {
    if (args.length < 2) {
        log.info("Usage: ConnectStandalone worker.properties connector1.properties [connector2.properties ...]");
        Exit.exit(1);
    }
    try {
        Time time = Time.SYSTEM;
        log.info("Kafka Connect standalone worker initializing ...");
        long initStart = time.hiResClockMs();
        WorkerInfo initInfo = new WorkerInfo();
        initInfo.logAll();
        String workerPropsFile = args[0];
        Map<String, String> workerProps = !workerPropsFile.isEmpty() ? Utils.propsToStringMap(Utils.loadProps(workerPropsFile)) : Collections.<String, String>emptyMap();
        log.info("Scanning for plugin classes. This might take a moment ...");
        Plugins plugins = new Plugins(workerProps);
        plugins.compareAndSwapWithDelegatingLoader();
        StandaloneConfig config = new StandaloneConfig(workerProps);
        String kafkaClusterId = ConnectUtils.lookupKafkaClusterId(config);
        log.debug("Kafka cluster ID: {}", kafkaClusterId);
        RestServer rest = new RestServer(config);
        URI advertisedUrl = rest.advertisedUrl();
        String workerId = advertisedUrl.getHost() + ":" + advertisedUrl.getPort();
        Worker worker = new Worker(workerId, time, plugins, config, new FileOffsetBackingStore());
        Herder herder = new StandaloneHerder(worker, kafkaClusterId);
        final Connect connect = new Connect(herder, rest);
        log.info("Kafka Connect standalone worker initialization took {}ms", time.hiResClockMs() - initStart);
        try {
            connect.start();
            for (final String connectorPropsFile : Arrays.copyOfRange(args, 1, args.length)) {
                Map<String, String> connectorProps = Utils.propsToStringMap(Utils.loadProps(connectorPropsFile));
                FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>(new Callback<Herder.Created<ConnectorInfo>>() {

                    @Override
                    public void onCompletion(Throwable error, Herder.Created<ConnectorInfo> info) {
                        if (error != null)
                            log.error("Failed to create job for {}", connectorPropsFile);
                        else
                            log.info("Created connector {}", info.result().name());
                    }
                });
                herder.putConnectorConfig(connectorProps.get(ConnectorConfig.NAME_CONFIG), connectorProps, false, cb);
                cb.get();
            }
        } catch (Throwable t) {
            log.error("Stopping after connector error", t);
            connect.stop();
            Exit.exit(3);
        }
        // Shutdown will be triggered by Ctrl-C or via HTTP shutdown request
        connect.awaitStop();
    } catch (Throwable t) {
        log.error("Stopping due to error", t);
        Exit.exit(2);
    }
}
Also used : ConnectorInfo(org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo) Connect(org.apache.kafka.connect.runtime.Connect) WorkerInfo(org.apache.kafka.connect.runtime.WorkerInfo) Time(org.apache.kafka.common.utils.Time) URI(java.net.URI) FileOffsetBackingStore(org.apache.kafka.connect.storage.FileOffsetBackingStore) RestServer(org.apache.kafka.connect.runtime.rest.RestServer) StandaloneHerder(org.apache.kafka.connect.runtime.standalone.StandaloneHerder) Worker(org.apache.kafka.connect.runtime.Worker) StandaloneConfig(org.apache.kafka.connect.runtime.standalone.StandaloneConfig) Herder(org.apache.kafka.connect.runtime.Herder) FutureCallback(org.apache.kafka.connect.util.FutureCallback) Plugins(org.apache.kafka.connect.runtime.isolation.Plugins)
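
The timing pattern used above (Time.SYSTEM plus hiResClockMs()) works the same way outside Connect; a small sketch, with Thread.sleep standing in for the real initialization work:

import org.apache.kafka.common.utils.Time;

public class StartupTimingSketch {
    public static void main(String[] args) throws InterruptedException {
        // same pattern as ConnectStandalone.main: take a high-resolution timestamp,
        // do the work, then log the difference
        Time time = Time.SYSTEM;
        long initStart = time.hiResClockMs();

        Thread.sleep(250);  // stand-in for the worker initialization done in the real code

        System.out.println("initialization took " + (time.hiResClockMs() - initStart) + "ms");
    }
}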

Aggregations

Time (org.apache.kafka.common.utils.Time) 125
MockTime (org.apache.kafka.common.utils.MockTime) 107
Test (org.junit.jupiter.api.Test) 63
MockClient (org.apache.kafka.clients.MockClient) 55
HashMap (java.util.HashMap) 53
Cluster (org.apache.kafka.common.Cluster) 41
Test (org.junit.Test) 40
Node (org.apache.kafka.common.Node) 39
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest) 32
MetadataResponse (org.apache.kafka.common.requests.MetadataResponse) 31
StringSerializer (org.apache.kafka.common.serialization.StringSerializer) 30
Metadata (org.apache.kafka.clients.Metadata) 28
ProducerMetadata (org.apache.kafka.clients.producer.internals.ProducerMetadata) 25
TopicPartition (org.apache.kafka.common.TopicPartition) 22
PartitionAssignor (org.apache.kafka.clients.consumer.internals.PartitionAssignor) 21
LogContext (org.apache.kafka.common.utils.LogContext) 17
Map (java.util.Map) 14
Properties (java.util.Properties) 14
MetricName (org.apache.kafka.common.MetricName) 14
ExecutionException (java.util.concurrent.ExecutionException) 13