
Example 46 with Time

Use of org.apache.kafka.common.utils.Time in project kafka by apache.

From the class KafkaAdminClientTest, method testDeleteConsumerGroupsNumRetries:

@Test
public void testDeleteConsumerGroupsNumRetries() throws Exception {
    final Cluster cluster = mockCluster(3, 0);
    final Time time = new MockTime();
    final List<String> groupIds = singletonList("groupId");
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, AdminClientConfig.RETRIES_CONFIG, "0")) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
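        // The coordinator lookup succeeds, but the DeleteGroups response below reports NOT_COORDINATOR.
        // With RETRIES_CONFIG set to "0" the admin client does not retry, so result.all() fails with a TimeoutException.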
        env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        final DeletableGroupResultCollection validResponse = new DeletableGroupResultCollection();
        validResponse.add(new DeletableGroupResult().setGroupId("groupId").setErrorCode(Errors.NOT_COORDINATOR.code()));
        env.kafkaClient().prepareResponse(new DeleteGroupsResponse(new DeleteGroupsResponseData().setResults(validResponse)));
        env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        final DeleteConsumerGroupsResult result = env.adminClient().deleteConsumerGroups(groupIds);
        TestUtils.assertFutureError(result.all(), TimeoutException.class);
    }
}
Also used: DeletableGroupResult (org.apache.kafka.common.message.DeleteGroupsResponseData.DeletableGroupResult), DeleteGroupsResponseData (org.apache.kafka.common.message.DeleteGroupsResponseData), DeleteGroupsResponse (org.apache.kafka.common.requests.DeleteGroupsResponse), Cluster (org.apache.kafka.common.Cluster), DeletableGroupResultCollection (org.apache.kafka.common.message.DeleteGroupsResponseData.DeletableGroupResultCollection), Time (org.apache.kafka.common.utils.Time), MockTime (org.apache.kafka.common.utils.MockTime), ParameterizedTest (org.junit.jupiter.params.ParameterizedTest), Test (org.junit.jupiter.api.Test)
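
The test above leans on MockTime so that the request timeout can elapse without any real waiting. Below is a minimal sketch of the mock clock on its own; the class name is an illustrative placeholder, not code from the Kafka test suite:

import org.apache.kafka.common.utils.MockTime;
import org.apache.kafka.common.utils.Time;

public class MockTimeSketch {
    public static void main(String[] args) {
        // MockTime implements Time but only advances when told to.
        Time time = new MockTime();
        long start = time.milliseconds();
        // sleep() returns immediately and simply moves the mock clock forward by 30 seconds.
        time.sleep(30_000);
        System.out.println("elapsed ms: " + (time.milliseconds() - start)); // prints 30000
    }
}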

Example 47 with Time

Use of org.apache.kafka.common.utils.Time in project kafka by apache.

From the class KafkaAdminClientTest, method testListConsumerGroupOffsetsNumRetries:

@Test
public void testListConsumerGroupOffsetsNumRetries() throws Exception {
    final Cluster cluster = mockCluster(3, 0);
    final Time time = new MockTime();
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, AdminClientConfig.RETRIES_CONFIG, "0")) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
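        // Same pattern as the previous example: the OffsetFetch response reports NOT_COORDINATOR and,
        // with zero retries, the returned future times out instead of retrying against a rediscovered coordinator.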
        env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        env.kafkaClient().prepareResponse(new OffsetFetchResponse(Errors.NOT_COORDINATOR, Collections.emptyMap()));
        env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        final ListConsumerGroupOffsetsResult result = env.adminClient().listConsumerGroupOffsets(GROUP_ID);
        TestUtils.assertFutureError(result.partitionsToOffsetAndMetadata(), TimeoutException.class);
    }
}
Also used: OffsetFetchResponse (org.apache.kafka.common.requests.OffsetFetchResponse), Cluster (org.apache.kafka.common.Cluster), Time (org.apache.kafka.common.utils.Time), MockTime (org.apache.kafka.common.utils.MockTime), ParameterizedTest (org.junit.jupiter.params.ParameterizedTest), Test (org.junit.jupiter.api.Test)
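
AdminClientUnitTestEnv is internal test scaffolding, but the retries setting both NumRetries tests exercise is an ordinary client config. The sketch below shows how a real AdminClient might be created with retries disabled; the bootstrap address and group id are placeholders, not values from the Kafka sources:

import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;

public class AdminRetriesSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        // With retries disabled, a retriable error such as NOT_COORDINATOR is not retried
        // and the operation's future fails once the request times out.
        props.put(AdminClientConfig.RETRIES_CONFIG, "0");
        try (Admin admin = Admin.create(props)) {
            admin.listConsumerGroupOffsets("my-group") // placeholder group id
                 .partitionsToOffsetAndMetadata()
                 .whenComplete((offsets, error) -> {
                     if (error != null)
                         System.err.println("lookup failed: " + error);
                 });
        }
    }
}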

Example 48 with Time

Use of org.apache.kafka.common.utils.Time in project kafka by apache.

From the class ConnectStandalone, method main:

public static void main(String[] args) {
    if (args.length < 2 || Arrays.asList(args).contains("--help")) {
        log.info("Usage: ConnectStandalone worker.properties connector1.properties [connector2.properties ...]");
        Exit.exit(1);
    }
    try {
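        // Time.SYSTEM is the real wall-clock implementation; it times initialization below and is also passed to the Worker.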
        Time time = Time.SYSTEM;
        log.info("Kafka Connect standalone worker initializing ...");
        long initStart = time.hiResClockMs();
        WorkerInfo initInfo = new WorkerInfo();
        initInfo.logAll();
        String workerPropsFile = args[0];
        Map<String, String> workerProps = !workerPropsFile.isEmpty() ? Utils.propsToStringMap(Utils.loadProps(workerPropsFile)) : Collections.emptyMap();
        log.info("Scanning for plugin classes. This might take a moment ...");
        Plugins plugins = new Plugins(workerProps);
        plugins.compareAndSwapWithDelegatingLoader();
        StandaloneConfig config = new StandaloneConfig(workerProps);
        String kafkaClusterId = ConnectUtils.lookupKafkaClusterId(config);
        log.debug("Kafka cluster ID: {}", kafkaClusterId);
        RestServer rest = new RestServer(config);
        rest.initializeServer();
        URI advertisedUrl = rest.advertisedUrl();
        String workerId = advertisedUrl.getHost() + ":" + advertisedUrl.getPort();
        ConnectorClientConfigOverridePolicy connectorClientConfigOverridePolicy = plugins.newPlugin(config.getString(WorkerConfig.CONNECTOR_CLIENT_POLICY_CLASS_CONFIG), config, ConnectorClientConfigOverridePolicy.class);
        Worker worker = new Worker(workerId, time, plugins, config, new FileOffsetBackingStore(), connectorClientConfigOverridePolicy);
        Herder herder = new StandaloneHerder(worker, kafkaClusterId, connectorClientConfigOverridePolicy);
        final Connect connect = new Connect(herder, rest);
        log.info("Kafka Connect standalone worker initialization took {}ms", time.hiResClockMs() - initStart);
        try {
            connect.start();
            for (final String connectorPropsFile : Arrays.copyOfRange(args, 1, args.length)) {
                Map<String, String> connectorProps = Utils.propsToStringMap(Utils.loadProps(connectorPropsFile));
                FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>((error, info) -> {
                    if (error != null)
                        log.error("Failed to create job for {}", connectorPropsFile);
                    else
                        log.info("Created connector {}", info.result().name());
                });
                herder.putConnectorConfig(connectorProps.get(ConnectorConfig.NAME_CONFIG), connectorProps, false, cb);
                cb.get();
            }
        } catch (Throwable t) {
            log.error("Stopping after connector error", t);
            connect.stop();
            Exit.exit(3);
        }
        // Shutdown will be triggered by Ctrl-C or via HTTP shutdown request
        connect.awaitStop();
    } catch (Throwable t) {
        log.error("Stopping due to error", t);
        Exit.exit(2);
    }
}
Also used: Connect (org.apache.kafka.connect.runtime.Connect), WorkerInfo (org.apache.kafka.connect.runtime.WorkerInfo), Time (org.apache.kafka.common.utils.Time), ConnectorClientConfigOverridePolicy (org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy), URI (java.net.URI), FileOffsetBackingStore (org.apache.kafka.connect.storage.FileOffsetBackingStore), RestServer (org.apache.kafka.connect.runtime.rest.RestServer), StandaloneHerder (org.apache.kafka.connect.runtime.standalone.StandaloneHerder), Worker (org.apache.kafka.connect.runtime.Worker), StandaloneConfig (org.apache.kafka.connect.runtime.standalone.StandaloneConfig), Herder (org.apache.kafka.connect.runtime.Herder), FutureCallback (org.apache.kafka.connect.util.FutureCallback), Plugins (org.apache.kafka.connect.runtime.isolation.Plugins)
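
Unlike the test examples, ConnectStandalone uses Time.SYSTEM, the wall-clock implementation, both to time initialization and as the clock handed to the Worker. The timing pattern on its own looks like this (a minimal sketch with a placeholder class name, not part of ConnectStandalone):

import org.apache.kafka.common.utils.Time;

public class ElapsedTimeSketch {
    public static void main(String[] args) throws InterruptedException {
        Time time = Time.SYSTEM;               // real wall-clock Time implementation
        long startMs = time.hiResClockMs();    // monotonic clock, reported in milliseconds
        Thread.sleep(50);                      // stand-in for the work being timed
        System.out.println("took " + (time.hiResClockMs() - startMs) + "ms");
    }
}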

Example 49 with Time

Use of org.apache.kafka.common.utils.Time in project kafka by apache.

From the class KafkaProducerTest, method testCommitTransactionWithSendToInvalidTopic:

@Test
public void testCommitTransactionWithSendToInvalidTopic() throws Exception {
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "some.id");
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
    configs.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "15000");
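    // MockTime gives the producer a deterministic clock, so the test does not depend on wall-clock timing.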
    Time time = new MockTime();
    MetadataResponse initialUpdateResponse = RequestTestUtils.metadataUpdateWith(1, emptyMap());
    ProducerMetadata metadata = newMetadata(0, Long.MAX_VALUE);
    metadata.updateWithCurrentRequestVersion(initialUpdateResponse, false, time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "some.id", NODE));
    client.prepareResponse(initProducerIdResponse(1L, (short) 5, Errors.NONE));
    // Invalid topic name due to space
    String invalidTopicName = "topic abc";
    ProducerRecord<String, String> record = new ProducerRecord<>(invalidTopicName, "HelloKafka");
    List<MetadataResponse.TopicMetadata> topicMetadata = new ArrayList<>();
    topicMetadata.add(new MetadataResponse.TopicMetadata(Errors.INVALID_TOPIC_EXCEPTION, invalidTopicName, false, Collections.emptyList()));
    MetadataResponse updateResponse = RequestTestUtils.metadataResponse(new ArrayList<>(initialUpdateResponse.brokers()), initialUpdateResponse.clusterId(), initialUpdateResponse.controller().id(), topicMetadata);
    client.prepareMetadataUpdate(updateResponse);
    try (Producer<String, String> producer = kafkaProducer(configs, new StringSerializer(), new StringSerializer(), metadata, client, null, time)) {
        producer.initTransactions();
        producer.beginTransaction();
        TestUtils.assertFutureError(producer.send(record), InvalidTopicException.class);
        assertThrows(KafkaException.class, producer::commitTransaction);
    }
}
Also used: ProducerMetadata (org.apache.kafka.clients.producer.internals.ProducerMetadata), HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), MockTime (org.apache.kafka.common.utils.MockTime), Time (org.apache.kafka.common.utils.Time), MetadataResponse (org.apache.kafka.common.requests.MetadataResponse), StringSerializer (org.apache.kafka.common.serialization.StringSerializer), MockClient (org.apache.kafka.clients.MockClient), ParameterizedTest (org.junit.jupiter.params.ParameterizedTest), Test (org.junit.jupiter.api.Test)
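
The test drives a transactional producer against mocked responses and expects the send to fail because topic names may not contain spaces. Outside of tests, the same calls follow the usual transactional pattern; the sketch below is a hedged illustration with placeholder bootstrap address, transactional id, and topic name, not code from the Kafka project:

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.serialization.StringSerializer;

public class TransactionalProducerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");   // placeholder
        props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "sketch-txn-id");     // placeholder
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.initTransactions();
            producer.beginTransaction();
            try {
                producer.send(new ProducerRecord<>("valid-topic", "HelloKafka")); // placeholder topic
                producer.commitTransaction();
            } catch (KafkaException e) {
                // A failed send poisons the transaction, so commitTransaction() throws; abort instead.
                // (Fatal errors such as a fenced producer would instead require closing the producer.)
                producer.abortTransaction();
            }
        }
    }
}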

Example 50 with Time

Use of org.apache.kafka.common.utils.Time in project kafka by apache.

From the class KafkaProducerTest, method testMeasureAbortTransactionDuration:

@Test
public void testMeasureAbortTransactionDuration() {
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "some.id");
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
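    // MockTime(1) auto-advances the clock by 1 ms on every read, so each abort records a non-zero,
    // strictly increasing txn-abort-time-ns-total value.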
    Time time = new MockTime(1);
    MetadataResponse initialUpdateResponse = RequestTestUtils.metadataUpdateWith(1, singletonMap("topic", 1));
    ProducerMetadata metadata = newMetadata(0, Long.MAX_VALUE);
    MockClient client = new MockClient(time, metadata);
    client.updateMetadata(initialUpdateResponse);
    client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "some.id", NODE));
    client.prepareResponse(initProducerIdResponse(1L, (short) 5, Errors.NONE));
    try (KafkaProducer<String, String> producer = kafkaProducer(configs, new StringSerializer(), new StringSerializer(), metadata, client, null, time)) {
        producer.initTransactions();
        client.prepareResponse(endTxnResponse(Errors.NONE));
        producer.beginTransaction();
        producer.abortTransaction();
        double first = getMetricValue(producer, "txn-abort-time-ns-total");
        assertTrue(first > 0);
        client.prepareResponse(endTxnResponse(Errors.NONE));
        producer.beginTransaction();
        producer.abortTransaction();
        assertTrue(getMetricValue(producer, "txn-abort-time-ns-total") > first);
    }
}
Also used: ProducerMetadata (org.apache.kafka.clients.producer.internals.ProducerMetadata), HashMap (java.util.HashMap), MetadataResponse (org.apache.kafka.common.requests.MetadataResponse), MockTime (org.apache.kafka.common.utils.MockTime), Time (org.apache.kafka.common.utils.Time), StringSerializer (org.apache.kafka.common.serialization.StringSerializer), MockClient (org.apache.kafka.clients.MockClient), ParameterizedTest (org.junit.jupiter.params.ParameterizedTest), Test (org.junit.jupiter.api.Test)
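
What makes the duration metric grow in this test is MockTime's auto-tick: constructed as new MockTime(1), the mock clock moves forward by 1 ms each time it is read, so successive measurements differ even though no real time passes. A minimal sketch of that behavior on its own (illustrative placeholder class, not from the Kafka tests):

import org.apache.kafka.common.utils.MockTime;
import org.apache.kafka.common.utils.Time;

public class AutoTickSketch {
    public static void main(String[] args) {
        Time time = new MockTime(1);           // advance 1 ms on every clock read
        long first = time.milliseconds();
        long second = time.milliseconds();
        // Each read moved the clock forward, so the readings are strictly increasing.
        System.out.println(second > first);    // true
    }
}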

Aggregations

Time (org.apache.kafka.common.utils.Time): 125
MockTime (org.apache.kafka.common.utils.MockTime): 107
Test (org.junit.jupiter.api.Test): 63
MockClient (org.apache.kafka.clients.MockClient): 55
HashMap (java.util.HashMap): 53
Cluster (org.apache.kafka.common.Cluster): 41
Test (org.junit.Test): 40
Node (org.apache.kafka.common.Node): 39
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 32
MetadataResponse (org.apache.kafka.common.requests.MetadataResponse): 31
StringSerializer (org.apache.kafka.common.serialization.StringSerializer): 30
Metadata (org.apache.kafka.clients.Metadata): 28
ProducerMetadata (org.apache.kafka.clients.producer.internals.ProducerMetadata): 25
TopicPartition (org.apache.kafka.common.TopicPartition): 22
PartitionAssignor (org.apache.kafka.clients.consumer.internals.PartitionAssignor): 21
LogContext (org.apache.kafka.common.utils.LogContext): 17
Map (java.util.Map): 14
Properties (java.util.Properties): 14
MetricName (org.apache.kafka.common.MetricName): 14
ExecutionException (java.util.concurrent.ExecutionException): 13