Search in sources:

Example 31 with AdminClient

use of org.apache.kafka.clients.admin.AdminClient in project hive by apache.

From the class SingleNodeKafkaCluster, the method createTopic:

/**
 * Creates the given topic on the local broker (localhost:9092) with a single
 * partition and a replication factor of one, blocking until the broker
 * acknowledges the creation.
 *
 * @param topic the name of the topic to create
 * @throws RuntimeException if creation fails or the thread is interrupted
 */
private void createTopic(String topic) {
    Properties properties = new Properties();
    // Only bootstrap.servers matters to AdminClient; the deserializer settings
    // present in the original were consumer-only configuration and are dropped.
    properties.setProperty("bootstrap.servers", "localhost:9092");
    int numPartitions = 1;
    short replicationFactor = 1;
    NewTopic newTopic = new NewTopic(topic, numPartitions, replicationFactor);
    // try-with-resources guarantees the client (and its network threads) are
    // released even when topic creation fails; the original never closed it on
    // a failure path and ignored the creation result entirely.
    try (AdminClient adminClient = AdminClient.create(properties)) {
        // Wait for the broker to acknowledge so callers can rely on the topic
        // existing when this method returns.
        adminClient.createTopics(Collections.singletonList(newTopic)).all().get();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore interrupt status
        throw new RuntimeException("Interrupted while creating topic " + topic, e);
    } catch (ExecutionException e) {
        throw new RuntimeException("Failed to create topic " + topic, e);
    }
}
Also used : NewTopic(org.apache.kafka.clients.admin.NewTopic) Properties(java.util.Properties) AdminClient(org.apache.kafka.clients.admin.AdminClient)

Example 32 with AdminClient

use of org.apache.kafka.clients.admin.AdminClient in project hive by apache.

From the class DagUtils, the method getKafkaDelegationTokenForBrokers:

/**
 * Obtains a Kafka delegation token from the given brokers, authenticating over
 * SASL_PLAINTEXT with the HiveServer2 Kerberos keytab/principal, and adds the
 * token to the DAG's credentials under {@code KAFKA_DELEGATION_TOKEN_KEY}.
 *
 * @param dag          the Tez DAG whose credentials receive the token
 * @param conf         job configuration holding the HiveServer2 keytab/principal
 * @param kafkaBrokers comma-separated broker list used as bootstrap servers
 * @throws RuntimeException if principal resolution or token creation fails
 */
private void getKafkaDelegationTokenForBrokers(DAG dag, JobConf conf, String kafkaBrokers) {
    LOG.info("Getting kafka credentials for brokers: {}", kafkaBrokers);
    String keytab = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_SERVER2_KERBEROS_KEYTAB);
    String principal = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL);
    try {
        // Resolves any _HOST placeholder in the configured principal.
        principal = SecurityUtil.getServerPrincipal(principal, "0.0.0.0");
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    Properties config = new Properties();
    config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaBrokers);
    config.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
    String jaasConfig = String.format("%s %s %s %s serviceName=\"%s\" keyTab=\"%s\" principal=\"%s\";", "com.sun.security.auth.module.Krb5LoginModule required", "debug=true", "useKeyTab=true", "storeKey=true", "kafka", keytab, principal);
    config.put(SaslConfigs.SASL_JAAS_CONFIG, jaasConfig);
    LOG.debug("Jaas config for requesting kafka credentials: {}", jaasConfig);
    DelegationToken token;
    // try-with-resources: the original created the AdminClient and never
    // closed it, leaking its network threads on every invocation.
    try (AdminClient admin = AdminClient.create(config)) {
        CreateDelegationTokenOptions createDelegationTokenOptions = new CreateDelegationTokenOptions();
        CreateDelegationTokenResult createResult = admin.createDelegationToken(createDelegationTokenOptions);
        try {
            token = createResult.delegationToken().get();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore interrupt status
            throw new RuntimeException("Exception while getting kafka delegation tokens", e);
        } catch (ExecutionException e) {
            throw new RuntimeException("Exception while getting kafka delegation tokens", e);
        }
    }
    LOG.info("Got kafka delegation token: {}", token);
    dag.getCredentials().addToken(KAFKA_DELEGATION_TOKEN_KEY, new Token<>(token.tokenInfo().tokenId().getBytes(), token.hmac(), null, new Text("kafka")));
}
Also used : DelegationToken(org.apache.kafka.common.security.token.delegation.DelegationToken) CreateDelegationTokenOptions(org.apache.kafka.clients.admin.CreateDelegationTokenOptions) Text(org.apache.hadoop.io.Text) IOException(java.io.IOException) Properties(java.util.Properties) CreateDelegationTokenResult(org.apache.kafka.clients.admin.CreateDelegationTokenResult) ExecutionException(java.util.concurrent.ExecutionException) AdminClient(org.apache.kafka.clients.admin.AdminClient)

Example 33 with AdminClient

use of org.apache.kafka.clients.admin.AdminClient in project zipkin by openzipkin.

From the class KafkaExtension, the method prepareTopics:

/**
 * Ensures each comma-separated topic name exists with the given partition
 * count and a replication factor of one. A topic that already exists is
 * treated as success; any other failure aborts the test.
 *
 * @param topics     comma-separated topic names; empty entries are skipped
 * @param partitions number of partitions for each new topic
 * @throws TestAbortedException if the topics could not be created
 */
void prepareTopics(String topics, int partitions) {
    Properties config = new Properties();
    config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer());
    List<NewTopic> newTopics = new ArrayList<>();
    for (String topic : topics.split(",")) {
        if ("".equals(topic))
            continue; // tolerate stray commas / leading-trailing separators
        newTopics.add(new NewTopic(topic, partitions, (short) 1));
    }
    try (AdminClient adminClient = AdminClient.create(config)) {
        adminClient.createTopics(newTopics).all().get();
    } catch (InterruptedException | ExecutionException e) {
        if (e instanceof InterruptedException) {
            Thread.currentThread().interrupt(); // restore interrupt status
        }
        // instanceof is null-safe, so the original's explicit null check on
        // getCause() was redundant; an already-existing topic is not an error.
        if (e.getCause() instanceof TopicExistsException)
            return;
        throw new TestAbortedException("Topics could not be created " + newTopics + ": " + e.getMessage(), e);
    }
}
Also used : ArrayList(java.util.ArrayList) TestAbortedException(org.opentest4j.TestAbortedException) NewTopic(org.apache.kafka.clients.admin.NewTopic) Properties(java.util.Properties) ExecutionException(java.util.concurrent.ExecutionException) TopicExistsException(org.apache.kafka.common.errors.TopicExistsException) AdminClient(org.apache.kafka.clients.admin.AdminClient)

Example 34 with AdminClient

use of org.apache.kafka.clients.admin.AdminClient in project cruise-control by linkedin.

From the class ExecutorTest, the method testMoveNonExistingPartition:

/**
 * Verifies executor behavior when proposals reference partitions (TP2, TP3)
 * that were never created: only the proposals for existing partitions
 * (TP0, TP1) are expected to be checked.
 */
@Test
public void testMoveNonExistingPartition() throws InterruptedException {
    ZkUtils zkUtils = KafkaCruiseControlUnitTestUtils.zkUtils(zookeeper().getConnectionString());
    // try-with-resources: the original leaked this AdminClient. close() waits
    // for the pending createTopics call, so the topics are still created.
    try (AdminClient adminClient = getAdminClient(broker(0).getPlaintextAddr())) {
        adminClient.createTopics(Arrays.asList(new NewTopic(TOPIC_0, 1, (short) 1), new NewTopic(TOPIC_1, 1, (short) 2)));
    }
    // createTopics() also polls until both brokers see the topic metadata.
    Map<String, TopicDescription> topicDescriptions = createTopics();
    int initialLeader0 = topicDescriptions.get(TOPIC_0).partitions().get(0).leader().id();
    int initialLeader1 = topicDescriptions.get(TOPIC_1).partitions().get(0).leader().id();
    // TP0/TP1 exist; TP2/TP3 intentionally do not.
    ExecutionProposal proposal0 = new ExecutionProposal(TP0, 0, initialLeader0, Collections.singletonList(initialLeader0), Collections.singletonList(initialLeader0 == 0 ? 1 : 0));
    ExecutionProposal proposal1 = new ExecutionProposal(TP1, 0, initialLeader1, Arrays.asList(initialLeader1, initialLeader1 == 0 ? 1 : 0), Arrays.asList(initialLeader1 == 0 ? 1 : 0, initialLeader1));
    ExecutionProposal proposal2 = new ExecutionProposal(TP2, 0, initialLeader0, Collections.singletonList(initialLeader0), Collections.singletonList(initialLeader0 == 0 ? 1 : 0));
    ExecutionProposal proposal3 = new ExecutionProposal(TP3, 0, initialLeader1, Arrays.asList(initialLeader1, initialLeader1 == 0 ? 1 : 0), Arrays.asList(initialLeader1 == 0 ? 1 : 0, initialLeader1));
    Collection<ExecutionProposal> proposalsToExecute = Arrays.asList(proposal0, proposal1, proposal2, proposal3);
    // Only the proposals for existing partitions should be verified.
    Collection<ExecutionProposal> proposalsToCheck = Arrays.asList(proposal0, proposal1);
    executeAndVerifyProposals(zkUtils, proposalsToExecute, proposalsToCheck);
}
Also used : NewTopic(org.apache.kafka.clients.admin.NewTopic) TopicDescription(org.apache.kafka.clients.admin.TopicDescription) ZkUtils(kafka.utils.ZkUtils) AdminClient(org.apache.kafka.clients.admin.AdminClient) Test(org.junit.Test)

Example 35 with AdminClient

use of org.apache.kafka.clients.admin.AdminClient in project cruise-control by linkedin.

From the class ExecutorTest, the method createTopics:

/**
 * Creates TOPIC_0 (1 replica) and TOPIC_1 (2 replicas), then polls both
 * brokers until each reports metadata for both topics, so the Executor never
 * observes stale metadata.
 *
 * @return the topic descriptions as seen by broker 0
 * @throws InterruptedException if interrupted while waiting for metadata
 */
private Map<String, TopicDescription> createTopics() throws InterruptedException {
    // try-with-resources: the original leaked this AdminClient. close() waits
    // for the pending createTopics request, so the topics are still created.
    try (AdminClient adminClient = getAdminClient(broker(0).getPlaintextAddr())) {
        adminClient.createTopics(Arrays.asList(new NewTopic(TOPIC_0, 1, (short) 1), new NewTopic(TOPIC_1, 1, (short) 2)));
    }
    // We need to use the admin clients to query the metadata from two different brokers to make sure that
    // both brokers have the latest metadata. Otherwise the Executor may get confused when it does not
    // see expected topics in the metadata.
    Map<String, TopicDescription> topicDescriptions0 = null;
    Map<String, TopicDescription> topicDescriptions1 = null;
    do {
        try (AdminClient adminClient0 = getAdminClient(broker(0).getPlaintextAddr());
            AdminClient adminClient1 = getAdminClient(broker(1).getPlaintextAddr())) {
            topicDescriptions0 = adminClient0.describeTopics(Arrays.asList(TOPIC_0, TOPIC_1)).all().get();
            topicDescriptions1 = adminClient1.describeTopics(Arrays.asList(TOPIC_0, TOPIC_1)).all().get();
            // The method declares InterruptedException, so propagate instead of
            // swallowing it with printStackTrace as the original did.
            Thread.sleep(100);
        } catch (ExecutionException ee) {
            // Topics may not be visible on this broker yet; retry.
        }
    } while (topicDescriptions0 == null || topicDescriptions0.size() < 2 || topicDescriptions1 == null || topicDescriptions1.size() < 2);
    return topicDescriptions0;
}
Also used : NewTopic(org.apache.kafka.clients.admin.NewTopic) TopicDescription(org.apache.kafka.clients.admin.TopicDescription) ExecutionException(java.util.concurrent.ExecutionException) AdminClient(org.apache.kafka.clients.admin.AdminClient)

Aggregations

AdminClient (org.apache.kafka.clients.admin.AdminClient)70 Test (org.junit.Test)38 KafkaFutureImpl (org.apache.kafka.common.internals.KafkaFutureImpl)31 NewTopic (org.apache.kafka.clients.admin.NewTopic)30 StreamsConfig (org.apache.kafka.streams.StreamsConfig)29 MockAdminClient (org.apache.kafka.clients.admin.MockAdminClient)27 HashMap (java.util.HashMap)24 TopicMetadataAndConfig (org.apache.kafka.clients.admin.CreateTopicsResult.TopicMetadataAndConfig)18 TopicDescription (org.apache.kafka.clients.admin.TopicDescription)18 Config (org.apache.kafka.clients.admin.Config)15 Map (java.util.Map)14 ConsumerConfig (org.apache.kafka.clients.consumer.ConsumerConfig)14 ProducerConfig (org.apache.kafka.clients.producer.ProducerConfig)14 TopicConfig (org.apache.kafka.common.config.TopicConfig)13 MockTime (org.apache.kafka.common.utils.MockTime)13 TopicExistsException (org.apache.kafka.common.errors.TopicExistsException)11 ArrayList (java.util.ArrayList)10 TopicPartitionInfo (org.apache.kafka.common.TopicPartitionInfo)10 ConfigResource (org.apache.kafka.common.config.ConfigResource)10 UnknownTopicOrPartitionException (org.apache.kafka.common.errors.UnknownTopicOrPartitionException)10