use of org.apache.kafka.clients.admin.AdminClient in project hive by apache.
the class SingleNodeKafkaCluster method createTopic.
private void createTopic(String topic) {
  Properties properties = new Properties();
  properties.setProperty("bootstrap.servers", "localhost:9092");
  properties.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
  properties.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
  int numPartitions = 1;
  short replicationFactor = 1;
  AdminClient adminClient = AdminClient.create(properties);
  NewTopic newTopic = new NewTopic(topic, numPartitions, replicationFactor);
  adminClient.createTopics(Collections.singletonList(newTopic));
  adminClient.close();
}
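Note that createTopics is asynchronous; the snippet above relies on close() to flush the pending request. A minimal sketch of an explicit wait on the same call, reusing the properties and topic settings defined above, could block on the returned futures:

// Variant that blocks until the broker confirms the topic was created (or the request fails).
try (AdminClient adminClient = AdminClient.create(properties)) {
  adminClient.createTopics(Collections.singletonList(new NewTopic(topic, numPartitions, replicationFactor)))
      .all()
      .get();
} catch (InterruptedException | ExecutionException e) {
  throw new RuntimeException("Failed to create topic " + topic, e);
}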
use of org.apache.kafka.clients.admin.AdminClient in project hive by apache.
the class DagUtils method getKafkaDelegationTokenForBrokers.
private void getKafkaDelegationTokenForBrokers(DAG dag, JobConf conf, String kafkaBrokers) {
  LOG.info("Getting kafka credentials for brokers: {}", kafkaBrokers);
  String keytab = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_SERVER2_KERBEROS_KEYTAB);
  String principal = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL);
  try {
    principal = SecurityUtil.getServerPrincipal(principal, "0.0.0.0");
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
  // Build an AdminClient that authenticates to the brokers with Kerberos over SASL_PLAINTEXT.
  Properties config = new Properties();
  config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaBrokers);
  config.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
  String jaasConfig = String.format(
      "%s %s %s %s serviceName=\"%s\" keyTab=\"%s\" principal=\"%s\";",
      "com.sun.security.auth.module.Krb5LoginModule required",
      "debug=true", "useKeyTab=true", "storeKey=true", "kafka", keytab, principal);
  config.put(SaslConfigs.SASL_JAAS_CONFIG, jaasConfig);
  LOG.debug("Jaas config for requesting kafka credentials: {}", jaasConfig);
  // Request a delegation token from the brokers and add it to the DAG's credentials,
  // so downstream tasks can authenticate to Kafka without access to the keytab.
  AdminClient admin = AdminClient.create(config);
  CreateDelegationTokenOptions createDelegationTokenOptions = new CreateDelegationTokenOptions();
  CreateDelegationTokenResult createResult = admin.createDelegationToken(createDelegationTokenOptions);
  DelegationToken token;
  try {
    token = createResult.delegationToken().get();
  } catch (InterruptedException | ExecutionException e) {
    throw new RuntimeException("Exception while getting kafka delegation tokens", e);
  }
  LOG.info("Got kafka delegation token: {}", token);
  dag.getCredentials().addToken(KAFKA_DELEGATION_TOKEN_KEY,
      new Token<>(token.tokenInfo().tokenId().getBytes(), token.hmac(), null, new Text("kafka")));
}
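For context on how the resulting token is consumed: Kafka clients authenticate with delegation tokens over SASL/SCRAM, passing the token id as the username and the base64-encoded HMAC as the password with tokenauth="true". A minimal sketch of such a client config follows; the tokenClientConfig helper is hypothetical, not Hive's actual wiring, and it assumes SCRAM-SHA-256 is enabled on the brokers:

// Illustrative only: how a client could authenticate with the delegation token obtained above.
// Hypothetical helper; assumes the brokers accept SASL/SCRAM-SHA-256 for token authentication.
private static Properties tokenClientConfig(String kafkaBrokers, DelegationToken token) {
  Properties props = new Properties();
  props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, kafkaBrokers);
  props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
  props.put(SaslConfigs.SASL_MECHANISM, "SCRAM-SHA-256");
  props.put(SaslConfigs.SASL_JAAS_CONFIG, String.format(
      "org.apache.kafka.common.security.scram.ScramLoginModule required "
          + "username=\"%s\" password=\"%s\" tokenauth=\"true\";",
      token.tokenInfo().tokenId(),
      Base64.getEncoder().encodeToString(token.hmac())));
  return props;
}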
use of org.apache.kafka.clients.admin.AdminClient in project zipkin by openzipkin.
the class KafkaExtension method prepareTopics.
void prepareTopics(String topics, int partitions) {
  Properties config = new Properties();
  config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer());
  List<NewTopic> newTopics = new ArrayList<>();
  for (String topic : topics.split(",")) {
    if ("".equals(topic))
      continue;
    newTopics.add(new NewTopic(topic, partitions, (short) 1));
  }
  try (AdminClient adminClient = AdminClient.create(config)) {
    adminClient.createTopics(newTopics).all().get();
  } catch (InterruptedException | ExecutionException e) {
    if (e.getCause() != null && e.getCause() instanceof TopicExistsException)
      return;
    throw new TestAbortedException("Topics could not be created " + newTopics + ": " + e.getMessage(), e);
  }
}
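As a follow-up check, listTopics can confirm which of the requested topics the broker actually knows about. A small sketch, assuming the same bootstrapServer() accessor; the topicsExist method name is illustrative, not part of zipkin's KafkaExtension:

// Illustrative helper: true only if every non-empty topic name is already visible to the broker.
boolean topicsExist(String topics) throws InterruptedException, ExecutionException {
  Properties config = new Properties();
  config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer());
  try (AdminClient adminClient = AdminClient.create(config)) {
    Set<String> existing = adminClient.listTopics().names().get();
    for (String topic : topics.split(",")) {
      if (!topic.isEmpty() && !existing.contains(topic)) {
        return false;
      }
    }
    return true;
  }
}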
use of org.apache.kafka.clients.admin.AdminClient in project cruise-control by linkedin.
the class ExecutorTest method testMoveNonExistingPartition.
@Test
public void testMoveNonExistingPartition() throws InterruptedException {
  ZkUtils zkUtils = KafkaCruiseControlUnitTestUtils.zkUtils(zookeeper().getConnectionString());
  AdminClient adminClient = getAdminClient(broker(0).getPlaintextAddr());
  adminClient.createTopics(Arrays.asList(new NewTopic(TOPIC_0, 1, (short) 1), new NewTopic(TOPIC_1, 1, (short) 2)));
  Map<String, TopicDescription> topicDescriptions = createTopics();
  int initialLeader0 = topicDescriptions.get(TOPIC_0).partitions().get(0).leader().id();
  int initialLeader1 = topicDescriptions.get(TOPIC_1).partitions().get(0).leader().id();
  ExecutionProposal proposal0 = new ExecutionProposal(TP0, 0, initialLeader0,
      Collections.singletonList(initialLeader0),
      Collections.singletonList(initialLeader0 == 0 ? 1 : 0));
  ExecutionProposal proposal1 = new ExecutionProposal(TP1, 0, initialLeader1,
      Arrays.asList(initialLeader1, initialLeader1 == 0 ? 1 : 0),
      Arrays.asList(initialLeader1 == 0 ? 1 : 0, initialLeader1));
  ExecutionProposal proposal2 = new ExecutionProposal(TP2, 0, initialLeader0,
      Collections.singletonList(initialLeader0),
      Collections.singletonList(initialLeader0 == 0 ? 1 : 0));
  ExecutionProposal proposal3 = new ExecutionProposal(TP3, 0, initialLeader1,
      Arrays.asList(initialLeader1, initialLeader1 == 0 ? 1 : 0),
      Arrays.asList(initialLeader1 == 0 ? 1 : 0, initialLeader1));
  Collection<ExecutionProposal> proposalsToExecute = Arrays.asList(proposal0, proposal1, proposal2, proposal3);
  Collection<ExecutionProposal> proposalsToCheck = Arrays.asList(proposal0, proposal1);
  executeAndVerifyProposals(zkUtils, proposalsToExecute, proposalsToCheck);
}
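The getAdminClient helper referenced above is not included in this excerpt. A minimal sketch of what it presumably does (construct an AdminClient pointed at a single broker's plaintext address) is below; the actual cruise-control implementation may differ:

// Assumed shape of the getAdminClient helper used above (not the verbatim cruise-control code).
private static AdminClient getAdminClient(String bootstrapServer) {
  Properties props = new Properties();
  props.setProperty(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer);
  return AdminClient.create(props);
}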
use of org.apache.kafka.clients.admin.AdminClient in project cruise-control by linkedin.
the class ExecutorTest method createTopics.
private Map<String, TopicDescription> createTopics() throws InterruptedException {
  AdminClient adminClient = getAdminClient(broker(0).getPlaintextAddr());
  adminClient.createTopics(Arrays.asList(new NewTopic(TOPIC_0, 1, (short) 1), new NewTopic(TOPIC_1, 1, (short) 2)));
  // We need to use the admin clients to query the metadata from two different brokers to make sure that
  // both brokers have the latest metadata. Otherwise the Executor may get confused when it does not
  // see expected topics in the metadata.
  Map<String, TopicDescription> topicDescriptions0 = null;
  Map<String, TopicDescription> topicDescriptions1 = null;
  do {
    try (AdminClient adminClient0 = getAdminClient(broker(0).getPlaintextAddr());
         AdminClient adminClient1 = getAdminClient(broker(1).getPlaintextAddr())) {
      topicDescriptions0 = adminClient0.describeTopics(Arrays.asList(TOPIC_0, TOPIC_1)).all().get();
      topicDescriptions1 = adminClient1.describeTopics(Arrays.asList(TOPIC_0, TOPIC_1)).all().get();
      try {
        Thread.sleep(100);
      } catch (InterruptedException e) {
        e.printStackTrace();
      }
    } catch (ExecutionException ee) {
      // Let it go.
    }
  } while (topicDescriptions0 == null || topicDescriptions0.size() < 2
      || topicDescriptions1 == null || topicDescriptions1.size() < 2);
  return topicDescriptions0;
}
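The do/while loop above retries until both brokers report both topics and can spin indefinitely if they never do. A hedged variant that polls a single broker with an explicit deadline could look like the sketch below; the 30-second bound and the single-broker check are simplifications for illustration, not cruise-control's actual logic (the original deliberately checks both brokers for the reason given in its comment):

// Illustrative: wait for topic metadata with a hard deadline instead of an unbounded loop.
long deadlineMs = System.currentTimeMillis() + 30_000L;
Map<String, TopicDescription> descriptions = null;
while (System.currentTimeMillis() < deadlineMs) {
  try (AdminClient admin = getAdminClient(broker(0).getPlaintextAddr())) {
    descriptions = admin.describeTopics(Arrays.asList(TOPIC_0, TOPIC_1)).all().get();
    if (descriptions.size() == 2) {
      break;
    }
  } catch (ExecutionException e) {
    // Metadata not available yet; fall through and retry until the deadline.
  }
  Thread.sleep(100);
}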