Use of org.bf2.operator.operands.KafkaInstanceConfiguration.Kafka in project kafka-admin-api by bf2fc6cc711aee1a0c2a:
the class AdminClientFactory, method createAdminClient.
/**
 * Creates a Kafka Admin Client configured from the application configuration,
 * augmented with the caller's credentials when authentication is enabled.
 * <p>
 * When OAuth is enabled, the raw access token is taken from the authenticated
 * JWT principal (resolved by the injected {@code token} instance) and placed in
 * the SASL JAAS config. When HTTP Basic auth is enabled instead, credentials are
 * extracted from the request's {@code Authorization} header. With neither
 * enabled, the client is created from the base configuration only.
 *
 * @return a new {@link AdminClient} built from the assembled configuration
 * @throws NotAuthorizedException if OAuth is enabled and the principal carries
 *         no raw token, or if Basic auth is enabled and credentials are missing
 *         or invalid (both respond 401)
 */
public AdminClient createAdminClient() {
    Map<String, Object> acConfig = config.getAcConfig();

    if (config.isOauthEnabled()) {
        if (token.isResolvable()) {
            final String accessToken = token.get().getRawToken();
            if (accessToken == null) {
                // Build the Response explicitly so the Response-taking constructor is
                // used (consistent with the Basic-auth branch below); passing the
                // bare ResponseBuilder would hit the varargs "challenge" constructor.
                throw new NotAuthorizedException(Response.status(Status.UNAUTHORIZED).build());
            }
            acConfig.put(SaslConfigs.SASL_JAAS_CONFIG, String.format(SASL_OAUTH_CONFIG_TEMPLATE, accessToken));
        } else {
            // OAuth configured but no JWT was presented/resolved; proceed without
            // a token and let the broker reject the connection if required.
            log.warn("OAuth is enabled, but there is no JWT principal");
        }
    } else if (config.isBasicEnabled()) {
        extractCredentials(Optional.ofNullable(headers.get().getHeaderString(HttpHeaders.AUTHORIZATION)))
            .ifPresentOrElse(
                credentials -> acConfig.put(SaslConfigs.SASL_JAAS_CONFIG, credentials),
                () -> {
                    throw new NotAuthorizedException("Invalid or missing credentials", Response.status(Status.UNAUTHORIZED).build());
                });
    } else {
        log.debug("OAuth is disabled - no attempt to set access token in Admin Client config");
    }

    return AdminClient.create(acConfig);
}
Use of org.bf2.operator.operands.KafkaInstanceConfiguration.Kafka in project kafka-admin-api by bf2fc6cc711aee1a0c2a:
the class AccessControlListIT, method testGetAclsOrderByProperies.
/**
 * Verifies that the ACL listing endpoint returns bindings ordered by each
 * sortable property, in both ascending and descending order.
 * <p>
 * The test generates the cartesian product of all valid ACL binding field
 * values, creates them via the Kafka Admin bulk API, then fetches them through
 * the REST endpoint on a single page and compares against a locally computed
 * expected ordering.
 *
 * @param orderKey the binding property to sort by (from the CsvSource pairs)
 * @param order    either SORT_ASC or SORT_DESC
 */
@ParameterizedTest
@CsvSource({ Types.AclBinding.PROP_PERMISSION + "," + SORT_ASC, Types.AclBinding.PROP_PERMISSION + "," + SORT_DESC, Types.AclBinding.PROP_RESOURCE_TYPE + "," + SORT_ASC, Types.AclBinding.PROP_RESOURCE_TYPE + "," + SORT_DESC, Types.AclBinding.PROP_PATTERN_TYPE + "," + SORT_ASC, Types.AclBinding.PROP_PATTERN_TYPE + "," + SORT_DESC, Types.AclBinding.PROP_OPERATION + "," + SORT_ASC, Types.AclBinding.PROP_OPERATION + "," + SORT_DESC, Types.AclBinding.PROP_PRINCIPAL + "," + SORT_ASC, Types.AclBinding.PROP_PRINCIPAL + "," + SORT_DESC, Types.AclBinding.PROP_RESOURCE_NAME + "," + SORT_ASC, Types.AclBinding.PROP_RESOURCE_NAME + "," + SORT_DESC })
void testGetAclsOrderByProperies(String orderKey, String order) throws Exception {
// Map of resource type -> JSON array of operation names permitted for that type;
// used below to discard combinations the API would reject.
JsonObject allowedResourceOperations = Json.createReader(new StringReader(validResourceOperations)).readObject();
// Build every combination of permission x resourceType x patternType x operation
// x principal x resourceName via successive flatMap "joins", then filter down to
// combinations whose operation is allowed for the resource type.
List<JsonObject> newBindings = Stream.of(Json.createObjectBuilder().build()).flatMap(binding -> join(binding, Types.AclBinding.PROP_PERMISSION, AclPermissionType.ALLOW, AclPermissionType.DENY)).flatMap(binding -> join(binding, Types.AclBinding.PROP_RESOURCE_TYPE, ResourceType.TOPIC, ResourceType.GROUP, ResourceType.CLUSTER, ResourceType.TRANSACTIONAL_ID)).flatMap(binding -> join(binding, Types.AclBinding.PROP_PATTERN_TYPE, PatternType.LITERAL, PatternType.PREFIXED)).flatMap(binding -> join(binding, Types.AclBinding.PROP_OPERATION, AclOperation.READ, AclOperation.ALL, AclOperation.ALTER, AclOperation.DELETE, AclOperation.CREATE, AclOperation.ALTER_CONFIGS, AclOperation.DESCRIBE, AclOperation.DESCRIBE_CONFIGS, AclOperation.WRITE)).flatMap(binding -> join(binding, Types.AclBinding.PROP_PRINCIPAL, "User:{uuid}")).flatMap(binding -> join(binding, Types.AclBinding.PROP_RESOURCE_NAME, "resource-{uuid}")).filter(binding -> {
String resourceType = binding.getString(Types.AclBinding.PROP_RESOURCE_TYPE).toLowerCase(Locale.US);
String operation = binding.getString(Types.AclBinding.PROP_OPERATION).toLowerCase(Locale.US);
return allowedResourceOperations.getJsonArray(resourceType).stream().filter(value -> value.getValueType() == ValueType.STRING).map(JsonString.class::cast).map(JsonString::getString).anyMatch(operation::equals);
}).map(binding -> {
if (ResourceType.CLUSTER.name().equals(binding.getString(Types.AclBinding.PROP_RESOURCE_TYPE))) {
// Only value allowed is "kafka-cluster"
binding = Json.createObjectBuilder(binding).add(Types.AclBinding.PROP_RESOURCE_NAME, "kafka-cluster").build();
}
return binding;
}).distinct().collect(Collectors.toList());
List<String> sortKeys = new LinkedList<>(AccessControlOperations.SORT_KEYS.keySet());
// Remove the primary sort key, handled as a special case
sortKeys.remove(orderKey);
// Compute the expected ordering locally: primary key honors the requested
// direction; remaining keys break ties in ascending order only.
List<JsonObject> expectedValues = newBindings.stream().map(JsonObject.class::cast).sorted((j1, j2) -> {
int result;
if ((result = j1.getString(orderKey).compareTo(j2.getString(orderKey))) != 0) {
return SORT_DESC.equals(order) ? (result * -1) : result;
}
for (String key : sortKeys) {
if ((result = j1.getString(key).compareTo(j2.getString(key))) != 0) {
return result;
}
}
return 0;
}).collect(Collectors.toList());
final int expectedTotal = newBindings.size();
// Request one page larger than the data set so all bindings come back on page 1.
final int pageSize = expectedTotal + 1;
final var queryParams = Map.of("page", "1", "size", String.valueOf(pageSize), "orderKey", orderKey, "order", order);
Properties adminConfig = ClientsConfig.getAdminConfigOauth(config, tokenUtils.getToken(UserType.OWNER.getUsername()));
/*
 * Due to the number of ACLs created for this case (> 200), using the
 * bulk API directly is necessary.
 */
try (Admin admin = Admin.create(adminConfig)) {
// Create all bindings in one bulk call, then run the REST assertions inside
// whenComplete; any assertion failure propagates through the future to get().
admin.createAcls(newBindings.stream().map(Types.AclBinding::fromJsonObject).map(Types.AclBinding::toKafkaBinding).collect(Collectors.toList())).all().whenComplete((result, error) -> {
if (error != null) {
fail(error);
} else {
var response = getAcls(UserType.OWNER, queryParams).body("total", equalTo(expectedTotal)).body("size", equalTo(pageSize)).body("page", equalTo(1)).body("items", hasSize(expectedTotal));
JsonObject responseBody = Json.createReader(response.extract().asInputStream()).readObject();
List<JsonObject> responseValues = responseBody.getJsonArray("items").stream().map(JsonObject.class::cast).collect(Collectors.toList());
assertEquals(expectedValues, responseValues, "Unexpected response order");
}
// Bound the whole async create-and-verify sequence to 30 seconds.
}).toCompletionStage().toCompletableFuture().get(30, TimeUnit.SECONDS);
}
}
Use of org.bf2.operator.operands.KafkaInstanceConfiguration.Kafka in project kafka-admin-api by bf2fc6cc711aee1a0c2a:
the class DeploymentManager, method deployStrimziKafka.
/**
 * Starts a plain (non-OAuth) Strimzi Kafka container for system tests and
 * returns it through the test framework's {@code KafkaContainer} interface.
 */
private KafkaContainer<?> deployStrimziKafka() {
    LOGGER.info("Deploying Strimzi Kafka container");

    // Local adapter class: bridges StrimziKafkaContainer into the
    // KafkaContainer interface expected by callers of this method.
    class StrimziPlainKafkaContainer extends StrimziKafkaContainer implements KafkaContainer<StrimziKafkaContainer> {
        StrimziPlainKafkaContainer(String version) {
            super(version);
        }
    }

    // Image tag is supplied externally via the system property.
    String imageTag = System.getProperty("strimzi-kafka.tag");

    var plainKafka = new StrimziPlainKafkaContainer(imageTag)
            .withLabels(Collections.singletonMap("test-ident", Environment.TEST_CONTAINER_LABEL))
            .withLogConsumer(new Slf4jLogConsumer(LoggerFactory.getLogger("systemtests.plain-kafka"), true))
            .withCreateContainerCmdModifier(cmd -> cmd.withName(name("plain-kafka")))
            .withNetwork(testNetwork);

    plainKafka.start();

    return (KafkaContainer<?>) plainKafka;
}
Use of org.bf2.operator.operands.KafkaInstanceConfiguration.Kafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a:
the class AbstractKafkaCluster, method getReadiness.
/**
 * Computes the operand readiness for the given ManagedKafka from the cached
 * Kafka resource's conditions and the various in-progress update checks.
 * The checks are evaluated in priority order; the first match wins.
 */
@Override
public OperandReadiness getReadiness(ManagedKafka managedKafka) {
    Kafka kafka = cachedKafka(managedKafka);

    // No Kafka resource yet: still installing.
    if (kafka == null) {
        return new OperandReadiness(Status.False, Reason.Installing,
                String.format("Kafka %s does not exist", kafkaClusterName(managedKafka)));
    }

    // A "NotReady=True" condition: either still creating, or a real error.
    Optional<Condition> notReadyTrue = kafkaCondition(kafka, c -> "NotReady".equals(c.getType()))
            .filter(c -> "True".equals(c.getStatus()));
    if (notReadyTrue.isPresent()) {
        Condition condition = notReadyTrue.get();
        Reason reason = "Creating".equals(condition.getReason()) ? Reason.Installing : Reason.Error;
        return new OperandReadiness(Status.False, reason, condition.getMessage());
    }

    if (isStrimziUpdating(managedKafka)) {
        // the status here is actually unknown
        return new OperandReadiness(Status.True, Reason.StrimziUpdating, null);
    }

    if (isKafkaUpdating(managedKafka) || isKafkaUpgradeStabilityChecking(managedKafka)) {
        return new OperandReadiness(Status.True, Reason.KafkaUpdating, null);
    }

    if (isKafkaIbpUpdating(managedKafka)) {
        return new OperandReadiness(Status.True, Reason.KafkaIbpUpdating, null);
    }

    // A "Ready=True" condition means fully ready with no qualifying reason.
    boolean ready = kafkaCondition(kafka, c -> "Ready".equals(c.getType()))
            .filter(c -> "True".equals(c.getStatus()))
            .isPresent();
    if (ready) {
        return new OperandReadiness(Status.True, null, null);
    }

    if (isReconciliationPaused(managedKafka)) {
        // strimzi may in the future report the status even when paused, but for now we don't know
        return new OperandReadiness(Status.Unknown, Reason.Paused,
                String.format("Kafka %s is paused for an unknown reason", kafkaClusterName(managedKafka)));
    }

    // Fallback: resource exists but reports no usable status.
    return new OperandReadiness(Status.False, Reason.Installing,
            String.format("Kafka %s is not providing status", kafkaClusterName(managedKafka)));
}
Use of org.bf2.operator.operands.KafkaInstanceConfiguration.Kafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a:
the class AbstractKafkaCluster, method isKafkaAnnotationUpdating.
/**
 * Determines whether any Kafka broker pod carries a value for the given
 * annotation that differs from the expected value derived from the Kafka
 * resource — i.e. whether an annotation-driven update is still in progress.
 *
 * @param managedKafka  the ManagedKafka whose cluster is inspected
 * @param annotation    the pod annotation key to compare
 * @param valueSupplier derives the expected annotation value from the Kafka resource
 * @return {@code true} if at least one broker pod's annotation differs from the
 *         expected value; {@code false} if all match or no Kafka resource is cached
 * @throws RuntimeException if a broker pod is missing the annotation entirely
 */
private boolean isKafkaAnnotationUpdating(ManagedKafka managedKafka, String annotation, Function<Kafka, String> valueSupplier) {
    Kafka kafka = cachedKafka(managedKafka);
    if (kafka == null) {
        // Nothing cached yet: no update can be observed.
        return false;
    }

    // Broker pods are selected by the Strimzi naming convention "<cluster>-kafka".
    List<Pod> kafkaPods = kubernetesClient.pods()
            .inNamespace(kafka.getMetadata().getNamespace())
            .withLabel("strimzi.io/name", kafka.getMetadata().getName() + "-kafka")
            .list()
            .getItems();

    String expectedValue = valueSupplier.apply(kafka);

    for (Pod kafkaPod : kafkaPods) {
        String annotationValueOnPod = Optional.ofNullable(kafkaPod.getMetadata().getAnnotations())
                .map(annotations -> annotations.get(annotation))
                .orElse(null);

        if (annotationValueOnPod == null) {
            log.errorf("Kafka pod [%s] is missing annotation '%s'", kafkaPod.getMetadata().getName(), annotation);
            // Carry the context in the exception itself rather than only in the log,
            // so callers and stack traces are not left with a message-less failure.
            throw new RuntimeException(String.format("Kafka pod [%s] is missing annotation '%s'",
                    kafkaPod.getMetadata().getName(), annotation));
        }

        log.tracef("Kafka pod [%s] annotation '%s' = %s [expected value %s]", kafkaPod.getMetadata().getName(), annotation, annotationValueOnPod, expectedValue);

        // First mismatch proves an update is in progress; no need to scan further.
        if (!annotationValueOnPod.equals(expectedValue)) {
            return true;
        }
    }

    return false;
}
Aggregations