Use of io.strimzi.api.kafka.model.storage.EphemeralStorage in the strimzi project:
the class KafkaClusterOAuthValidationTest, method testOAuthAuthnAuthz.
@ParallelTest
public void testOAuthAuthnAuthz() {
    // A single internal listener secured with OAuth, exercising the full set of
    // OAuth validation knobs (JWKS refresh/expiry, timeouts, groups claim, re-auth).
    GenericKafkaListener oauthListener = new GenericKafkaListenerBuilder()
            .withName("listener1")
            .withPort(9900)
            .withType(KafkaListenerType.INTERNAL)
            .withAuth(new KafkaListenerAuthenticationOAuthBuilder()
                    .withClientId("my-client-id")
                    .withValidIssuerUri("http://valid-issuer")
                    .withJwksEndpointUri("http://jwks-endpoint")
                    .withJwksRefreshSeconds(30)
                    .withJwksExpirySeconds(90)
                    .withJwksMinRefreshPauseSeconds(5)
                    .withConnectTimeoutSeconds(20)
                    .withReadTimeoutSeconds(20)
                    .withGroupsClaim("$.groups")
                    .withMaxSecondsWithoutReauthentication(1800)
                    .withNewClientSecret()
                        .withSecretName("my-secret-secret")
                        .withKey("my-secret-key")
                    .endClientSecret()
                    .build())
            .build();
    List<GenericKafkaListener> listeners = asList(oauthListener);

    // Pair the OAuth listener with Keycloak-based authorization so that both
    // authentication and authorization validation paths are covered.
    Kafka kafkaAssembly = new KafkaBuilder()
            .withNewMetadata()
                .withName("my-cluster")
                .withNamespace("my-namespace")
            .endMetadata()
            .withNewSpec()
                .withNewKafka()
                    .withReplicas(3)
                    .withStorage(new EphemeralStorage())
                    .withListeners(listeners)
                    .withAuthorization(new KafkaAuthorizationKeycloakBuilder()
                            .withTokenEndpointUri("http://token-endpoint")
                            .withClientId("my-client-id")
                            .withDelegateToKafkaAcls(true)
                            .withGrantsRefreshPeriodSeconds(60)
                            .withGrantsRefreshPoolSize(5)
                            .withSuperUsers("alice", "CN=alice")
                            .build())
                .endKafka()
                .withNewZookeeper()
                    .withReplicas(3)
                    .withStorage(new EphemeralStorage())
                .endZookeeper()
            .endSpec()
            .build();

    // Must not throw: the configuration above is expected to validate cleanly.
    KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS);
}
Use of io.strimzi.api.kafka.model.storage.EphemeralStorage in the strimzi-kafka-operator project:
the class Capacity, method generateJbodDiskCapacity.
/**
 * Generate JBOD disk capacity configuration for a broker using the supplied storage configuration.
 *
 * @param storage Storage configuration for the Kafka cluster; must be a {@code JbodStorage}
 *                (the unchecked cast below will throw {@code ClassCastException} otherwise)
 * @param idx Index of the broker
 * @return Disk capacity configuration value as a JsonObject for broker idx, mapping each
 *         volume's log-dir path to its size in MiB
 */
private JsonObject generateJbodDiskCapacity(Storage storage, int idx) {
    JsonObject json = new JsonObject();
    for (SingleVolumeStorage volume : ((JbodStorage) storage).getVolumes()) {
        String name = VolumeUtils.createVolumePrefix(volume.getId(), true);
        String path = AbstractModel.KAFKA_MOUNT_PATH + "/" + name + "/" + AbstractModel.KAFKA_LOG_DIR + idx;
        // Scope the size to this iteration: previously the variable lived outside the
        // loop, so a volume of an unrecognized type would silently reuse the size of
        // the preceding volume instead of falling back to the empty default.
        String size = "";
        if (volume instanceof PersistentClaimStorage) {
            size = ((PersistentClaimStorage) volume).getSize();
        } else if (volume instanceof EphemeralStorage) {
            // NOTE(review): getSizeLimit() may be null if no limit was configured —
            // presumably Capacity.getSizeInMiB handles that; confirm.
            size = ((EphemeralStorage) volume).getSizeLimit();
        }
        json.put(path, String.valueOf(Capacity.getSizeInMiB(size)));
    }
    return json;
}
Use of io.strimzi.api.kafka.model.storage.EphemeralStorage in the strimzi-kafka-operator project:
the class KafkaAssemblyOperatorMockTest, method testReconcileUpdatesKafkaStorageType.
@ParameterizedTest
@MethodSource("data")
// Verifies that flipping the Kafka storage type in the CR (ephemeral <-> persistent)
// is NOT acted upon by reconciliation: the StatefulSet's PVC templates, volumes and
// init containers must remain exactly as they were after the initial reconcile.
public void testReconcileUpdatesKafkaStorageType(Params params, VertxTestContext context) {
    init(params);
    // Captured from the StatefulSet after the first reconcile; AtomicReference is used
    // because the values are set inside the async callback below.
    AtomicReference<List<PersistentVolumeClaim>> originalPVCs = new AtomicReference<>();
    AtomicReference<List<Volume>> originalVolumes = new AtomicReference<>();
    AtomicReference<List<Container>> originalInitContainers = new AtomicReference<>();
    Checkpoint async = context.checkpoint();
    initialReconcile(context).onComplete(context.succeeding(v -> context.verify(() -> {
        // Snapshot the PVC templates, pod volumes and init containers of the Kafka
        // StatefulSet as created by the initial reconcile (empty list if absent).
        originalPVCs.set(Optional.ofNullable(client.apps().statefulSets().inNamespace(NAMESPACE).withName(KafkaCluster.kafkaClusterName(CLUSTER_NAME)).get()).map(StatefulSet::getSpec).map(StatefulSetSpec::getVolumeClaimTemplates).orElse(new ArrayList<>()));
        originalVolumes.set(Optional.ofNullable(client.apps().statefulSets().inNamespace(NAMESPACE).withName(KafkaCluster.kafkaClusterName(CLUSTER_NAME)).get()).map(StatefulSet::getSpec).map(StatefulSetSpec::getTemplate).map(PodTemplateSpec::getSpec).map(PodSpec::getVolumes).orElse(new ArrayList<>()));
        originalInitContainers.set(Optional.ofNullable(client.apps().statefulSets().inNamespace(NAMESPACE).withName(KafkaCluster.kafkaClusterName(CLUSTER_NAME)).get()).map(StatefulSet::getSpec).map(StatefulSetSpec::getTemplate).map(PodTemplateSpec::getSpec).map(PodSpec::getInitContainers).orElse(new ArrayList<>()));
        // Update the storage type
        // ephemeral -> persistent
        // or
        // persistent -> ephemeral
        Kafka updatedStorageKafka = null;
        if (kafkaStorage instanceof EphemeralStorage) {
            updatedStorageKafka = new KafkaBuilder(cluster).editSpec().editKafka().withNewPersistentClaimStorage().withSize("123").endPersistentClaimStorage().endKafka().endSpec().build();
        } else if (kafkaStorage instanceof PersistentClaimStorage) {
            updatedStorageKafka = new KafkaBuilder(cluster).editSpec().editKafka().withNewEphemeralStorage().endEphemeralStorage().endKafka().endSpec().build();
        } else {
            // Guard against a future storage type being added to data() without this test being updated
            context.failNow(new Exception("If storage is not ephemeral or persistent something has gone wrong"));
        }
        kafkaAssembly(NAMESPACE, CLUSTER_NAME).patch(updatedStorageKafka);
        LOGGER.info("Updating with changed storage type");
    }))).compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME))).onComplete(context.succeeding(v -> context.verify(() -> {
        // Check the Volumes and PVCs were not changed
        assertPVCs(context, KafkaCluster.kafkaClusterName(CLUSTER_NAME), originalPVCs.get());
        assertVolumes(context, KafkaCluster.kafkaClusterName(CLUSTER_NAME), originalVolumes.get());
        assertInitContainers(context, KafkaCluster.kafkaClusterName(CLUSTER_NAME), originalInitContainers.get());
        async.flag();
    })));
}
Use of io.strimzi.api.kafka.model.storage.EphemeralStorage in the strimzi-kafka-operator project:
the class KafkaAssemblyOperatorMockTest, method data.
/**
 * Builds the parameter matrix for the mock assembly-operator tests: the cross product of
 * replica counts (1, 3) and three storage variants (ephemeral, persistent with delete-claim,
 * persistent without delete-claim), each paired with a single resource-requirements profile.
 *
 * @return one {@code Params} per (replicas x storage x resources) combination; the same
 *         index is used for both the ZooKeeper and Kafka storage arrays so they stay in sync
 */
public static Iterable<KafkaAssemblyOperatorMockTest.Params> data() {
    int[] replicas = { 1, 3 };
    int[] storageOptions = { 0, 1, 2 };
    Storage[] kafkaStorageConfigs = { new EphemeralStorage(), new PersistentClaimStorageBuilder().withSize("123").withStorageClass("foo").withDeleteClaim(true).build(), new PersistentClaimStorageBuilder().withSize("123").withStorageClass("foo").withDeleteClaim(false).build() };
    SingleVolumeStorage[] zkStorageConfigs = { new EphemeralStorage(), new PersistentClaimStorageBuilder().withSize("123").withStorageClass("foo").withDeleteClaim(true).build(), new PersistentClaimStorageBuilder().withSize("123").withStorageClass("foo").withDeleteClaim(false).build() };
    ResourceRequirements[] resources = { new ResourceRequirementsBuilder().addToLimits("cpu", new Quantity("5000m")).addToLimits("memory", new Quantity("5000m")).addToRequests("cpu", new Quantity("5000")).addToRequests("memory", new Quantity("5000m")).build() };
    // Diamond operator instead of the previous raw `new ArrayList()` (unchecked warning).
    List<KafkaAssemblyOperatorMockTest.Params> result = new ArrayList<>();
    for (int replicaCount : replicas) {
        for (int storage : storageOptions) {
            for (ResourceRequirements resource : resources) {
                // Same replica count and storage variant applied to both ZooKeeper and Kafka
                result.add(new KafkaAssemblyOperatorMockTest.Params(replicaCount, zkStorageConfigs[storage], replicaCount, kafkaStorageConfigs[storage], resource));
            }
        }
    }
    return result;
}
Use of io.strimzi.api.kafka.model.storage.EphemeralStorage in the strimzi-kafka-operator project:
the class KafkaAssemblyOperatorTest, method data.
/**
 * Builds the parameter matrix for the assembly-operator tests: the cross product of the
 * OpenShift/metrics/entity-operator toggle, three Kafka config variants (null, empty,
 * single-entry) and two storage variants (ephemeral, persistent).
 */
public static Iterable<Params> data() {
    boolean[] openShiftModes = { true, false };
    SingleVolumeStorage[] storageVariants = { new EphemeralStorage(), new PersistentClaimStorageBuilder().withSize("123").withStorageClass("foo").withDeleteClaim(true).build() };
    List<Map<String, Object>> configVariants = asList(null, emptyMap(), singletonMap("foo", "bar"));
    List<Params> allParams = new ArrayList<>();
    for (boolean onOpenShift : openShiftModes) {
        for (Map<String, Object> kafkaConfig : configVariants) {
            for (SingleVolumeStorage storageVariant : storageVariants) {
                // The entity operator (topic + user operators) is only present in the
                // OpenShift/metrics variant of the matrix.
                EntityOperatorSpec eoConfig = onOpenShift
                        ? new EntityOperatorSpecBuilder().withUserOperator(new EntityUserOperatorSpecBuilder().build()).withTopicOperator(new EntityTopicOperatorSpecBuilder().build()).build()
                        : null;
                // Two internal listeners (SCRAM on plain, TLS auth on tls) plus one
                // external listener whose type depends on the platform.
                List<GenericKafkaListener> listeners = new ArrayList<>(3);
                listeners.add(new GenericKafkaListenerBuilder().withName("plain").withPort(9092).withType(KafkaListenerType.INTERNAL).withTls(false).withNewKafkaListenerAuthenticationScramSha512Auth().endKafkaListenerAuthenticationScramSha512Auth().build());
                listeners.add(new GenericKafkaListenerBuilder().withName("tls").withPort(9093).withType(KafkaListenerType.INTERNAL).withTls(true).withNewKafkaListenerAuthenticationTlsAuth().endKafkaListenerAuthenticationTlsAuth().build());
                // OpenShift exposes the external listener via a Route; plain Kubernetes
                // uses a NodePort instead.
                KafkaListenerType externalType = onOpenShift ? KafkaListenerType.ROUTE : KafkaListenerType.NODEPORT;
                listeners.add(new GenericKafkaListenerBuilder().withName("external").withPort(9094).withType(externalType).withTls(true).withNewKafkaListenerAuthenticationTlsAuth().endKafkaListenerAuthenticationTlsAuth().build());
                allParams.add(new Params(onOpenShift, onOpenShift, listeners, kafkaConfig, kafkaConfig, storageVariant, storageVariant, eoConfig));
            }
        }
    }
    return allParams;
}
Aggregations