Use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Type in the kas-fleetshard project (by bf2fc6cc711aee1a0c2a):
the method testBuildUpdateCondition of the class ConditionUtilsTest.
@ParameterizedTest
@ValueSource(strings = { "Ready" })
void testBuildUpdateCondition(String type) {
// A freshly built condition carries the requested type and status.
ManagedKafkaCondition condition = ConditionUtils.buildCondition(ManagedKafkaCondition.Type.valueOf(type), Status.True);
assertEquals("True", condition.getStatus());
assertEquals(type, condition.getType());
// Updating the status flips it while leaving the type untouched.
ConditionUtils.updateConditionStatus(condition, Status.False, null, null);
assertEquals("False", condition.getStatus());
assertEquals(type, condition.getType());
// A real-methods mock lets us count setLastTransitionTime invocations.
var spyCondition = Mockito.mock(ManagedKafkaCondition.class, Mockito.CALLS_REAL_METHODS);
ConditionUtils.updateConditionStatus(spyCondition, Status.False, Reason.Deleted, null);
Mockito.verify(spyCondition, Mockito.times(1)).setLastTransitionTime(Mockito.anyString());
// Re-applying the identical status/reason must not bump the transition time again.
ConditionUtils.updateConditionStatus(spyCondition, Status.False, Reason.Deleted, null);
Mockito.verify(spyCondition, Mockito.times(1)).setLastTransitionTime(Mockito.anyString());
}
Use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Type in the kas-fleetshard project (by bf2fc6cc711aee1a0c2a):
the method setup of the class InstanceProfiler.
/**
 * Prepares a profiling run: restores any prior results, connects to the Kafka
 * and OMB clusters, installs OMB, and sizes the OMB worker requests from the
 * resources available on the OMB worker nodes. The Kafka provisioner is only
 * installed (and the SETUP step recorded) when no step has completed yet.
 *
 * @throws Exception if cluster connection, installation, or filesystem setup fails
 */
private void setup() throws Exception {
readResults();
// First run: derive a unique profile name from the current timestamp.
if (profilingResult.name == null) {
profilingResult.name = "profile-" + Environment.DATE_FORMAT.format(LocalDateTime.now());
}
logDir = new File("target", profilingResult.name);
Files.createDirectories(logDir.toPath());
kafkaCluster = KubeClusterResource.connectToKubeCluster(PerformanceEnvironment.KAFKA_KUBECONFIG);
// Record the cloud instance type of the Kafka workers; only node 0 is sampled
// (assumes a homogeneous worker pool -- TODO confirm).
profilingResult.kafkaNodeType = kafkaCluster.getWorkerNodes().get(0).getMetadata().getLabels().get("node.kubernetes.io/instance-type");
kafkaProvisioner = ManagedKafkaProvisioner.create(kafkaCluster);
kafkaProvisioner.setup();
omb = new OMB(KubeClusterResource.connectToKubeCluster(PerformanceEnvironment.OMB_KUBECONFIG));
omb.install(kafkaProvisioner.getTlsConfig());
// TODO: if there is an existing result, make sure it's the same test setup
// Same single-node sampling assumption for the OMB cluster.
profilingResult.ombNodeType = omb.getOmbCluster().getWorkerNodes().get(0).getMetadata().getLabels().get("node.kubernetes.io/instance-type");
profilingResult.ombWorkerNodes = omb.getOmbCluster().getWorkerNodes().size();
// Size workers from the most constrained OMB worker node.
AvailableResources resources = getMinAvailableResources(omb.getOmbCluster().getWorkerNodes().stream());
// use all available resources on the worker nodes with 2 workers per node
// if (resources.memoryBytes > 16*ONE_GB || resources.memoryBytes < 8*ONE_GB) {
// throw new IllegalStateException("Client instance types are expected to have 16 GB");
// }
// assume instead resources that will fit on 2xlarge or xlarge
// Cap at 6400 millicores / 12 GB per node, then split evenly across the 2 workers.
resources.cpuMillis = Math.min(6400, resources.cpuMillis);
resources.memoryBytes = Math.min(12 * ONE_GB, resources.memoryBytes);
omb.setWorkerCpu(Quantity.parse(resources.cpuMillis / 2 + "m"));
omb.setWorkerContainerMemory(Quantity.parse(String.valueOf(resources.memoryBytes / 2)));
profilingResult.ombWorkerCpu = omb.getWorkerCpu();
profilingResult.ombWorkerMemory = omb.getWorkerContainerMemory();
LOGGER.info("OMB Workers will use {} cpu and {} memory requests", omb.getWorkerCpu(), omb.getWorkerContainerMemory());
// Only install Kafka and persist the SETUP step on a fresh run.
if (profilingResult.completedStep == null) {
installedProvisioner = true;
kafkaProvisioner.install();
writeResults(Step.SETUP);
}
}
Use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Type in the srs-fleet-manager project (by bf2fc6cc711aee1a0c2a):
the method determineAllowedResourceType of the class AccountManagementServiceImpl.
@Timed(value = Constants.AMS_DETERMINE_ALLOWED_INSTANCE_TIMER, description = Constants.AMS_TIMER_DESCRIPTION)
@Audited
@Timeout(FaultToleranceConstants.TIMEOUT_MS)
@RetryUnwrap
// 3 retries, 200ms jitter
@Retry(retryOn = { RetryWrapperException.class })
@RetryWrap
@Override
public ResourceType determineAllowedResourceType(AccountInfo accountInfo) throws AccountManagementServiceException {
try {
Organization organization = restClient.getOrganizationByExternalId(accountInfo.getOrganizationId());
// Scan the org's quota-cost list for a RHOSR standard-quota entry with
// remaining allowance; such an entry grants a Standard instance.
QuotaCostList quotaCostList = restClient.getQuotaCostList(organization.getId(), true);
if (quotaCostList.getSize() > 0) {
for (QuotaCost quotaCost : quotaCostList.getItems()) {
// Skip entries without a positive "allowed" value.
if (quotaCost.getAllowed() == null || quotaCost.getAllowed() <= 0) {
continue;
}
// Skip entries without at least one related resource.
if (quotaCost.getRelated_resources() == null || quotaCost.getRelated_resources().isEmpty()) {
continue;
}
if (isRhosrStandardQuota(quotaCost)) {
return ResourceType.REGISTRY_INSTANCE_STANDARD;
}
}
}
// No standard quota found: fall back to eval-only.
return ResourceType.REGISTRY_INSTANCE_EVAL;
} catch (AccountManagementSystemClientException ex) {
ExceptionConvert.convert(ex);
// Unreachable: convert always rethrows.
return null;
}
}
Use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Type in the cos-fleetshard project (by bf2fc6cc711aee1a0c2a):
the method computeStatus of the class DebeziumOperandControllerTest.
@ParameterizedTest
@MethodSource
void computeStatus(String connectorState, String conditionType, String conditionReason, String expectedConnectorState) {
// Build a KafkaConnector whose status carries one condition plus one raw connector state.
var condition = new ConditionBuilder()
    .withType(conditionType)
    .withReason(conditionReason)
    .build();
var rawConnectorStatus = new org.bf2.cos.fleetshard.operator.debezium.model.KafkaConnectorStatusBuilder()
    .withState(connectorState)
    .build();
var kafkaConnector = new KafkaConnectorBuilder()
    .withStatus(new KafkaConnectorStatusBuilder()
        .addToConditions(condition)
        .addToConnectorStatus("connector", rawConnectorStatus)
        .build())
    .build();
ConnectorStatusSpec status = new ConnectorStatusSpec();
DebeziumOperandSupport.computeStatus(status, kafkaConnector);
// The computed phase must match the expectation, and the source condition must be mirrored.
assertThat(status.getPhase()).isEqualTo(expectedConnectorState);
assertThat(status.getConditions()).anySatisfy(mirrored -> {
    assertThat(mirrored)
        .hasFieldOrPropertyWithValue("type", conditionType)
        .hasFieldOrPropertyWithValue("reason", conditionReason);
});
}
Use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Type in the cos-fleetshard project (by bf2fc6cc711aee1a0c2a):
the method update of the class ConnectorClusterStatusSync.
// Builds the cluster status report (operators + namespaces) and pushes it to the control plane.
private void update() {
ConnectorClusterStatus status = new ConnectorClusterStatus();
status.setPhase(ConnectorClusterState.READY);
// One entry per known operator; every listed operator is reported as ready.
for (var operator : fleetShardClient.getOperators()) {
    status.addOperatorsItem(new ConnectorClusterStatusOperators()
        .namespace(operator.getMetadata().getNamespace())
        .operator(new ConnectorOperator()
            .id(operator.getMetadata().getName())
            .type(operator.getSpec().getType())
            .version(operator.getSpec().getVersion()))
        .status(Operators.PHASE_READY));
}
// Translate each namespace's phase; anything without a recognized status is DISCONNECTED.
for (var namespace : fleetShardClient.getNamespaces()) {
    ConnectorNamespaceState phase = ConnectorNamespaceState.DISCONNECTED;
    if (namespace.getStatus() != null) {
        if (Objects.equals(Namespaces.STATUS_ACTIVE, namespace.getStatus().getPhase())) {
            phase = ConnectorNamespaceState.READY;
        } else if (Objects.equals(Namespaces.STATUS_TERMINATING, namespace.getStatus().getPhase())) {
            phase = ConnectorNamespaceState.DELETING;
        }
    }
    status.addNamespacesItem(new ConnectorNamespaceStatus()
        .id(namespace.getMetadata().getLabels().get(Resources.LABEL_NAMESPACE_ID))
        .version(Resources.getLabel(namespace, Resources.LABEL_KUBERNETES_VERSION))
        .connectorsDeployed(fleetShardClient.getConnectors(namespace).size())
        .phase(phase));
}
controlPlane.updateClusterStatus(status);
}
Aggregations