Use of org.bf2.cos.fleetshard.support.resources.Resources in the project kas-fleetshard (by bf2fc6cc711aee1a0c2a): class InstanceProfiler, method setup().
/**
 * Prepares a profiling session: loads any previously persisted results, creates the
 * per-run log directory, connects to the Kafka and OMB clusters, and sizes the OMB
 * workers from the resources available on the OMB worker nodes.
 *
 * @throws Exception if cluster connection, provisioner setup, or OMB install fails
 */
private void setup() throws Exception {
// Load any previously persisted profiling results so an interrupted run can be resumed.
readResults();
if (profilingResult.name == null) {
// First run: derive a unique profile name from the current timestamp.
profilingResult.name = "profile-" + Environment.DATE_FORMAT.format(LocalDateTime.now());
}
// All logs for this run go under target/<profile-name>.
logDir = new File("target", profilingResult.name);
Files.createDirectories(logDir.toPath());
kafkaCluster = KubeClusterResource.connectToKubeCluster(PerformanceEnvironment.KAFKA_KUBECONFIG);
// Record the cloud instance type of the first worker node
// (assumes a homogeneous worker node pool — TODO confirm).
profilingResult.kafkaNodeType = kafkaCluster.getWorkerNodes().get(0).getMetadata().getLabels().get("node.kubernetes.io/instance-type");
kafkaProvisioner = ManagedKafkaProvisioner.create(kafkaCluster);
kafkaProvisioner.setup();
// The OMB (OpenMessaging Benchmark) workload may run on a separate cluster.
omb = new OMB(KubeClusterResource.connectToKubeCluster(PerformanceEnvironment.OMB_KUBECONFIG));
omb.install(kafkaProvisioner.getTlsConfig());
// TODO: if there is an existing result, make sure it's the same test setup
profilingResult.ombNodeType = omb.getOmbCluster().getWorkerNodes().get(0).getMetadata().getLabels().get("node.kubernetes.io/instance-type");
profilingResult.ombWorkerNodes = omb.getOmbCluster().getWorkerNodes().size();
// Size workers from the most constrained OMB worker node.
AvailableResources resources = getMinAvailableResources(omb.getOmbCluster().getWorkerNodes().stream());
// use all available resources on the worker nodes with 2 workers per node
// if (resources.memoryBytes > 16*ONE_GB || resources.memoryBytes < 8*ONE_GB) {
// throw new IllegalStateException("Client instance types are expected to have 16 GB");
// }
// assume instead resources that will fit on 2xlarge or xlarge
resources.cpuMillis = Math.min(6400, resources.cpuMillis);
resources.memoryBytes = Math.min(12 * ONE_GB, resources.memoryBytes);
// Two workers per node, so each worker requests half of the capped node resources.
omb.setWorkerCpu(Quantity.parse(resources.cpuMillis / 2 + "m"));
omb.setWorkerContainerMemory(Quantity.parse(String.valueOf(resources.memoryBytes / 2)));
profilingResult.ombWorkerCpu = omb.getWorkerCpu();
profilingResult.ombWorkerMemory = omb.getWorkerContainerMemory();
LOGGER.info("OMB Workers will use {} cpu and {} memory requests", omb.getWorkerCpu(), omb.getWorkerContainerMemory());
// Only install the provisioner on a fresh run; a resumed run (completedStep != null)
// is expected to already have it installed.
if (profilingResult.completedStep == null) {
installedProvisioner = true;
kafkaProvisioner.install();
writeResults(Step.SETUP);
}
}
Use of org.bf2.cos.fleetshard.support.resources.Resources in the project kas-fleetshard (by bf2fc6cc711aee1a0c2a): class InstanceProfiler, method deployIfNeeded().
/**
 * Ensures a ManagedKafka instance named {@code name} exists and is ready, recording its
 * bootstrap address in {@code instanceBootstrap}.
 * <p>
 * If the instance is absent, the provisioner is (re)installed when needed, previous
 * clusters are removed, and a fresh cluster is deployed using the capacity and config
 * captured in the profiling result. If it already exists, the deployment is reused.
 *
 * @param name name of the ManagedKafka resource in the kafka namespace
 * @throws Exception if provisioning or waiting for readiness fails
 */
private void deployIfNeeded(String name) throws Exception {
    ManagedKafka mk = null;
    Resource<ManagedKafka> mkResource = kafkaCluster.kubeClient().client().resources(ManagedKafka.class).inNamespace(Constants.KAFKA_NAMESPACE).withName(name);
    try {
        mk = mkResource.get();
    } catch (KubernetesClientException e) {
        // get() returns null when the resource simply doesn't exist, so an exception here
        // is a genuine API/connectivity problem. Previously this was swallowed silently,
        // hiding the cause of an unnecessary cluster recreation. Log it and fall through
        // to the "not found" path as a best effort.
        LOGGER.warn("Failed to look up existing ManagedKafka {}; will (re)create it", name, e);
    }
    ManagedKafkaDeployment kd = null;
    if (mk == null) {
        if (!installedProvisioner) {
            // TODO: come up with a better resume logic here - it currently has to recreate everything
            installedProvisioner = true;
            kafkaProvisioner.install();
        }
        kafkaProvisioner.removeClusters(true);
        kd = kafkaProvisioner.deployCluster(name, profilingResult.capacity, profilingResult.config);
    } else {
        // TODO validate config / capacity
        kd = new ManagedKafkaDeployment(mk, kafkaCluster);
        kd.start();
    }
    instanceBootstrap = kd.waitUntilReady();
}
Use of org.bf2.cos.fleetshard.support.resources.Resources in the project srs-fleet-manager (by bf2fc6cc711aee1a0c2a): class AccountManagementServiceImpl, method createResource().
/**
 * Creates an AMS (Account Management Service) resource reservation for the given account.
 * <p>
 * First verifies that the user has accepted the Terms &amp; Conditions for every configured
 * event code, then reserves quota via the AMS cluster-authorization API and returns the
 * resulting subscription id.
 *
 * @param accountInfo  the requesting user's account details
 * @param resourceType type of instance being created (standard or eval)
 * @return the AMS subscription id, or {@code null} for eval instances (workaround below)
 * @throws TermsRequiredException        if the user has not accepted any applicable T&amp;C
 * @throws ResourceLimitReachedException if AMS denies the reservation
 * @throws AccountManagementServiceException on other AMS client failures
 */
@Timed(value = Constants.AMS_CREATE_TIMER, description = Constants.AMS_TIMER_DESCRIPTION)
@Audited(extractResult = KEY_AMS_SUBSCRIPTION_ID)
// NOTE(review): this comment is a fragment in the original — presumably it read
// "eval instances are not billed, but AMS still performs the reservation." Confirm intent.
@Override
public String createResource(AccountInfo accountInfo, ResourceType resourceType) throws TermsRequiredException, ResourceLimitReachedException, AccountManagementServiceException {
try {
boolean termsAccepted = false;
String siteCode = amsProperties.termsSiteCode;
List<String> eventCodes = amsProperties.termsEventCode;
// Deliberately checks every event code (no short-circuit): each iteration performs a
// separate AMS terms-review call.
for (String eventCode : eventCodes) {
final TermsReview termsReview = new TermsReview();
termsReview.setAccountUsername(accountInfo.getAccountUsername());
termsReview.setSiteCode(siteCode);
termsReview.setEventCode(eventCode);
// Check if the user has accepted the Terms & Conditions
final ResponseTermsReview responseTermsReview = restClient.termsReview(termsReview);
boolean accepted = !responseTermsReview.getTermsRequired();
// Terms are accepted if *any* of the T&C checks come back as "accepted"
termsAccepted = termsAccepted || accepted;
}
if (!termsAccepted) {
throw new TermsRequiredException(accountInfo.getAccountUsername());
}
// TODO Workaround: Remove this once we have RHOSRTrial working.
// Eval instances skip AMS entirely and return a null subscription id.
if (resourceType == ResourceType.REGISTRY_INSTANCE_EVAL) {
log.debug("Creating an eval instance for '{}' in org '{}' without calling AMS.", accountInfo.getAccountUsername(), accountInfo.getOrganizationId());
return null;
}
// Set the productId and resourceName based on if it's an Eval or Standard instance
String productId = amsProperties.standardProductId;
String resourceName = amsProperties.standardResourceName;
// NOTE(review): this branch is currently unreachable — the eval workaround above
// returns early for REGISTRY_INSTANCE_EVAL. It becomes live again once the
// workaround is removed; do not delete.
if (resourceType == ResourceType.REGISTRY_INSTANCE_EVAL) {
productId = amsProperties.evalProductId;
resourceName = amsProperties.evalResourceName;
}
// Build a quota resource ID to pass to AMS
final var quotaResource = ReservedResource.builder().resourceType(amsProperties.resourceType).byoc(false).resourceName(resourceName).billingModel("marketplace").availabilityZone("single").count(1).build();
// Create the cluster authorization REST operation input
final ClusterAuthorization clusterAuthorization = ClusterAuthorization.builder().accountUsername(accountInfo.getAccountUsername()).productId(productId).managed(true).byoc(false).cloudProviderId("aws").reserve(true).availabilityZone("single").clusterId(UUID.randomUUID().toString()).resources(Collections.singletonList(quotaResource)).build();
// Consume quota from AMS via the AMS REST API
final ClusterAuthorizationResponse clusterAuthorizationResponse = restClient.clusterAuthorization(clusterAuthorization);
if (clusterAuthorizationResponse.getAllowed()) {
return clusterAuthorizationResponse.getSubscription().getId();
} else {
// User not allowed to create resource
throw new ResourceLimitReachedException();
}
} catch (AccountManagementSystemClientException ex) {
// Translates the low-level client exception into one of the declared checked types.
ExceptionConvert.convert(ex);
// Never returns
return null;
}
}
Use of org.bf2.cos.fleetshard.support.resources.Resources in the project srs-fleet-manager (by bf2fc6cc711aee1a0c2a): class ProvisionRegistryTenantWorker, method execute().
/**
 * Provisions the tenant for a registry: resolves its deployment, computes the public
 * registry URL, creates the tenant in the tenant manager (if not already created),
 * schedules eval expiration where applicable, and marks the registry READY.
 *
 * @param aTask the task being executed (must be a {@code ProvisionRegistryTenantTask})
 * @param ctl   worker context used for retry/delay scheduling
 * @throws RegistryStorageConflictException on concurrent storage updates
 * @throws TenantManagerServiceException    on tenant-manager failures
 */
@Transactional
@Override
public void execute(Task aTask, WorkerContext ctl) throws RegistryStorageConflictException, TenantManagerServiceException {
// TODO Split along failure points?
ProvisionRegistryTenantTask task = (ProvisionRegistryTenantTask) aTask;
Optional<RegistryData> registryOptional = storage.getRegistryById(task.getRegistryId());
// NOTE: Failure point 1
// NOTE(review): the code below calls registryOptional.get() unconditionally; this is
// only safe if ctl.retry() aborts execution by throwing — confirm that contract.
if (registryOptional.isEmpty()) {
ctl.retry();
}
RegistryData registry = registryOptional.get();
RegistryDeploymentData registryDeployment = registry.getRegistryDeployment();
// NOTE: Failure point 2
// NOTE(review): same assumption as above — registryDeployment is dereferenced after
// this check, so ctl.retry() must not return normally.
if (registryDeployment == null) {
// Either the schedule task didn't run yet, or we are in trouble
ctl.retry();
}
String registryUrl = registryDeployment.getRegistryDeploymentUrl();
// New approach: configure the deployment URL with a replacement like: https://TENANT_ID.shrd.sr.openshift.com
if (registryUrl.contains("TENANT_ID")) {
registryUrl = registryUrl.replace("TENANT_ID", registry.getId());
} else {
// Old approach: configure the deployment URL without a replacement, and just add "/t/TENANT_ID" to the end of it.
if (!registryUrl.endsWith("/")) {
registryUrl += "/";
}
registryUrl += "t/" + registry.getId();
}
registry.setRegistryUrl(registryUrl);
// Avoid accidentally creating orphan tenants
// Only create the tenant once; a resumed task with a recorded tenant id skips this.
if (task.getRegistryTenantId() == null) {
CreateTenantRequest tenantRequest = CreateTenantRequest.builder().tenantId(registry.getId()).createdBy(registry.getOwner()).organizationId(registry.getOrgId()).resources(plansService.determineQuotaPlan(registry.getOrgId()).getResources()).build();
TenantManagerConfig tenantManager = Utils.createTenantManagerConfig(registryDeployment);
// NOTE: Failure point 4
tmClient.createTenant(tenantManager, tenantRequest);
// Tenant id equals registry id — recorded on the task so a retry won't re-create it.
task.setRegistryTenantId(registry.getId());
}
// Add expiration task if this is an eval instance
if (isEvalInstance(registry.getInstanceType())) {
var expiration = Instant.now().plus(Duration.ofSeconds(evalLifetimeSeconds));
log.debug("Scheduling an expiration task for the eval instance {} to be executed at {}", registry, expiration);
// Submission is deferred via ctl.delay so it only happens if this execution commits.
ctl.delay(() -> tasks.submit(EvalInstanceExpirationRegistryTask.builder().registryId(registry.getId()).schedule(TaskSchedule.builder().firstExecuteAt(expiration).build()).build()));
}
// NOTE: Failure point 5
registry.setStatus(RegistryStatusValueDto.READY.value());
storage.createOrUpdateRegistry(registry);
// TODO This task is (temporarily) not used. Enable when needed.
// Update status to available in the heartbeat task, which should run ASAP
// ctl.delay(() -> tasks.submit(RegistryHeartbeatTask.builder().registryId(registry.getId()).build()));
}
Use of org.bf2.cos.fleetshard.support.resources.Resources in the project srs-fleet-manager (by bf2fc6cc711aee1a0c2a): class QuotaPlanIT, method testQuotaPlan().
/**
 * Verifies quota plans end-to-end: a basic org gets a 10-schema limit, a premium org
 * gets 100, a manual tenant update can override the limit, and restarting the fleet
 * manager reconciles the tenant back to its plan value.
 */
@Test
void testQuotaPlan() throws Exception {
    var alice = new AccountInfo("alice", "alice", false, 1L);
    var basicRegistryId = createRegistryAndWaitReady("registry-basic", alice);

    var bob = new AccountInfo("bob", "bob", false, 2L);
    var premiumRegistryId = createRegistryAndWaitReady("registry-premium", bob);

    TenantManagerClient tenantManager = Utils.createTenantManagerClient();

    // basic plan
    assertMaxTotalSchemasLimit(tenantManager, basicRegistryId, 10);
    // premium plan
    assertMaxTotalSchemasLimit(tenantManager, premiumRegistryId, 100);

    // Update the limit value and recheck after forced reconciliation
    var resources = tenantManager.getTenant(premiumRegistryId).getResources();
    for (var r : resources) {
        if (r.getType() == ResourceType.MAX_TOTAL_SCHEMAS_COUNT) {
            r.setLimit(-1L);
        }
    }
    var ur = new UpdateRegistryTenantRequest();
    ur.setResources(resources);
    tenantManager.updateTenant(premiumRegistryId, ur);
    // Check updated
    assertMaxTotalSchemasLimit(tenantManager, premiumRegistryId, -1);

    // Restart fleet manager(s) so the quota plan is reconciled
    TestInfraManager.getInstance().restartFleetManager();
    assertMaxTotalSchemasLimit(tenantManager, premiumRegistryId, 100);

    // Delete
    FleetManagerApi.deleteRegistry(basicRegistryId, alice);
    FleetManagerApi.deleteRegistry(premiumRegistryId, bob);
}

/**
 * Creates a registry with the given name for the given owner, asserts creation did not
 * fail, waits until it reports ready, and returns its id.
 */
private String createRegistryAndWaitReady(String name, AccountInfo owner) {
    var request = new RegistryCreate();
    request.setName(name);
    var result = FleetManagerApi.createRegistry(request, owner);
    assertNotEquals(RegistryStatusValue.failed, result.getStatus());
    Awaitility.await("registry available").atMost(30, TimeUnit.SECONDS).pollInterval(5, TimeUnit.SECONDS).until(() -> {
        var reg = FleetManagerApi.getRegistry(result.getId(), owner);
        return reg.getStatus().equals(RegistryStatusValue.ready);
    });
    return result.getId();
}

/**
 * Asserts that the tenant's MAX_TOTAL_SCHEMAS_COUNT resource limit equals {@code expected}.
 * Scans all resources; the limit must be present.
 */
private void assertMaxTotalSchemasLimit(TenantManagerClient tenantManager, String tenantId, long expected) {
    var resources = tenantManager.getTenant(tenantId).getResources();
    Long limit = null;
    for (var r : resources) {
        if (r.getType() == ResourceType.MAX_TOTAL_SCHEMAS_COUNT) {
            limit = r.getLimit();
        }
    }
    assertNotNull(limit);
    assertEquals(expected, limit);
}
Aggregations