use of org.candlepin.model.ConsumerType in project candlepin by candlepin.
the class ConsumerImporter method store.
/**
 * Stores the upstream consumer details from an imported manifest onto the given owner.
 * <p>
 * Validates that the manifest's upstream UUID is present, is not already claimed by a
 * different owner, and (unless forced) matches any upstream UUID the owner previously
 * imported from. On success the owner's upstream consumer is replaced and the owner is
 * merged back to the database.
 *
 * @param owner the owner performing the import
 * @param consumer the upstream consumer (distributor) DTO read from the manifest
 * @param forcedConflicts import conflicts the caller has explicitly chosen to override
 * @param idcert identity certificate of the upstream consumer; may be null
 * @throws SyncDataFormatException if the consumer has no UUID, or its UUID is already
 *     in use by another owner
 */
public void store(Owner owner, ConsumerDTO consumer, ConflictOverrides forcedConflicts,
    IdentityCertificate idcert) throws SyncDataFormatException {

    if (consumer.getUuid() == null) {
        throw new SyncDataFormatException(
            i18n.tr("No ID for upstream subscription management application."));
    }

    // Make sure no other owner is already using this upstream UUID:
    Owner alreadyUsing = curator.lookupWithUpstreamUuid(consumer.getUuid());
    if (alreadyUsing != null && !alreadyUsing.getKey().equals(owner.getKey())) {
        log.error("Cannot import manifest for org: {}", owner.getKey());
        // Fixed log message: previously read "already in used by" (typo).
        log.error("Upstream distributor {} already in use by org: {}",
            consumer.getUuid(), alreadyUsing.getKey());

        // NOTE(review): the original comment here was truncated; presumably the other
        // org must delete their manifest, after which it could be used elsewhere.
        throw new SyncDataFormatException(i18n.tr(
            "This subscription management application has already been imported by another owner."));
    }

    // Importing from a different distributor than the owner previously used is a
    // conflict unless the caller explicitly forced DISTRIBUTOR_CONFLICT.
    if (owner.getUpstreamUuid() != null && !owner.getUpstreamUuid().equals(consumer.getUuid())) {
        if (!forcedConflicts.isForced(Importer.Conflict.DISTRIBUTOR_CONFLICT)) {
            throw new ImportConflictException(
                i18n.tr("Owner has already imported from another subscription management application."),
                Importer.Conflict.DISTRIBUTOR_CONFLICT);
        }
        else {
            log.warn("Forcing import from a new distributor for org: {}", owner.getKey());
            log.warn("Old distributor UUID: {}", owner.getUpstreamUuid());
            log.warn("New distributor UUID: {}", consumer.getUuid());
        }
    }

    /*
     * WARNING: Strange quirk here, we create a certificate serial object here which does not
     * match the actual serial of the identity certificate. Presumably this is to prevent
     * potential conflicts with a serial that came from somewhere else. This is consistent with
     * importing entitlement certs (as subscription certs).
     */
    if (idcert != null) {
        CertificateSerial cs = new CertificateSerial();
        cs.setCollected(idcert.getSerial().isCollected());
        cs.setExpiration(idcert.getSerial().getExpiration());
        cs.setUpdated(idcert.getSerial().getUpdated());
        cs.setCreated(idcert.getSerial().getCreated());
        serialCurator.create(cs);

        // Detach from any previous persistent identity and attach the fresh serial.
        idcert.setId(null);
        idcert.setSerial(cs);
        idCertCurator.create(idcert);
    }

    // create an UpstreamConsumer from the imported ConsumerDto
    ConsumerType type = new ConsumerType();
    populateEntity(type, consumer.getType());

    Owner ownerToUse = new Owner();
    if (consumer.getOwner() != null) {
        populateEntity(ownerToUse, consumer.getOwner());
    }

    UpstreamConsumer uc = new UpstreamConsumer(consumer.getName(), ownerToUse, type,
        consumer.getUuid());
    uc.setWebUrl(consumer.getUrlWeb());
    uc.setApiUrl(consumer.getUrlApi());
    uc.setIdCert(idcert);
    uc.setContentAccessMode(consumer.getContentAccessMode());
    owner.setUpstreamConsumer(uc);

    curator.merge(owner);
}
use of org.candlepin.model.ConsumerType in project candlepin by candlepin.
the class ConsumerTypeImporter method store.
/**
 * Persists any consumer types from the given set that do not already exist.
 * Types whose label is already known to the curator are left untouched.
 *
 * @param consumerTypes Set of different consumer types.
 */
public void store(Set<ConsumerType> consumerTypes) {
    log.debug("Creating/updating consumer types");

    for (ConsumerType consumerType : consumerTypes) {
        // Only create types we don't already have; lookup is by label.
        if (curator.lookupByLabel(consumerType.getLabel()) == null) {
            curator.create(consumerType);
            // Parameterized logging instead of string concatenation so the message
            // is only built when debug logging is actually enabled (SLF4J idiom).
            log.debug("Created consumer type: {}", consumerType.getLabel());
        }
    }
}
use of org.candlepin.model.ConsumerType in project candlepin by candlepin.
the class ConsumerTypeImporter method createObject.
/**
 * Deserializes a {@link ConsumerType} from the given reader.
 * The resulting entity always has a null ID so it is treated as new on persist;
 * a missing manifest flag in the DTO defaults to false.
 *
 * @param mapper the object mapper used to deserialize the DTO
 * @param reader the source of the serialized consumer type
 * @return a new, unpersisted ConsumerType populated from the DTO
 * @throws IOException if deserialization fails
 */
public ConsumerType createObject(ObjectMapper mapper, Reader reader) throws IOException {
    ConsumerTypeDTO dto = mapper.readValue(reader, ConsumerTypeDTO.class);

    ConsumerType type = new ConsumerType();
    type.setId(null);
    type.setLabel(dto.getLabel());
    type.setManifest(Boolean.TRUE.equals(dto.isManifest()));

    return type;
}
use of org.candlepin.model.ConsumerType in project candlepin by candlepin.
the class CandlepinPoolManager method revokeEntitlements.
/**
 * Revokes the given set of entitlements.
 *
 * Runs as a single transaction: locks the affected pools, adjusts consumed/exported/
 * shared quantities, deletes source-derived and development pools, batch-deletes the
 * entitlements, then fires post-unbind actions and (optionally) recomputes compliance.
 *
 * @param entsToRevoke entitlements to revoke
 * @param alreadyDeletedPools pools to skip deletion as they have already been deleted
 * @param regenCertsAndStatuses if this revocation should also trigger regeneration of certificates
 * and recomputation of statuses. For performance reasons some callers might
 * choose to set this to false.
 */
@Transactional
@Traceable
public void revokeEntitlements(List<Entitlement> entsToRevoke, Set<String> alreadyDeletedPools, boolean regenCertsAndStatuses) {
    // No-op on null/empty input rather than failing.
    if (CollectionUtils.isEmpty(entsToRevoke)) {
        return;
    }
    log.debug("Starting batch revoke of {} entitlements", entsToRevoke.size());
    if (log.isTraceEnabled()) {
        log.trace("Entitlements IDs: {}", getEntIds(entsToRevoke));
    }
    // Pools created from these entitlements must be deleted along with them.
    Set<Pool> poolsToDelete = this.poolCurator.listBySourceEntitlements(entsToRevoke);
    log.debug("Found {} additional pools to delete from source entitlements", poolsToDelete.size());
    if (log.isTraceEnabled()) {
        log.trace("Additional pool IDs: {}", getPoolIds(poolsToDelete));
    }
    // Lock both the pools being deleted and the pools the entitlements came from,
    // since both will have quantities/rows modified below.
    List<Pool> poolsToLock = new ArrayList<>();
    poolsToLock.addAll(poolsToDelete);
    for (Entitlement ent : entsToRevoke) {
        poolsToLock.add(ent.getPool());
        // associated pool as well.
        // Development pools are deleted together with their entitlement.
        if (ent.getPool() != null && ent.getPool().isDevelopmentPool()) {
            poolsToDelete.add(ent.getPool());
        }
    }
    poolCurator.lockAndLoad(poolsToLock);
    log.info("Batch revoking {} entitlements", entsToRevoke.size());
    // Copy before mutating: we append the entitlements of every pool being deleted.
    entsToRevoke = new ArrayList<>(entsToRevoke);
    for (Pool pool : poolsToDelete) {
        for (Entitlement ent : pool.getEntitlements()) {
            ent.setDeletedFromPool(true);
            entsToRevoke.add(ent);
        }
    }
    log.debug("Adjusting consumed quantities on pools");
    List<Pool> poolsToSave = new ArrayList<>();
    Set<String> entIdsToRevoke = new HashSet<>();
    for (Entitlement ent : entsToRevoke) {
        // or just continue silently ignoring them?
        // Skip null/unpersisted entitlements rather than erroring.
        if (ent == null || ent.getId() == null) {
            continue;
        }
        // Collect the entitlement IDs to revoke seeing as we are iterating over them anyway.
        entIdsToRevoke.add(ent.getId());
        // We need to trigger lazy load of provided products
        // to have access to those products later in this method.
        Pool pool = ent.getPool();
        // Null quantity is treated as zero so the arithmetic below is safe.
        int entQuantity = ent.getQuantity() != null ? ent.getQuantity() : 0;
        pool.setConsumed(pool.getConsumed() - entQuantity);
        Consumer consumer = ent.getConsumer();
        ConsumerType ctype = this.consumerTypeCurator.getConsumerType(consumer);
        if (ctype != null) {
            // Manifest consumers count against "exported"; share consumers against "shared".
            if (ctype.isManifest()) {
                pool.setExported(pool.getExported() - entQuantity);
            } else if (ctype.isType(ConsumerTypeEnum.SHARE)) {
                pool.setShared(pool.getShared() - entQuantity);
            }
        }
        consumer.setEntitlementCount(consumer.getEntitlementCount() - entQuantity);
        consumerCurator.update(consumer);
        poolsToSave.add(pool);
    }
    poolCurator.updateAll(poolsToSave, false, false);
    /*
     * Before deleting the entitlements, we need to find out if there are any
     * modifier entitlements that need to have their certificates regenerated
     */
    if (regenCertsAndStatuses) {
        log.debug("Marking dependent entitlements as dirty...");
        int update = this.entitlementCurator.markDependentEntitlementsDirty(entIdsToRevoke);
        log.debug("{} dependent entitlements marked dirty.", update);
    }
    log.info("Starting batch delete of pools");
    poolCurator.batchDelete(poolsToDelete, alreadyDeletedPools);
    log.info("Starting batch delete of entitlements");
    entitlementCurator.batchDelete(entsToRevoke);
    log.info("Starting delete flush");
    entitlementCurator.flush();
    log.info("All deletes flushed successfully");
    // Group the revoked entitlements by consumer for stack updates and status recompute.
    Map<Consumer, List<Entitlement>> consumerSortedEntitlements = entitlementCurator.getDistinctConsumers(entsToRevoke);
    filterAndUpdateStackingEntitlements(consumerSortedEntitlements, alreadyDeletedPools);
    // post unbind actions
    for (Entitlement ent : entsToRevoke) {
        enforcer.postUnbind(ent.getConsumer(), this, ent);
    }
    if (!regenCertsAndStatuses) {
        log.info("Regeneration and status computation was not requested finishing batch revoke");
        sendDeletedEvents(entsToRevoke);
        return;
    }
    log.info("Recomputing status for {} consumers.", consumerSortedEntitlements.size());
    int i = 1;
    for (Consumer consumer : consumerSortedEntitlements.keySet()) {
        // Periodic flush keeps the persistence context from growing unbounded.
        if (i++ % 1000 == 0) {
            consumerCurator.flush();
        }
        complianceRules.getStatus(consumer);
    }
    consumerCurator.flush();
    log.info("All statuses recomputed.");
    sendDeletedEvents(entsToRevoke);
}
use of org.candlepin.model.ConsumerType in project candlepin by candlepin.
the class CandlepinPoolManager method deletePools.
/**
 * Deletes the given pools along with everything derived from them: pools derived from
 * the pools' entitlements, stack-derived pools left unentitled by the revocation, and
 * the entitlements themselves. Quantities on surviving, affected pools and consumers
 * are corrected afterwards, and deletion events are fired for every deleted pool.
 */
@Override
@Transactional
@Traceable
@SuppressWarnings("checkstyle:methodlength")
public void deletePools(Collection<Pool> pools, Collection<String> alreadyDeletedPoolIds) {
    // No-op on null/empty input.
    if (pools == null || pools.isEmpty()) {
        return;
    }
    log.info("Attempting to delete {} pools...", pools.size());
    // than they need to be and is resulting in running slow calculations multiple times.
    if (alreadyDeletedPoolIds == null) {
        alreadyDeletedPoolIds = new HashSet<>();
    }
    Set<String> poolIds = new HashSet<>();
    Set<String> entitlementIds = new HashSet<>();
    Owner owner = null;
    // Convert pools to pool IDs.
    // NOTE(review): the owner of the first pool is captured but never read below —
    // presumably a leftover; confirm before removing.
    log.info("Fetching related pools and entitlements...");
    for (Pool pool : pools) {
        if (owner == null) {
            owner = pool.getOwner();
        }
        poolIds.add(pool.getId());
    }
    // Fetch pools which are derived from the pools we're going to delete...
    poolIds.addAll(this.poolCurator.getDerivedPoolIdsForPools(poolIds));
    // Fetch related pools and entitlements (recursively)
    // Fixed-point loop: keep expanding the pool/entitlement closure until no new
    // pool IDs are discovered.
    Collection<String> pids = poolIds;
    int cachedSize;
    do {
        // Fetch entitlement IDs for our set of pools
        Collection<String> eids = this.poolCurator.getEntitlementIdsForPools(pids);
        // Fetch pools which are derived from these entitlements...
        pids = this.poolCurator.getPoolIdsForSourceEntitlements(eids);
        // Fetch stack derived pools which will be unentitled when we revoke entitlements
        // Impl note: This may occassionally miss stack derived pools in cases where our
        // entitlement count exceeds the IN block limitations. In those cases, we'll end
        // up doing a recursive call into this method, which sucks, but will still work.
        pids.addAll(this.poolCurator.getUnentitledStackDerivedPoolIds(eids));
        // Fetch pools which are derived from the pools we're going to delete...
        pids.addAll(this.poolCurator.getDerivedPoolIdsForPools(pids));
        // Add the new entitlement and pool IDs to our list of things to delete
        cachedSize = poolIds.size();
        entitlementIds.addAll(eids);
        poolIds.addAll(pids);
    } while (poolIds.size() != cachedSize);
    // TODO: Remove this and stop recursively calling into this method.
    if (alreadyDeletedPoolIds != null) {
        poolIds.removeAll(alreadyDeletedPoolIds);
    }
    // Lock pools we're going to delete (also, fetch them for event generation/slow deletes)
    pools = this.poolCurator.lockAndLoadByIds(poolIds);
    if (!pools.isEmpty()) {
        log.info("Locked {} pools for deletion...", pools.size());
        // Impl note:
        // There is a fair bit of duplicated work between the actions below this block and
        // methods like revokeEntitlements. However, the decision was made to decouple these
        // methods explicitly to avoid situations such as fetching collections of pools, getting
        // entitlements from them (a slow process in itself) and then passing it off to another
        // standalone method which repeats the process of fetching pools and related entitlements.
        //
        // More work can be done in revokeEntitlements to optimize that method and maybe make it
        // slightly more generic so that this work can be offloaded to it again. Though, at the time
        // of writing, that's no small undertaking. Even changing this method has far-reaching
        // consequences when trying to remove direct uses of entities as far as interoperability is
        // concerned. Going forward we need to be more aware of the amount of duplication we're
        // adding to our code when writing standlone/generic utility methods and linking them
        // together, and perhaps take steps to avoid getting into situations like these two methods.
        // Fetch the list of pools which are related to the entitlements but are *not* being
        // deleted. We'll need to update the quantities on these.
        Collection<String> affectedPoolIds = this.poolCurator.getPoolIdsForEntitlements(entitlementIds);
        affectedPoolIds.removeAll(poolIds);
        // Fetch entitlements (uggh).
        // TODO: Stop doing this. Update the bits below to not use the entities directly and
        // do the updates via queries.
        Collection<Entitlement> entitlements = !entitlementIds.isEmpty() ? this.entitlementCurator.listAllByIds(entitlementIds).list() : Collections.<Entitlement>emptySet();
        // Mark remaining dependent entitlements dirty for this consumer
        this.entitlementCurator.markDependentEntitlementsDirty(entitlementIds);
        // Unlink the pools and entitlements we're about to delete so we don't error out while
        // trying to delete entitlements.
        this.poolCurator.clearPoolSourceEntitlementRefs(poolIds);
        // Revoke/delete entitlements
        if (!entitlements.isEmpty()) {
            log.info("Revoking {} entitlements...", entitlements.size());
            this.entitlementCurator.batchDelete(entitlements);
            this.entitlementCurator.flush();
            log.info("Entitlements successfully revoked");
        } else {
            log.info("Skipping entitlement revocation; no entitlements to revoke");
        }
        // Delete pools
        log.info("Deleting {} pools...", pools.size());
        this.poolCurator.batchDelete(pools, alreadyDeletedPoolIds);
        this.poolCurator.flush();
        log.info("Pools successfully deleted");
        if (!entitlements.isEmpty()) {
            // Update entitlement counts on affected, non-deleted pools
            log.info("Updating entitlement counts on remaining, affected pools...");
            Map<Consumer, List<Entitlement>> consumerStackedEnts = new HashMap<>();
            List<Pool> poolsToSave = new LinkedList<>();
            Set<String> stackIds = new HashSet<>();
            for (Entitlement entitlement : entitlements) {
                // Since we're sifting through these already, let's also sort them into consumer lists
                // for some of the other methods we'll be calling later
                Consumer consumer = entitlement.getConsumer();
                Pool pool = entitlement.getPool();
                List<Entitlement> stackedEntitlements = consumerStackedEnts.get(consumer);
                if (stackedEntitlements == null) {
                    stackedEntitlements = new LinkedList<>();
                    consumerStackedEnts.put(consumer, stackedEntitlements);
                }
                // Only non-derived pools with a stacking ID participate in stack updates.
                if (!"true".equals(pool.getAttributeValue(Pool.Attributes.DERIVED_POOL)) && pool.hasProductAttribute(Product.Attributes.STACKING_ID)) {
                    stackedEntitlements.add(entitlement);
                    stackIds.add(entitlement.getPool().getStackId());
                }
                // Update quantities if the entitlement quantity is non-zero
                int quantity = entitlement.getQuantity() != null ? entitlement.getQuantity() : 0;
                if (quantity != 0) {
                    // Update the pool quantities if we didn't delete it
                    if (affectedPoolIds.contains(pool.getId())) {
                        pool.setConsumed(pool.getConsumed() - quantity);
                        poolsToSave.add(pool);
                    }
                    // Update entitlement counts for affected consumers...
                    consumer.setEntitlementCount(consumer.getEntitlementCount() - quantity);
                    // Set the number exported if we're working with a manifest distributor
                    ConsumerType ctype = this.consumerTypeCurator.getConsumerType(consumer);
                    if (ctype != null && ctype.isManifest()) {
                        pool.setExported(pool.getExported() - quantity);
                    }
                }
            }
            this.poolCurator.updateAll(poolsToSave, false, false);
            this.consumerCurator.updateAll(consumerStackedEnts.keySet(), false, false);
            this.consumerCurator.flush();
            log.info("Entitlement counts successfully updated for {} pools and {} consumers", poolsToSave.size(), consumerStackedEnts.size());
            // Update stacked entitlements for affected consumers(???)
            if (!stackIds.isEmpty()) {
                // Get consumer + pool tuples for stack ids
                Map<String, Set<String>> consumerStackDerivedPoolIds = this.poolCurator.getConsumerStackDerivedPoolIdMap(stackIds);
                if (!consumerStackDerivedPoolIds.isEmpty()) {
                    log.info("Updating stacked entitlements for {} consumers...", consumerStackDerivedPoolIds.size());
                    for (Consumer consumer : consumerStackedEnts.keySet()) {
                        Set<String> subPoolIds = consumerStackDerivedPoolIds.get(consumer.getId());
                        if (subPoolIds != null && !subPoolIds.isEmpty()) {
                            // Resolve pool IDs...
                            Collection<Pool> subPools = this.poolCurator.listAllByIds(subPoolIds).list();
                            // Invoke the rules engine to update the affected pools
                            if (subPools != null && !subPools.isEmpty()) {
                                log.debug("Updating {} stacking pools for consumer: {}", subPools.size(), consumer);
                                this.poolRules.updatePoolsFromStack(consumer, subPools, alreadyDeletedPoolIds, true);
                            }
                        }
                    }
                }
            }
            this.consumerCurator.flush();
            // Hydrate remaining consumer pools so we can skip some extra work during serialization
            Set<Pool> poolsToHydrate = new HashSet<>();
            for (Consumer consumer : consumerStackedEnts.keySet()) {
                for (Entitlement entitlement : consumer.getEntitlements()) {
                    poolsToHydrate.add(entitlement.getPool());
                }
            }
            this.productCurator.hydratePoolProvidedProducts(poolsToHydrate);
            // Fire post-unbind events for revoked entitlements
            log.info("Firing post-unbind events for {} entitlements...", entitlements.size());
            for (Entitlement entitlement : entitlements) {
                this.enforcer.postUnbind(entitlement.getConsumer(), this, entitlement);
            }
            // Recalculate status for affected consumers
            log.info("Recomputing status for {} consumers", consumerStackedEnts.size());
            int i = 0;
            for (Consumer consumer : consumerStackedEnts.keySet()) {
                this.complianceRules.getStatus(consumer);
                // Periodic flush keeps the persistence context from growing unbounded.
                if (++i % 1000 == 0) {
                    this.consumerCurator.flush();
                }
            }
            this.consumerCurator.flush();
            log.info("All statuses recomputed");
        }
        // Impl note:
        // We don't need to fire entitlement revocation events, since they're all being revoked as
        // a consequence of the pools being deleted.
        // Fire pool deletion events
        // This part hurts so much. Because we output the whole entity, we have to fetch the bloody
        // things before we delete them.
        log.info("Firing pool deletion events for {} pools...", pools.size());
        for (Pool pool : pools) {
            this.sink.queueEvent(this.eventFactory.poolDeleted(pool));
        }
    } else {
        log.info("Skipping pool deletion; no pools to delete");
    }
}
Aggregations