Use of org.candlepin.auth.Principal in project candlepin by candlepin.
The class ConsumerCuratorPermissionsTest, method setupEditMyConsumersViewAllPrincipal:
private User setupEditMyConsumersViewAllPrincipal() {
    Set<Permission> perms = new HashSet<>();
    User u = new User("fakeuser", "dontcare");
    perms.add(new UsernameConsumersPermission(u, owner));
    perms.add(new OwnerPermission(owner, Access.READ_ONLY));
    Principal p = new UserPrincipal(u.getUsername(), perms, false);
    setupPrincipal(p);
    return u;
}
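As a point of reference, a minimal sketch (not from the source) of asserting against a principal built this way, assuming JUnit assertions and the constructors shown above, where the third UserPrincipal argument is the super-admin flag:

User u = new User("fakeuser", "dontcare");
Set<Permission> perms = new HashSet<>();
perms.add(new UsernameConsumersPermission(u, owner));
perms.add(new OwnerPermission(owner, Access.READ_ONLY));
Principal p = new UserPrincipal(u.getUsername(), perms, false);
// Constructed with the super-admin flag set to false, so the principal
// should be limited to its explicit permissions rather than full access:
assertFalse(p.hasFullAccess());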
Use of org.candlepin.auth.Principal in project candlepin by candlepin.
The class SuperAdminAuthorizationFilter, method runFilter:
@Override
public void runFilter(ContainerRequestContext requestContext) {
    log.debug("Authorization check for {}", requestContext.getUriInfo().getPath());
    Principal principal = (Principal) requestContext.getSecurityContext().getUserPrincipal();
    ResourceInfo resourceInfo = ResteasyProviderFactory.getContextData(ResourceInfo.class);
    Method method = resourceInfo.getResourceMethod();
    if (!principal.hasFullAccess()) {
        denyAccess(principal, method);
    }
}
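For comparison, a standalone sketch of the same super-admin gate using only plain JAX-RS types; the class name and the choice of ForbiddenException are assumptions, since Candlepin's filter extends its own base class and delegates denial to denyAccess():

import javax.ws.rs.ForbiddenException;
import javax.ws.rs.container.ContainerRequestContext;
import javax.ws.rs.container.ContainerRequestFilter;

// Hypothetical illustration only, not the project's filter hierarchy.
public class RequireFullAccessFilter implements ContainerRequestFilter {
    @Override
    public void filter(ContainerRequestContext requestContext) {
        Principal principal =
            (Principal) requestContext.getSecurityContext().getUserPrincipal();
        if (principal == null || !principal.hasFullAccess()) {
            throw new ForbiddenException("Super-admin access required");
        }
    }
}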
Use of org.candlepin.auth.Principal in project candlepin by candlepin.
The class VerifyAuthorizationFilter, method runFilter:
@Override
public void runFilter(ContainerRequestContext requestContext) {
    HttpRequest request = ResteasyProviderFactory.getContextData(HttpRequest.class);
    Principal principal = (Principal) requestContext.getSecurityContext().getUserPrincipal();
    ResourceInfo resourceInfo = ResteasyProviderFactory.getContextData(ResourceInfo.class);
    Method method = resourceInfo.getResourceMethod();
    if (log.isDebugEnabled()) {
        log.debug("Authorization check for {} mapping to {}.{}",
            requestContext.getUriInfo().getPath(),
            method.getDeclaringClass().getName(),
            method.getName());
    }
    Map<Verify, Object> argMap = getArguments(request, method);
    // Couldn't find a match in Resteasy for method
    if (argMap.isEmpty()) {
        /* It would also be possible to get here if a super-admin only method
         * were inadvertently being filtered through this filter. Normally the
         * AuthorizationFeature takes care of sending methods without any @Verify
         * annotations through the SuperAdminAuthorizationFilter */
        throw new IseException("Could not get parameters for " + method);
    }
    Access defaultAccess = getDefaultAccess(method);
    if (!hasAccess(argMap, principal, defaultAccess)) {
        denyAccess(principal, method);
    }
}
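The block comment above refers to an AuthorizationFeature that sends methods without any @Verify parameter through the SuperAdminAuthorizationFilter. A hypothetical sketch of how such routing could look as a JAX-RS DynamicFeature (the real feature's name, injection, and wiring are not shown in the snippet):

import java.lang.annotation.Annotation;
import java.lang.reflect.Method;
import javax.ws.rs.container.DynamicFeature;
import javax.ws.rs.container.ResourceInfo;
import javax.ws.rs.core.FeatureContext;

// Hypothetical routing feature; the two filter fields would be injected in practice.
public class AuthorizationRoutingFeature implements DynamicFeature {
    private SuperAdminAuthorizationFilter superAdminFilter;
    private VerifyAuthorizationFilter verifyFilter;

    @Override
    public void configure(ResourceInfo resourceInfo, FeatureContext context) {
        Method method = resourceInfo.getResourceMethod();
        boolean hasVerify = false;
        for (Annotation[] paramAnnotations : method.getParameterAnnotations()) {
            for (Annotation annotation : paramAnnotations) {
                if (annotation.annotationType() == Verify.class) {
                    hasVerify = true;
                }
            }
        }
        // Methods without any @Verify parameter fall back to the super-admin-only filter.
        context.register(hasVerify ? verifyFilter : superAdminFilter);
    }
}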
Use of org.candlepin.auth.Principal in project candlepin by candlepin.
The class HypervisorUpdateJob, method toExecute:
/**
 * {@inheritDoc}
 *
 * Executes {@link ConsumerResource#create(org.candlepin.model.Consumer, org.candlepin.auth.Principal,
 * java.lang.String, java.lang.String, java.lang.String)}
 * Executes {@link ConsumerResource#performConsumerUpdates(java.lang.String, org.candlepin.model.Consumer)}
 * as a pinsetter job.
 *
 * @param context the job's execution context
 */
@Transactional
@SuppressWarnings({ "checkstyle:indentation", "checkstyle:methodlength" })
public void toExecute(JobExecutionContext context) throws JobExecutionException {
    try {
        JobDataMap map = context.getMergedJobDataMap();
        String ownerKey = map.getString(JobStatus.TARGET_ID);
        Boolean create = map.getBoolean(CREATE);
        Principal principal = (Principal) map.get(PRINCIPAL);
        String jobReporterId = map.getString(REPORTER_ID);
        HypervisorUpdateResultUuids result = new HypervisorUpdateResultUuids();
        Owner owner = ownerCurator.lookupByKey(ownerKey);
        if (owner == null) {
            context.setResult("Nothing to do. Owner does not exist");
log.warn("Hypervisor update attempted against non-existent org id \"{0}\"", ownerKey);
            return;
        }
        if (owner.isAutobindDisabled()) {
            log.debug("Could not update host/guest mapping. Auto-Attach is disabled for owner {}",
                owner.getKey());
            throw new BadRequestException(i18n.tr(
                "Could not update host/guest mapping. Auto-attach is disabled for owner {0}.",
                owner.getKey()));
        }
        byte[] data = (byte[]) map.get(DATA);
        String json = decompress(data);
        HypervisorList hypervisors = (HypervisorList) Util.fromJson(json, HypervisorList.class);
        log.debug("Hypervisor consumers for create/update: {}", hypervisors.getHypervisors().size());
log.debug("Updating hypervisor consumers for org {0}", ownerKey);
        Set<String> hosts = new HashSet<>();
        Set<String> guests = new HashSet<>();
        Map<String, Consumer> incomingHosts = new HashMap<>();
        parseHypervisorList(hypervisors, hosts, guests, incomingHosts);
        // TODO Need to ensure that we retrieve existing guestIds from the DB before continuing.
        // Maps virt hypervisor ID to registered consumer for that hypervisor, should one exist:
        VirtConsumerMap hypervisorConsumersMap = consumerCurator.getHostConsumersMap(owner, hosts);
        Map<String, GuestId> guestIds = consumerCurator.getGuestIdMap(guests, owner);
        for (String hypervisorId : hosts) {
            Consumer knownHost = hypervisorConsumersMap.get(hypervisorId);
            Consumer incoming = syncGuestIds(incomingHosts.get(hypervisorId), guestIds);
            Consumer reportedOnConsumer = null;
            if (knownHost == null) {
                if (!create) {
                    result.failed(hypervisorId,
                        "Unable to find hypervisor with id " + hypervisorId + " in org " + ownerKey);
                }
                else {
                    log.debug("Registering new host consumer for hypervisor ID: {}", hypervisorId);
                    Consumer newHost = createConsumerForHypervisorId(hypervisorId, jobReporterId, owner,
                        principal, incoming);
                    // Since we just created this new consumer, we can migrate the guests immediately
                    GuestMigration guestMigration = new GuestMigration(consumerCurator)
                        .buildMigrationManifest(incoming, newHost);
                    // Now that we have the new consumer persisted, immediately migrate the guests to it
                    if (guestMigration.isMigrationPending()) {
                        guestMigration.migrate(false);
                    }
                    hypervisorConsumersMap.add(hypervisorId, newHost);
                    result.created(newHost);
                    reportedOnConsumer = newHost;
                }
            }
            else {
                reportedOnConsumer = knownHost;
                if (jobReporterId != null && knownHost.getHypervisorId() != null &&
                    hypervisorId.equalsIgnoreCase(knownHost.getHypervisorId().getHypervisorId()) &&
                    knownHost.getHypervisorId().getReporterId() != null &&
                    !jobReporterId.equalsIgnoreCase(knownHost.getHypervisorId().getReporterId())) {
                    log.debug("Reporter changed for Hypervisor {} of Owner {} from {} to {}",
                        hypervisorId, ownerKey, knownHost.getHypervisorId().getReporterId(),
                        jobReporterId);
                }
                boolean typeUpdated = false;
                if (!hypervisorType.getId().equals(knownHost.getTypeId())) {
                    typeUpdated = true;
                    knownHost.setType(hypervisorType);
                }
                GuestMigration guestMigration = new GuestMigration(consumerCurator)
                    .buildMigrationManifest(incoming, knownHost);
                boolean factsUpdated = consumerResource.checkForFactsUpdate(knownHost, incoming);
                if (factsUpdated || guestMigration.isMigrationPending() || typeUpdated) {
                    knownHost.setLastCheckin(new Date());
                    guestMigration.migrate(false);
                    result.updated(knownHost);
                }
                else {
                    result.unchanged(knownHost);
                }
            }
            // update reporter id if it changed
            if (jobReporterId != null && reportedOnConsumer != null &&
                reportedOnConsumer.getHypervisorId() != null &&
                (reportedOnConsumer.getHypervisorId().getReporterId() == null ||
                !jobReporterId.contentEquals(reportedOnConsumer.getHypervisorId().getReporterId()))) {
                reportedOnConsumer.getHypervisorId().setReporterId(jobReporterId);
            }
            else if (jobReporterId == null) {
                log.debug("hypervisor checkin reported asynchronously without reporter id " +
                    "for hypervisor:{} of owner:{}", hypervisorId, ownerKey);
            }
        }
        for (Consumer consumer : hypervisorConsumersMap.getConsumers()) {
            consumer = result.wasCreated(consumer) ?
                consumerCurator.create(consumer, false) :
                consumerCurator.update(consumer, false);
        }
        consumerCurator.flush();
        log.info("Summary for report from {} by principal {}\n {}", jobReporterId, principal, result);
        context.setResult(result);
    }
    catch (Exception e) {
        log.error("HypervisorUpdateJob encountered a problem.", e);
        context.setResult(e.getMessage());
        throw new JobExecutionException(e.getMessage(), e, false);
    }
}
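toExecute() reads everything it needs from the merged JobDataMap. A minimal sketch of the corresponding writes on the scheduling side, assuming Quartz's JobDataMap and that the constants read above (JobStatus.TARGET_ID, CREATE, PRINCIPAL, REPORTER_ID, DATA) are accessible; how the payload is actually built and compressed is not shown in the snippet:

import org.quartz.JobDataMap;

// Hypothetical setup mirroring the reads in toExecute(); constant visibility and
// the compression format expected by decompress() are assumptions.
JobDataMap map = new JobDataMap();
map.put(JobStatus.TARGET_ID, owner.getKey());          // owner key, looked up via ownerCurator
map.put(HypervisorUpdateJob.CREATE, true);             // whether unknown hypervisors may be registered
map.put(HypervisorUpdateJob.PRINCIPAL, principal);     // org.candlepin.auth.Principal driving the update
map.put(HypervisorUpdateJob.REPORTER_ID, reporterId);  // may be null for async check-ins
map.put(HypervisorUpdateJob.DATA, compressedJson);     // byte[] payload consumed by decompress()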
Use of org.candlepin.auth.Principal in project candlepin by candlepin.
The class PinsetterJobListener, method jobToBeExecuted:
@Override
public void jobToBeExecuted(JobExecutionContext context) {
    Principal principal = (Principal) context.getMergedJobDataMap().get(PRINCIPAL_KEY);
    ResteasyProviderFactory.pushContext(Principal.class, principal);
    try {
        unitOfWork.begin();
        updateJob(context);
    }
    catch (Exception e) {
        log.error("jobToBeExecuted encountered a problem. Usually means " +
            "there was a problem storing the job status. Job will run.", e);
    }
    finally {
        unitOfWork.end();
    }
}
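For the principal to be available here, the scheduling side must have placed it in the job's data map under PRINCIPAL_KEY. A hypothetical sketch using Quartz's builder API (the job class, group name, and key visibility are assumptions, not shown in the snippet):

import org.quartz.JobBuilder;
import org.quartz.JobDetail;

// Hypothetical wiring: store the current principal so jobToBeExecuted()
// can push it back into the RESTEasy context when the job runs.
JobDetail detail = JobBuilder.newJob(HypervisorUpdateJob.class)
    .withIdentity("hypervisor-update", "async")
    .build();
detail.getJobDataMap().put(PinsetterJobListener.PRINCIPAL_KEY, principal);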