use of org.quartz.JobExecutionException in project archiva by apache.
the class RepositoryTaskJob method execute.
/**
* Execute the discoverer and the indexer.
*
* @param context
* @throws org.quartz.JobExecutionException
*/
@Override
public void execute(JobExecutionContext context) throws JobExecutionException {
    JobDataMap dataMap = context.getJobDetail().getJobDataMap();
    setJobDataMap(dataMap);

    TaskQueue taskQueue = (TaskQueue) dataMap.get(DefaultRepositoryArchivaTaskScheduler.TASK_QUEUE);
    String repositoryId = (String) dataMap.get(DefaultRepositoryArchivaTaskScheduler.TASK_REPOSITORY);

    RepositoryTask task = new RepositoryTask();
    task.setRepositoryId(repositoryId);

    try {
        taskQueue.put(task);
    } catch (TaskQueueException e) {
        throw new JobExecutionException(e);
    }
}
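For context, a minimal sketch of how such a job might be registered with a Quartz scheduler so that execute() finds the task queue and repository id in its JobDataMap. It only uses the standard JobBuilder, TriggerBuilder, and CronScheduleBuilder APIs; the identity names, the cron expression, and the scheduleRepositoryScan helper itself are illustrative assumptions, not Archiva's actual wiring.

static void scheduleRepositoryScan(Scheduler scheduler, TaskQueue taskQueue, String repositoryId)
    throws SchedulerException {

    // The job reads both of these values back out of its JobDataMap in execute().
    JobDetail jobDetail = JobBuilder.newJob(RepositoryTaskJob.class)
        .withIdentity(repositoryId + ":scan", "archiva")
        .build();
    jobDetail.getJobDataMap().put(DefaultRepositoryArchivaTaskScheduler.TASK_QUEUE, taskQueue);
    jobDetail.getJobDataMap().put(DefaultRepositoryArchivaTaskScheduler.TASK_REPOSITORY, repositoryId);

    // Fire once a day at midnight; the real cron expression would come from repository configuration.
    Trigger trigger = TriggerBuilder.newTrigger()
        .withIdentity(repositoryId + ":scan-trigger", "archiva")
        .withSchedule(CronScheduleBuilder.cronSchedule("0 0 0 * * ?"))
        .build();

    scheduler.scheduleJob(jobDetail, trigger);
}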
use of org.quartz.JobExecutionException in project candlepin by candlepin.
the class CertificateRevocationListTask method toExecute.
public void toExecute(JobExecutionContext ctx) throws JobExecutionException {
    String filePath = config.getString(ConfigProperties.CRL_FILE_PATH);
    log.info("Executing CRL Job. CRL filePath={}", filePath);

    if (filePath == null) {
        throw new JobExecutionException("Invalid " + ConfigProperties.CRL_FILE_PATH, false);
    }

    try {
        File crlFile = new File(filePath);
        this.crlFileUtil.syncCRLWithDB(crlFile);
    } catch (IOException e) {
        log.error("IOException:", e);
        throw new JobExecutionException(e, false);
    }
}
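The boolean passed to these JobExecutionException constructors is Quartz's refireImmediately flag; false tells the scheduler not to re-run the job right away. A minimal sketch of the opposite choice, assuming a transient I/O failure that should be retried immediately (the variables are the ones from the snippet above):

try {
    this.crlFileUtil.syncCRLWithDB(new File(filePath));
} catch (IOException e) {
    log.error("IOException:", e);
    JobExecutionException jee = new JobExecutionException(e);
    // Ask Quartz to execute this job again immediately after it returns.
    jee.setRefireImmediately(true);
    throw jee;
}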
use of org.quartz.JobExecutionException in project candlepin by candlepin.
the class ExportJob method toExecute.
@Override
public void toExecute(JobExecutionContext context) throws JobExecutionException {
    JobDataMap map = context.getMergedJobDataMap();
    String consumerUuid = map.getString(JobStatus.TARGET_ID);
    String cdnLabel = map.getString(CDN_LABEL);
    String webAppPrefix = map.getString(WEBAPP_PREFIX);
    String apiUrl = map.getString(API_URL);
    Map<String, String> extensionData = (Map<String, String>) map.get(EXTENSION_DATA);

    log.info("Starting async export for {}", consumerUuid);
    try {
        ExportResult result = manifestManager.generateAndStoreManifest(
            consumerUuid, cdnLabel, webAppPrefix, apiUrl, extensionData);
        context.setResult(result);
        log.info("Async export complete.");
    } catch (Exception e) {
        throw new JobExecutionException(e.getMessage(), e, false);
    }
}
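A minimal sketch of how the merged JobDataMap read above could be populated when the export is scheduled. The key constants are the ones the job reads; the forConsumer helper and the job identity are hypothetical and may differ from Candlepin's actual scheduling path.

// Hypothetical scheduling helper; shown only to illustrate where the map entries come from.
static JobDetail forConsumer(String consumerUuid, String cdnLabel, String webAppPrefix,
    String apiUrl, Map<String, String> extensionData) {

    JobDataMap map = new JobDataMap();
    map.put(JobStatus.TARGET_ID, consumerUuid);
    map.put(CDN_LABEL, cdnLabel);
    map.put(WEBAPP_PREFIX, webAppPrefix);
    map.put(API_URL, apiUrl);
    map.put(EXTENSION_DATA, extensionData);

    return JobBuilder.newJob(ExportJob.class)
        .withIdentity("export_" + consumerUuid)
        .usingJobData(map)
        .build();
}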
use of org.quartz.JobExecutionException in project candlepin by candlepin.
the class HypervisorUpdateJob method toExecute.
/**
 * {@inheritDoc}
 *
 * Executes {@link ConsumerResource#create(org.candlepin.model.Consumer, org.candlepin.auth.Principal,
 * java.lang.String, java.lang.String, java.lang.String)} and
 * {@link ConsumerResource#performConsumerUpdates(java.lang.String, org.candlepin.model.Consumer)}
 * as a pinsetter job.
 *
 * @param context the job's execution context
 */
@Transactional
@SuppressWarnings({ "checkstyle:indentation", "checkstyle:methodlength" })
public void toExecute(JobExecutionContext context) throws JobExecutionException {
    try {
        JobDataMap map = context.getMergedJobDataMap();
        String ownerKey = map.getString(JobStatus.TARGET_ID);
        Boolean create = map.getBoolean(CREATE);
        Principal principal = (Principal) map.get(PRINCIPAL);
        String jobReporterId = map.getString(REPORTER_ID);

        HypervisorUpdateResultUuids result = new HypervisorUpdateResultUuids();
        Owner owner = ownerCurator.lookupByKey(ownerKey);
        if (owner == null) {
            context.setResult("Nothing to do. Owner does not exist");
            log.warn("Hypervisor update attempted against non-existent org id \"{}\"", ownerKey);
            return;
        }

        if (owner.isAutobindDisabled()) {
            log.debug("Could not update host/guest mapping. Auto-Attach is disabled for owner {}",
                owner.getKey());
            throw new BadRequestException(i18n.tr(
                "Could not update host/guest mapping. Auto-attach is disabled for owner {0}.",
                owner.getKey()));
        }

        byte[] data = (byte[]) map.get(DATA);
        String json = decompress(data);
        HypervisorList hypervisors = (HypervisorList) Util.fromJson(json, HypervisorList.class);
        log.debug("Hypervisor consumers for create/update: {}", hypervisors.getHypervisors().size());
        log.debug("Updating hypervisor consumers for org {}", ownerKey);

        Set<String> hosts = new HashSet<>();
        Set<String> guests = new HashSet<>();
        Map<String, Consumer> incomingHosts = new HashMap<>();
        parseHypervisorList(hypervisors, hosts, guests, incomingHosts);
        // TODO Need to ensure that we retrieve existing guestIds from the DB before continuing.

        // Maps virt hypervisor ID to registered consumer for that hypervisor, should one exist:
        VirtConsumerMap hypervisorConsumersMap = consumerCurator.getHostConsumersMap(owner, hosts);
        Map<String, GuestId> guestIds = consumerCurator.getGuestIdMap(guests, owner);

        for (String hypervisorId : hosts) {
            Consumer knownHost = hypervisorConsumersMap.get(hypervisorId);
            Consumer incoming = syncGuestIds(incomingHosts.get(hypervisorId), guestIds);
            Consumer reportedOnConsumer = null;

            if (knownHost == null) {
                if (!create) {
                    result.failed(hypervisorId,
                        "Unable to find hypervisor with id " + hypervisorId + " in org " + ownerKey);
                } else {
                    log.debug("Registering new host consumer for hypervisor ID: {}", hypervisorId);
                    Consumer newHost = createConsumerForHypervisorId(
                        hypervisorId, jobReporterId, owner, principal, incoming);

                    // Since we just created this new consumer, we can migrate the guests immediately.
                    GuestMigration guestMigration = new GuestMigration(consumerCurator)
                        .buildMigrationManifest(incoming, newHost);

                    // Now that we have the new consumer persisted, immediately migrate the guests to it.
                    if (guestMigration.isMigrationPending()) {
                        guestMigration.migrate(false);
                    }

                    hypervisorConsumersMap.add(hypervisorId, newHost);
                    result.created(newHost);
                    reportedOnConsumer = newHost;
                }
            } else {
                reportedOnConsumer = knownHost;
                if (jobReporterId != null && knownHost.getHypervisorId() != null &&
                    hypervisorId.equalsIgnoreCase(knownHost.getHypervisorId().getHypervisorId()) &&
                    knownHost.getHypervisorId().getReporterId() != null &&
                    !jobReporterId.equalsIgnoreCase(knownHost.getHypervisorId().getReporterId())) {

                    log.debug("Reporter changed for Hypervisor {} of Owner {} from {} to {}",
                        hypervisorId, ownerKey, knownHost.getHypervisorId().getReporterId(),
                        jobReporterId);
                }

                boolean typeUpdated = false;
                if (!hypervisorType.getId().equals(knownHost.getTypeId())) {
                    typeUpdated = true;
                    knownHost.setType(hypervisorType);
                }

                GuestMigration guestMigration = new GuestMigration(consumerCurator)
                    .buildMigrationManifest(incoming, knownHost);
                boolean factsUpdated = consumerResource.checkForFactsUpdate(knownHost, incoming);

                if (factsUpdated || guestMigration.isMigrationPending() || typeUpdated) {
                    knownHost.setLastCheckin(new Date());
                    guestMigration.migrate(false);
                    result.updated(knownHost);
                } else {
                    result.unchanged(knownHost);
                }
            }

            // Update the reporter id if it changed.
            if (jobReporterId != null && reportedOnConsumer != null &&
                reportedOnConsumer.getHypervisorId() != null &&
                (reportedOnConsumer.getHypervisorId().getReporterId() == null ||
                !jobReporterId.contentEquals(reportedOnConsumer.getHypervisorId().getReporterId()))) {

                reportedOnConsumer.getHypervisorId().setReporterId(jobReporterId);
            } else if (jobReporterId == null) {
                log.debug("hypervisor checkin reported asynchronously without reporter id " +
                    "for hypervisor:{} of owner:{}", hypervisorId, ownerKey);
            }
        }

        for (Consumer consumer : hypervisorConsumersMap.getConsumers()) {
            consumer = result.wasCreated(consumer) ?
                consumerCurator.create(consumer, false) :
                consumerCurator.update(consumer, false);
        }

        consumerCurator.flush();
        log.info("Summary for report from {} by principal {}\n {}", jobReporterId, principal, result);
        context.setResult(result);
    } catch (Exception e) {
        log.error("HypervisorUpdateJob encountered a problem.", e);
        context.setResult(e.getMessage());
        throw new JobExecutionException(e.getMessage(), e, false);
    }
}
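The hypervisor list arrives under DATA as a compressed byte[] and is inflated back into JSON by decompress(data). A minimal sketch of a matching compress/decompress pair, assuming a plain java.util.zip Deflater/Inflater scheme; the project's actual helpers may differ.

static byte[] compress(String text) {
    byte[] input = text.getBytes(StandardCharsets.UTF_8);
    Deflater deflater = new Deflater();
    deflater.setInput(input);
    deflater.finish();

    ByteArrayOutputStream out = new ByteArrayOutputStream(input.length);
    byte[] buffer = new byte[1024];
    while (!deflater.finished()) {
        // deflate() returns the number of compressed bytes written into the buffer.
        out.write(buffer, 0, deflater.deflate(buffer));
    }
    deflater.end();
    return out.toByteArray();
}

static String decompress(byte[] data) throws DataFormatException {
    Inflater inflater = new Inflater();
    inflater.setInput(data);

    ByteArrayOutputStream out = new ByteArrayOutputStream(data.length);
    byte[] buffer = new byte[1024];
    while (!inflater.finished()) {
        out.write(buffer, 0, inflater.inflate(buffer));
    }
    inflater.end();
    return new String(out.toByteArray(), StandardCharsets.UTF_8);
}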
use of org.quartz.JobExecutionException in project candlepin by candlepin.
the class EntitleByProductsJob method toExecute.
@SuppressWarnings("unchecked")
@Override
public void toExecute(JobExecutionContext ctx) throws JobExecutionException {
    try {
        JobDataMap map = ctx.getMergedJobDataMap();
        String uuid = (String) map.get(JobStatus.TARGET_ID);
        Date entitleDate = (Date) map.get("entitle_date");
        String[] prodIds = (String[]) map.get("product_ids");
        Collection<String> fromPools = (Collection<String>) map.get("from_pools");

        List<Entitlement> ents = entitler.bindByProducts(prodIds, uuid, entitleDate, fromPools);
        entitler.sendEvents(ents);
        ctx.setResult("Entitlements created for owner");
    }
    // Re-throw any exception as a JobExecutionException so that the job
    // will be properly cleaned up on failure.
    catch (Exception e) {
        log.error("EntitlerJob encountered a problem.", e);
        ctx.setResult(e.getMessage());
        throw new JobExecutionException(e.getMessage(), e, false);
    }
}
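The keys read above suggest how a request for this job could be assembled before scheduling. The forProducts helper below is a hypothetical sketch for illustration, not the project's actual factory method:

// Hypothetical helper; the map keys mirror what toExecute() reads above.
static JobDetail forProducts(String consumerUuid, String[] productIds, Date entitleDate,
    Collection<String> fromPools) {

    JobDataMap map = new JobDataMap();
    map.put(JobStatus.TARGET_ID, consumerUuid);
    map.put("product_ids", productIds);
    map.put("entitle_date", entitleDate);
    map.put("from_pools", fromPools);

    return JobBuilder.newJob(EntitleByProductsJob.class)
        .withIdentity("bind_by_products_" + consumerUuid)
        .usingJobData(map)
        .build();
}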