Example 46 with JobExecutionException

Use of org.quartz.JobExecutionException in project oap by oaplatform.

From the class RunnableJob, method execute.

@Override
public synchronized void execute(JobExecutionContext context) throws JobExecutionException {
    try {
        log.trace("executing {}", jobDetail);
        runningThread.set(Thread.currentThread());
        // Thread.interrupted() both checks and clears this thread's interrupt flag
        if (!Thread.interrupted())
            runnable.run();
    } catch (Exception e) {
        throw new JobExecutionException(e);
    } finally {
        runningThread.set(null);
        // wake any thread wait()ing on this job instance
        this.notify();
    }
}
Also used : JobExecutionException (org.quartz.JobExecutionException)
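
The pattern above adapts a plain Runnable to Quartz by wrapping any failure in a JobExecutionException. A minimal self-contained sketch of the same idea (WrappedRunnableJob and its hard-coded Runnable are hypothetical, not part of the oap source):

import org.quartz.Job;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;

public class WrappedRunnableJob implements Job {

    // hypothetical: real code would inject the Runnable, e.g. via the JobDataMap
    private final Runnable runnable = () -> System.out.println("doing the work");

    @Override
    public void execute(JobExecutionContext context) throws JobExecutionException {
        try {
            runnable.run();
        } catch (Exception e) {
            // wrap so Quartz records a proper job failure instead of a raw RuntimeException
            throw new JobExecutionException(e);
        }
    }
}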

Example 47 with JobExecutionException

Use of org.quartz.JobExecutionException in project alfresco-repository by Alfresco.

From the class PostLookup, method execute.

public void execute() throws JobExecutionException {
    checkProperties();
    // Avoid running when in read-only mode
    if (!transactionService.getAllowWrite()) {
        if (logger.isTraceEnabled()) {
            logger.trace("Post lookup not running due to read-only server");
        }
        return;
    }
    long start = System.currentTimeMillis();
    String lockToken = null;
    LockCallback lockCallback = new LockCallback();
    try {
        if (jobLockService != null) {
            lockToken = acquireLock(lockCallback);
        }
        ActivityPostEntity params = new ActivityPostEntity();
        params.setStatus(ActivityPostEntity.STATUS.PENDING.toString());
        if (logger.isDebugEnabled()) {
            logger.debug("Selecting activity posts with status: " + ActivityPostEntity.STATUS.PENDING.toString());
        }
        // get all pending posts (for this job run)
        final List<ActivityPostEntity> activityPosts = postDAO.selectPosts(params, maxItemsPerCycle);
        if (activityPosts.size() > 0) {
            if (logger.isDebugEnabled()) {
                logger.debug("Update: " + activityPosts.size() + " activity post" + (activityPosts.size() == 1 ? "s" : ""));
            }
            // execute in READ txn
            transactionService.getRetryingTransactionHelper().doInTransaction(new RetryingTransactionCallback<Object>() {

                public Object execute() throws Throwable {
                    // lookup any additional data
                    lookupPosts(activityPosts);
                    return null;
                }
            }, true);
            // execute in WRITE txn
            List<ActivityPostEntity> activityPostsToUpdate = transactionService.getRetryingTransactionHelper().doInTransaction(new RetryingTransactionCallback<List<ActivityPostEntity>>() {

                public List<ActivityPostEntity> execute() throws Throwable {
                    // collapse (ie. rollup) any relevant posts
                    return rollupPosts(activityPosts);
                }
            }, false);
            // update posts + status (note: will also add any new rolled-up posts)
            updatePosts(activityPostsToUpdate);
            if (logger.isInfoEnabled()) {
                int cnt = activityPostsToUpdate.size();
                logger.info("Updated: " + cnt + " activity post" + (cnt == 1 ? "" : "s") + " (in " + (System.currentTimeMillis() - start) + " msecs)");
            }
        }
    } catch (LockAcquisitionException e) {
        // Job being done by another process
        if (logger.isDebugEnabled()) {
            logger.debug("execute: Can't get lock. Assume post lookup job already underway: " + e);
        }
    } catch (SQLException e) {
        logger.error("Exception during select of posts: ", e);
        throw new JobExecutionException(e);
    } catch (Throwable e) {
        // If the VM is shutting down, then ignore
        if (vmShutdownListener.isVmShuttingDown()) {
        // Ignore
        } else {
            logger.error("Exception during update of posts: ", e);
        }
    } finally {
        releaseLock(lockCallback, lockToken);
    }
}
Also used : SQLException (java.sql.SQLException), ActivityPostEntity (org.alfresco.repo.domain.activities.ActivityPostEntity), JobExecutionException (org.quartz.JobExecutionException), JSONObject (org.json.JSONObject), ArrayList (java.util.ArrayList), List (java.util.List), LockAcquisitionException (org.alfresco.repo.lock.LockAcquisitionException)
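
Note the three-tier error policy above: lock contention is logged and ignored, SQLException is rethrown as JobExecutionException so Quartz records a failed run, and any other Throwable is swallowed unless the VM is shutting down. The same policy, stripped down to a sketch (doWork and the System.err call are hypothetical stand-ins for the real job body and logger):

import java.sql.SQLException;
import org.quartz.JobExecutionException;

public class JobErrorPolicy {

    public void runGuarded() throws JobExecutionException {
        try {
            doWork();
        } catch (SQLException e) {
            // persistence failures are real job failures: let Quartz see them
            throw new JobExecutionException(e);
        } catch (Throwable e) {
            // everything else is logged and swallowed so the recurring trigger survives
            System.err.println("Unexpected error during job run: " + e);
        }
    }

    // hypothetical stand-in for the real work (select, lookup, rollup, update)
    private void doWork() throws SQLException {
    }
}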

Example 48 with JobExecutionException

Use of org.quartz.JobExecutionException in project alfresco-repository by Alfresco.

From the class FeedCleaner, method executeWithLock.

/**
 * Does the actual cleanup, expecting the lock to be maintained
 *
 * @param keepGoing <tt>true</tt> to keep going; switched to <tt>false</tt> when the cleanup should stop
 * @return          number of entries deleted through whatever means
 */
private int executeWithLock(final AtomicBoolean keepGoing) throws JobExecutionException {
    int maxIdRangeDeletedCount = 0;
    int maxAgeDeletedCount = 0;
    int maxSizeDeletedCount = 0;
    try {
        if (maxIdRange > 0 && keepGoing.get()) {
            maxIdRangeDeletedCount = feedDAO.deleteFeedEntries(maxIdRange);
            if (logger.isTraceEnabled()) {
                logger.trace("Cleaned " + maxIdRangeDeletedCount + " entries to keep ID range of " + maxIdRange + ".");
            }
        }
        if (maxAgeMins > 0 && keepGoing.get()) {
            // clean old entries based on maxAgeMins
            long nowTimeOffset = new Date().getTime();
            // millisecs = mins * 60 secs * 1000 millisecs
            long keepTimeOffset = nowTimeOffset - ((long) maxAgeMins * 60000L);
            Date keepDate = new Date(keepTimeOffset);
            maxAgeDeletedCount = feedDAO.deleteFeedEntries(keepDate);
            if (logger.isTraceEnabled()) {
                logger.trace("Cleaned " + maxAgeDeletedCount + " entries (upto " + keepDate + ", max age " + maxAgeMins + " mins)");
            }
        }
        // TODO:    ALF-15511
        if (maxFeedSize > 0 && keepGoing.get()) {
            // Get user+format feeds exceeding the required maximum
            if (logger.isTraceEnabled()) {
                logger.trace("Selecting user+format feeds exceeding the required maximum of " + maxFeedSize + " entries.");
            }
            List<ActivityFeedEntity> userFeedsTooMany = feedDAO.selectUserFeedsToClean(maxFeedSize);
            for (ActivityFeedEntity userFeedTooMany : userFeedsTooMany) {
                if (!keepGoing.get()) {
                    if (logger.isTraceEnabled()) {
                        logger.trace("Stopping cleaning the feeds.");
                    }
                    break;
                }
                if (logger.isTraceEnabled()) {
                    logger.trace("Found user activity feed entity: " + userFeedTooMany.toString());
                }
                String feedUserId = userFeedTooMany.getFeedUserId();
                // Site-specific feeds carry the null-value key instead of a real feed user ID;
                // since this pass only cleans user feed entries, we can just filter them out now.
                if (ActivitiesDAO.KEY_ACTIVITY_NULL_VALUE.equals(feedUserId)) {
                    if (logger.isTraceEnabled()) {
                        logger.trace("Found site-specific feed entries, filtering.");
                    }
                    continue;
                }
                // Get the feeds to keep
                if (logger.isTraceEnabled()) {
                    logger.trace("Get the feeds to keep for user for all sites, not exluding users.");
                }
                List<ActivityFeedEntity> feedsToKeep = feedDAO.selectUserFeedEntries(feedUserId, null, false, false, -1L, maxFeedSize);
                if (logger.isTraceEnabled()) {
                    for (ActivityFeedEntity feedToKeep : feedsToKeep) {
                        logger.trace("Found user activity feed entity to keep: " + feedToKeep.toString());
                    }
                }
                // If the feeds have been removed, then ignore
                if (feedsToKeep.size() < maxFeedSize) {
                    if (logger.isTraceEnabled()) {
                        logger.trace("Found less then " + maxFeedSize + " .The feeds were removed, ignoring.");
                    }
                    continue;
                }
                // Get the last one
                Date oldestFeedEntry = feedsToKeep.get(maxFeedSize - 1).getPostDate();
                if (logger.isTraceEnabled()) {
                    logger.trace("Deleting the oldest feed entry: " + oldestFeedEntry.toString());
                }
                int deletedCount = feedDAO.deleteUserFeedEntries(feedUserId, oldestFeedEntry);
                if (logger.isTraceEnabled()) {
                    logger.trace("Cleaned " + deletedCount + " entries for user '" + feedUserId + "'.");
                }
                maxSizeDeletedCount += deletedCount;
            }
            // Get site+format feeds exceeding the required maximum
            if (logger.isTraceEnabled()) {
                logger.trace("Selecting site+format feeds exceeding the required maximum of " + maxFeedSize + " entries.");
            }
            List<ActivityFeedEntity> siteFeedsTooMany = feedDAO.selectSiteFeedsToClean(maxFeedSize);
            for (ActivityFeedEntity siteFeedTooMany : siteFeedsTooMany) {
                if (!keepGoing.get()) {
                    if (logger.isTraceEnabled()) {
                        logger.trace("Stopping cleaning the feeds.");
                    }
                    break;
                }
                if (logger.isTraceEnabled()) {
                    logger.trace("Found site activity feed entity: " + siteFeedTooMany.toString());
                }
                String siteId = siteFeedTooMany.getSiteNetwork();
                // Get the feeds to keep
                if (logger.isTraceEnabled()) {
                    logger.trace("Get the feeds to keep for site.");
                }
                List<ActivityFeedEntity> feedsToKeep = feedDAO.selectSiteFeedEntries(siteId, maxFeedSize);
                if (logger.isTraceEnabled()) {
                    for (ActivityFeedEntity feedToKeep : feedsToKeep) {
                        logger.trace("Found site activity feed entity to keep: " + feedToKeep.toString());
                    }
                }
                // If the feeds have been removed, then ignore
                if (feedsToKeep.size() < maxFeedSize) {
                    continue;
                }
                // Get the last one
                Date oldestFeedEntry = feedsToKeep.get(maxFeedSize - 1).getPostDate();
                if (logger.isTraceEnabled()) {
                    logger.trace("Deleting the oldest feed entry: " + oldestFeedEntry.toString());
                }
                int deletedCount = feedDAO.deleteSiteFeedEntries(siteId, oldestFeedEntry);
                if (logger.isTraceEnabled()) {
                    logger.trace("Cleaned " + deletedCount + " entries for site '" + siteId + "'.");
                }
                maxSizeDeletedCount += deletedCount;
            }
        }
    } catch (SQLException e) {
        logger.error("Exception during cleanup of feeds", e);
        throw new JobExecutionException(e);
    } catch (Throwable e) {
        // We were told to stop, which is also what will happen if the VM shuts down
        if (!keepGoing.get()) {
        // Ignore
        } else {
            logger.error("Exception during cleanup of feeds", e);
        }
    }
    return (maxIdRangeDeletedCount + maxAgeDeletedCount + maxSizeDeletedCount);
}
Also used : JobExecutionException (org.quartz.JobExecutionException), SQLException (java.sql.SQLException), Date (java.util.Date), ActivityFeedEntity (org.alfresco.repo.domain.activities.ActivityFeedEntity)
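
The size-based pass above keeps the newest maxFeedSize entries of each feed and deletes everything posted before the last entry kept. The cutoff computation in isolation (FeedEntry is a hypothetical stand-in for ActivityFeedEntity; entries are assumed sorted newest first, as in the DAO results above):

import java.util.Date;
import java.util.List;

public class FeedTrimCutoff {

    // hypothetical stand-in for ActivityFeedEntity
    public static class FeedEntry {
        private final Date postDate;
        public FeedEntry(Date postDate) { this.postDate = postDate; }
        public Date getPostDate() { return postDate; }
    }

    /**
     * Returns the delete-before date, or null if the feed is already small enough.
     * Entries posted before the returned date fall outside the newest maxFeedSize.
     */
    public static Date cutoff(List<FeedEntry> entriesNewestFirst, int maxFeedSize) {
        if (entriesNewestFirst.size() < maxFeedSize) {
            return null; // feed already within the limit, nothing to trim
        }
        // the oldest entry we keep; everything posted before it is deleted
        return entriesNewestFirst.get(maxFeedSize - 1).getPostDate();
    }
}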

Example 49 with JobExecutionException

Use of org.quartz.JobExecutionException in project alfresco-repository by Alfresco.

From the class PostCleaner, method executeWithLock.

public void executeWithLock() throws JobExecutionException {
    checkProperties();
    try {
        long nowTimeOffset = new Date().getTime();
        // millisecs = mins * 60 secs * 1000 millisecs; long arithmetic avoids int overflow
        long keepTimeOffset = nowTimeOffset - ((long) maxAgeMins * 60L * 1000L);
        Date keepDate = new Date(keepTimeOffset);
        // clean old entries - PROCESSED - does not clean POSTED or PENDING, which will need to be done manually, if stuck
        int deletedCount = postDAO.deletePosts(keepDate, ActivityPostEntity.STATUS.PROCESSED);
        if (logger.isDebugEnabled()) {
            logger.debug("Cleaned " + deletedCount + " entries (upto " + keepDate + ", max age " + maxAgeMins + " mins)");
        }
    } catch (SQLException e) {
        logger.error("Exception during cleanup of posts", e);
        throw new JobExecutionException(e);
    } catch (Throwable e) {
        // If the VM is shutting down, then ignore
        if (vmShutdownListener.isVmShuttingDown()) {
        // Ignore
        } else {
            logger.error("Exception during cleanup of posts", e);
        }
    }
}
Also used : JobExecutionException(org.quartz.JobExecutionException) SQLException(java.sql.SQLException) Date(java.util.Date)
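
The only arithmetic here is "now minus maxAgeMins", converted to a Date cutoff. The same computation as a standalone method (class and method names are ours), with explicit long math so a large maxAgeMins cannot overflow:

import java.util.Date;

public class AgeCutoff {

    /** Anything posted before the returned date is older than maxAgeMins minutes. */
    public static Date keepDate(long nowMillis, long maxAgeMins) {
        // millisecs = mins * 60 secs * 1000 millisecs
        return new Date(nowMillis - maxAgeMins * 60L * 1000L);
    }

    public static void main(String[] args) {
        // e.g. keep only the last 30 days (30 * 24 * 60 minutes) of processed posts
        System.out.println(keepDate(System.currentTimeMillis(), 30L * 24 * 60));
    }
}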

Example 50 with JobExecutionException

Use of org.quartz.JobExecutionException in project alfresco-repository by Alfresco.

From the class FeedNotifierJob, method execute.

/**
 * Calls the feed notifier to do its work
 */
public void execute(JobExecutionContext context) throws JobExecutionException {
    JobDataMap jobData = context.getJobDetail().getJobDataMap();
    final FeedNotifier feedNotifier = (FeedNotifier) jobData.get(KEY_FEED_NOTIFIER);
    final TenantAdminService tenantAdminService = (TenantAdminService) jobData.get(KEY_TENANT_ADMIN_SERVICE);
    Long repeatInterval = null;
    Trigger trigger = context.getTrigger();
    if (trigger instanceof SimpleTrigger) {
        repeatInterval = ((SimpleTrigger) trigger).getRepeatInterval();
    }
    // the trigger repeat interval is in milliseconds
    final int repeatIntervalMins = (int) ((repeatInterval == null) ? 0L : repeatInterval / 1000 / 60);
    AuthenticationUtil.runAs(new RunAsWork<Object>() {

        public Object doWork() throws Exception {
            feedNotifier.execute(repeatIntervalMins);
            return null;
        }
    }, AuthenticationUtil.getSystemUserName());
    if ((tenantAdminService != null) && tenantAdminService.isEnabled()) {
        List<Tenant> tenants = tenantAdminService.getAllTenants();
        for (Tenant tenant : tenants) {
            TenantUtil.runAsSystemTenant(new TenantRunAsWork<Object>() {

                public Object doWork() throws Exception {
                    feedNotifier.execute(repeatIntervalMins);
                    return null;
                }
            }, tenant.getTenantDomain());
        }
    }
}
Also used : JobDataMap (org.quartz.JobDataMap), JobExecutionException (org.quartz.JobExecutionException), Trigger (org.quartz.Trigger), SimpleTrigger (org.quartz.SimpleTrigger), Tenant (org.alfresco.repo.tenant.Tenant), TenantAdminService (org.alfresco.repo.tenant.TenantAdminService)
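
The one Quartz-specific step above is reading the repeat interval off the firing trigger. A minimal version of that derivation without the deprecated Long boxing (helper name is ours; getRepeatInterval() returns milliseconds on SimpleTrigger in both Quartz 1.x and 2.x):

import org.quartz.JobExecutionContext;
import org.quartz.SimpleTrigger;
import org.quartz.Trigger;

public class RepeatIntervalMins {

    /** Repeat interval of the firing trigger in whole minutes, or 0 if not a SimpleTrigger. */
    public static int of(JobExecutionContext context) {
        Trigger trigger = context.getTrigger();
        if (trigger instanceof SimpleTrigger) {
            return (int) (((SimpleTrigger) trigger).getRepeatInterval() / 1000 / 60);
        }
        return 0;
    }
}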

Aggregations

JobExecutionException (org.quartz.JobExecutionException): 123
JobDataMap (org.quartz.JobDataMap): 35
ArrayList (java.util.ArrayList): 18
Date (java.util.Date): 16
IgnoreProvisionException (org.apache.syncope.core.provisioning.api.pushpull.IgnoreProvisionException): 16
SchedulerException (org.quartz.SchedulerException): 16
HashMap (java.util.HashMap): 15
ProvisioningReport (org.apache.syncope.core.provisioning.api.pushpull.ProvisioningReport): 15
Test (org.junit.Test): 13
Result (org.apache.syncope.common.lib.types.AuditElements.Result): 12
JobDetail (org.quartz.JobDetail): 12
PullActions (org.apache.syncope.core.provisioning.api.pushpull.PullActions): 11
SQLException (java.sql.SQLException): 10
PropagationException (org.apache.syncope.core.provisioning.api.propagation.PropagationException): 10
DelegatedAdministrationException (org.apache.syncope.core.spring.security.DelegatedAdministrationException): 10
Map (java.util.Map): 9
JobExecutionContext (org.quartz.JobExecutionContext): 9
List (java.util.List): 8
IOException (java.io.IOException): 7
Realm (org.apache.syncope.core.persistence.api.entity.Realm): 7