Search in sources:

Example 76 with JobExecutionException

Use of org.quartz.JobExecutionException in project syncope by apache.

The class DefaultRealmPullResultHandler, method deprovision().

private List<ProvisioningReport> deprovision(final SyncDelta delta, final List<String> keys, final boolean unlink) throws JobExecutionException {
    if (!profile.getTask().isPerformUpdate()) {
        LOG.debug("PullTask not configured for update");
        finalize(unlink ? MatchingRule.toEventName(MatchingRule.UNASSIGN) : MatchingRule.toEventName(MatchingRule.DEPROVISION), Result.SUCCESS, null, null, delta);
        return Collections.<ProvisioningReport>emptyList();
    }
    LOG.debug("About to deprovision {}", keys);
    final List<ProvisioningReport> results = new ArrayList<>();
    for (String key : keys) {
        LOG.debug("About to unassign resource {}", key);
        ProvisioningReport result = new ProvisioningReport();
        result.setOperation(ResourceOperation.DELETE);
        result.setAnyType(REALM_TYPE);
        result.setStatus(ProvisioningReport.Status.SUCCESS);
        result.setKey(key);
        Realm realm = realmDAO.find(key);
        RealmTO before = binder.getRealmTO(realm, true);
        if (before == null) {
            result.setStatus(ProvisioningReport.Status.FAILURE);
            result.setMessage(String.format("Realm '%s' not found", key));
        } else {
            result.setName(before.getFullPath());
        }
        if (!profile.isDryRun()) {
            Object output;
            Result resultStatus;
            if (before == null) {
                resultStatus = Result.FAILURE;
                output = null;
            } else {
                try {
                    if (unlink) {
                        for (PullActions action : profile.getActions()) {
                            action.beforeUnassign(profile, delta, before);
                        }
                    } else {
                        for (PullActions action : profile.getActions()) {
                            action.beforeDeprovision(profile, delta, before);
                        }
                    }
                    PropagationByResource propByRes = new PropagationByResource();
                    propByRes.add(ResourceOperation.DELETE, profile.getTask().getResource().getKey());
                    taskExecutor.execute(propagationManager.createTasks(realm, propByRes, null), false);
                    if (unlink) {
                        realm.getResources().remove(profile.getTask().getResource());
                        output = binder.getRealmTO(realmDAO.save(realm), true);
                    } else {
                        output = binder.getRealmTO(realm, true);
                    }
                    for (PullActions action : profile.getActions()) {
                        action.after(profile, delta, RealmTO.class.cast(output), result);
                    }
                    resultStatus = Result.SUCCESS;
                    LOG.debug("{} successfully updated", realm);
                } catch (PropagationException e) {
                    // A propagation failure doesn't imply a pull failure.
                    // The propagation exception status will be reported in the propagation task execution.
                    LOG.error("Could not propagate Realm {}", delta.getUid().getUidValue(), e);
                    output = e;
                    resultStatus = Result.FAILURE;
                } catch (Exception e) {
                    throwIgnoreProvisionException(delta, e);
                    result.setStatus(ProvisioningReport.Status.FAILURE);
                    result.setMessage(ExceptionUtils.getRootCauseMessage(e));
                    LOG.error("Could not update Realm {}", delta.getUid().getUidValue(), e);
                    output = e;
                    resultStatus = Result.FAILURE;
                }
            }
            finalize(unlink ? MatchingRule.toEventName(MatchingRule.UNASSIGN) : MatchingRule.toEventName(MatchingRule.DEPROVISION), resultStatus, before, output, delta);
        }
        results.add(result);
    }
    return results;
}
Also used: PropagationException (org.apache.syncope.core.provisioning.api.propagation.PropagationException), PullActions (org.apache.syncope.core.provisioning.api.pushpull.PullActions), ArrayList (java.util.ArrayList), RealmTO (org.apache.syncope.common.lib.to.RealmTO), PropagationByResource (org.apache.syncope.core.provisioning.api.PropagationByResource), ProvisioningReport (org.apache.syncope.core.provisioning.api.pushpull.ProvisioningReport), Realm (org.apache.syncope.core.persistence.api.entity.Realm), SyncopeClientException (org.apache.syncope.common.lib.SyncopeClientException), DelegatedAdministrationException (org.apache.syncope.core.spring.security.DelegatedAdministrationException), IgnoreProvisionException (org.apache.syncope.core.provisioning.api.pushpull.IgnoreProvisionException), JobExecutionException (org.quartz.JobExecutionException), Result (org.apache.syncope.common.lib.types.AuditElements.Result)
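
The method above declares throws JobExecutionException but deliberately turns per-realm failures into ProvisioningReport entries instead of aborting the whole pull. At the Quartz level, the usual counterpart is to wrap any checked failure in a JobExecutionException so the scheduler records the execution as failed. A minimal sketch of that pattern, using only the Quartz API; DeprovisionJob and runDeprovision() are hypothetical names, not Syncope classes:

import org.quartz.Job;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;

public class DeprovisionJob implements Job {

    @Override
    public void execute(final JobExecutionContext context) throws JobExecutionException {
        try {
            // hypothetical unit of work standing in for the deprovisioning logic above
            runDeprovision();
        } catch (Exception e) {
            // wrapping the cause lets Quartz listeners see both the failure and its origin
            throw new JobExecutionException("Deprovisioning failed", e);
        }
    }

    private void runDeprovision() throws Exception {
        // placeholder: the real work would go here
    }
}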

Example 77 with JobExecutionException

Use of org.quartz.JobExecutionException in project syncope by apache.

The class DefaultRealmPullResultHandler, method delete().

private List<ProvisioningReport> delete(final SyncDelta delta, final List<String> keys) throws JobExecutionException {
    if (!profile.getTask().isPerformDelete()) {
        LOG.debug("PullTask not configured for delete");
        finalize(ResourceOperation.DELETE.name().toLowerCase(), Result.SUCCESS, null, null, delta);
        return Collections.<ProvisioningReport>emptyList();
    }
    LOG.debug("About to delete {}", keys);
    List<ProvisioningReport> results = new ArrayList<>();
    for (String key : keys) {
        Object output;
        Result resultStatus = Result.FAILURE;
        ProvisioningReport result = new ProvisioningReport();
        try {
            result.setKey(key);
            result.setOperation(ResourceOperation.DELETE);
            result.setAnyType(REALM_TYPE);
            result.setStatus(ProvisioningReport.Status.SUCCESS);
            Realm realm = realmDAO.find(key);
            RealmTO before = binder.getRealmTO(realm, true);
            if (before == null) {
                result.setStatus(ProvisioningReport.Status.FAILURE);
                result.setMessage(String.format("Realm '%s' not found", key));
            } else {
                result.setName(before.getFullPath());
            }
            if (!profile.isDryRun()) {
                for (PullActions action : profile.getActions()) {
                    action.beforeDelete(profile, delta, before);
                }
                try {
                    if (!realmDAO.findChildren(realm).isEmpty()) {
                        throw SyncopeClientException.build(ClientExceptionType.HasChildren);
                    }
                    Set<String> adminRealms = Collections.singleton(realm.getFullPath());
                    AnyCond keyCond = new AnyCond(AttributeCond.Type.ISNOTNULL);
                    keyCond.setSchema("key");
                    SearchCond allMatchingCond = SearchCond.getLeafCond(keyCond);
                    int users = searchDAO.count(adminRealms, allMatchingCond, AnyTypeKind.USER);
                    int groups = searchDAO.count(adminRealms, allMatchingCond, AnyTypeKind.GROUP);
                    int anyObjects = searchDAO.count(adminRealms, allMatchingCond, AnyTypeKind.ANY_OBJECT);
                    if (users + groups + anyObjects > 0) {
                        SyncopeClientException containedAnys = SyncopeClientException.build(ClientExceptionType.AssociatedAnys);
                        containedAnys.getElements().add(users + " user(s)");
                        containedAnys.getElements().add(groups + " group(s)");
                        containedAnys.getElements().add(anyObjects + " anyObject(s)");
                        throw containedAnys;
                    }
                    PropagationByResource propByRes = new PropagationByResource();
                    for (String resource : realm.getResourceKeys()) {
                        propByRes.add(ResourceOperation.DELETE, resource);
                    }
                    List<PropagationTaskTO> tasks = propagationManager.createTasks(realm, propByRes, null);
                    taskExecutor.execute(tasks, false);
                    realmDAO.delete(realm);
                    output = null;
                    resultStatus = Result.SUCCESS;
                    for (PullActions action : profile.getActions()) {
                        action.after(profile, delta, before, result);
                    }
                } catch (Exception e) {
                    throwIgnoreProvisionException(delta, e);
                    result.setStatus(ProvisioningReport.Status.FAILURE);
                    result.setMessage(ExceptionUtils.getRootCauseMessage(e));
                    LOG.error("Could not delete {}", realm, e);
                    output = e;
                }
                finalize(ResourceOperation.DELETE.name().toLowerCase(), resultStatus, before, output, delta);
            }
            results.add(result);
        } catch (DelegatedAdministrationException e) {
            LOG.error("Not allowed to read Realm {}", key, e);
        } catch (Exception e) {
            LOG.error("Could not delete Realm {}", key, e);
        }
    }
    return results;
}
Also used: PropagationTaskTO (org.apache.syncope.common.lib.to.PropagationTaskTO), PullActions (org.apache.syncope.core.provisioning.api.pushpull.PullActions), ArrayList (java.util.ArrayList), RealmTO (org.apache.syncope.common.lib.to.RealmTO), SyncopeClientException (org.apache.syncope.common.lib.SyncopeClientException), PropagationByResource (org.apache.syncope.core.provisioning.api.PropagationByResource), DelegatedAdministrationException (org.apache.syncope.core.spring.security.DelegatedAdministrationException), ProvisioningReport (org.apache.syncope.core.provisioning.api.pushpull.ProvisioningReport), IgnoreProvisionException (org.apache.syncope.core.provisioning.api.pushpull.IgnoreProvisionException), PropagationException (org.apache.syncope.core.provisioning.api.propagation.PropagationException), JobExecutionException (org.quartz.JobExecutionException), Result (org.apache.syncope.common.lib.types.AuditElements.Result), SearchCond (org.apache.syncope.core.persistence.api.dao.search.SearchCond), Realm (org.apache.syncope.core.persistence.api.entity.Realm), AnyCond (org.apache.syncope.core.persistence.api.dao.search.AnyCond)
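
The delete branch refuses to remove a realm that still has children or contains users, groups or any objects, and reports that per key rather than failing the whole job. When a condition like this is truly non-recoverable, a plain Quartz job can also use the thrown JobExecutionException to tell the scheduler to stop firing its triggers. A minimal sketch under that assumption; RealmCleanupJob and deleteRealms() are hypothetical and do not exist in Syncope:

import org.quartz.Job;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;

public class RealmCleanupJob implements Job {

    @Override
    public void execute(final JobExecutionContext context) throws JobExecutionException {
        try {
            deleteRealms();
        } catch (IllegalStateException e) {
            // non-recoverable condition: ask Quartz to stop firing this job's triggers
            JobExecutionException jee = new JobExecutionException("Realm cleanup cannot proceed", e);
            jee.setUnscheduleAllTriggers(true);
            throw jee;
        } catch (Exception e) {
            throw new JobExecutionException("Realm cleanup failed", e);
        }
    }

    private void deleteRealms() {
        // placeholder: the deletion logic shown above would go here
    }
}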

Example 78 with JobExecutionException

Use of org.quartz.JobExecutionException in project syncope by apache.

The class DefaultRealmPullResultHandler, method update().

private List<ProvisioningReport> update(final SyncDelta delta, final List<String> keys) throws JobExecutionException {
    if (!profile.getTask().isPerformUpdate()) {
        LOG.debug("PullTask not configured for update");
        finalize(MatchingRule.toEventName(MatchingRule.UPDATE), Result.SUCCESS, null, null, delta);
        return Collections.<ProvisioningReport>emptyList();
    }
    LOG.debug("About to update {}", keys);
    List<ProvisioningReport> results = new ArrayList<>();
    for (String key : keys) {
        LOG.debug("About to update {}", key);
        ProvisioningReport result = new ProvisioningReport();
        result.setOperation(ResourceOperation.UPDATE);
        result.setAnyType(REALM_TYPE);
        result.setStatus(ProvisioningReport.Status.SUCCESS);
        result.setKey(key);
        Realm realm = realmDAO.find(key);
        RealmTO before = binder.getRealmTO(realm, true);
        if (before == null) {
            result.setStatus(ProvisioningReport.Status.FAILURE);
            result.setMessage(String.format("Realm '%s' not found", key));
        } else {
            result.setName(before.getFullPath());
        }
        if (!profile.isDryRun()) {
            Result resultStatus;
            Object output;
            if (before == null) {
                resultStatus = Result.FAILURE;
                output = null;
            } else {
                try {
                    for (PullActions action : profile.getActions()) {
                        action.beforeUpdate(profile, delta, before, null);
                    }
                    PropagationByResource propByRes = binder.update(realm, before);
                    realm = realmDAO.save(realm);
                    RealmTO updated = binder.getRealmTO(realm, true);
                    List<PropagationTaskTO> tasks = propagationManager.createTasks(realm, propByRes, null);
                    taskExecutor.execute(tasks, false);
                    for (PullActions action : profile.getActions()) {
                        action.after(profile, delta, updated, result);
                    }
                    output = updated;
                    resultStatus = Result.SUCCESS;
                    result.setName(updated.getFullPath());
                    LOG.debug("{} successfully updated", updated);
                } catch (PropagationException e) {
                    // A propagation failure doesn't imply a pull failure.
                    // The propagation exception status will be reported in the propagation task execution.
                    LOG.error("Could not propagate Realm {}", delta.getUid().getUidValue(), e);
                    output = e;
                    resultStatus = Result.FAILURE;
                } catch (Exception e) {
                    throwIgnoreProvisionException(delta, e);
                    result.setStatus(ProvisioningReport.Status.FAILURE);
                    result.setMessage(ExceptionUtils.getRootCauseMessage(e));
                    LOG.error("Could not update Realm {}", delta.getUid().getUidValue(), e);
                    output = e;
                    resultStatus = Result.FAILURE;
                }
            }
            finalize(MatchingRule.toEventName(MatchingRule.UPDATE), resultStatus, before, output, delta);
        }
        results.add(result);
    }
    return results;
}
Also used: PropagationTaskTO (org.apache.syncope.common.lib.to.PropagationTaskTO), PullActions (org.apache.syncope.core.provisioning.api.pushpull.PullActions), ArrayList (java.util.ArrayList), RealmTO (org.apache.syncope.common.lib.to.RealmTO), PropagationByResource (org.apache.syncope.core.provisioning.api.PropagationByResource), ProvisioningReport (org.apache.syncope.core.provisioning.api.pushpull.ProvisioningReport), SyncopeClientException (org.apache.syncope.common.lib.SyncopeClientException), DelegatedAdministrationException (org.apache.syncope.core.spring.security.DelegatedAdministrationException), IgnoreProvisionException (org.apache.syncope.core.provisioning.api.pushpull.IgnoreProvisionException), PropagationException (org.apache.syncope.core.provisioning.api.propagation.PropagationException), JobExecutionException (org.quartz.JobExecutionException), Result (org.apache.syncope.common.lib.types.AuditElements.Result), Realm (org.apache.syncope.core.persistence.api.entity.Realm)
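
Here a PropagationException is logged and reported but the pull goes on, since propagation results are tracked in their own task executions. For transient failures in a plain Quartz job, another option is to ask the scheduler to re-fire the execution immediately via JobExecutionException.setRefireImmediately(true). A minimal sketch; RealmUpdateJob and pushUpdates() are hypothetical names, and an unconditional refire like this should be paired with a retry limit to avoid a tight failure loop:

import org.quartz.Job;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;

public class RealmUpdateJob implements Job {

    @Override
    public void execute(final JobExecutionContext context) throws JobExecutionException {
        try {
            pushUpdates();
        } catch (Exception e) {
            // treat the failure as transient and have Quartz re-fire this execution at once
            JobExecutionException jee = new JobExecutionException("Update propagation failed", e);
            jee.setRefireImmediately(true);
            throw jee;
        }
    }

    private void pushUpdates() throws Exception {
        // placeholder: the update and propagation logic shown above would go here
    }
}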

Example 79 with JobExecutionException

Use of org.quartz.JobExecutionException in project syncope by apache.

The class ElasticsearchReindex, method doExecute().

@Override
protected String doExecute(final boolean dryRun) throws JobExecutionException {
    if (!dryRun) {
        try {
            LOG.debug("Start rebuild index {}", AuthContextUtils.getDomain().toLowerCase());
            IndicesExistsResponse existsIndexResponse = client.admin().indices().exists(new IndicesExistsRequest(AuthContextUtils.getDomain().toLowerCase())).get();
            if (existsIndexResponse.isExists()) {
                DeleteIndexResponse deleteIndexResponse = client.admin().indices().delete(new DeleteIndexRequest(AuthContextUtils.getDomain().toLowerCase())).get();
                LOG.debug("Successfully removed {}: {}", AuthContextUtils.getDomain().toLowerCase(), deleteIndexResponse);
            }
            // settings: a custom "string_lowercase" analyzer (standard tokenizer + lowercase filter)
            XContentBuilder settings = XContentFactory.jsonBuilder().
                    startObject().
                        startObject("analysis").
                            startObject("analyzer").
                                startObject("string_lowercase").
                                    field("type", "custom").
                                    field("tokenizer", "standard").
                                    field("filter").startArray().value("lowercase").endArray().
                                endObject().
                            endObject().
                        endObject().
                    endObject();
            // mapping: a dynamic template indexing every string field as a lowercase keyword
            XContentBuilder mapping = XContentFactory.jsonBuilder().
                    startObject().
                        startArray("dynamic_templates").
                            startObject().
                                startObject("strings").
                                    field("match_mapping_type", "string").
                                    startObject("mapping").
                                        field("type", "keyword").
                                        field("analyzer", "string_lowercase").
                                    endObject().
                                endObject().
                            endObject().
                        endArray().
                    endObject();
            // recreate the per-domain index with one mapping per any type
            CreateIndexResponse createIndexResponse = client.admin().indices().create(
                    new CreateIndexRequest(AuthContextUtils.getDomain().toLowerCase()).
                            settings(settings).
                            mapping(AnyTypeKind.USER.name(), mapping).
                            mapping(AnyTypeKind.GROUP.name(), mapping).
                            mapping(AnyTypeKind.ANY_OBJECT.name(), mapping)).
                    get();
            LOG.debug("Successfully created {}: {}", AuthContextUtils.getDomain().toLowerCase(), createIndexResponse);
            LOG.debug("Indexing users...");
            for (int page = 1; page <= (userDAO.count() / AnyDAO.DEFAULT_PAGE_SIZE) + 1; page++) {
                for (User user : userDAO.findAll(page, AnyDAO.DEFAULT_PAGE_SIZE)) {
                    IndexResponse response = client.prepareIndex(
                            AuthContextUtils.getDomain().toLowerCase(), AnyTypeKind.USER.name(), user.getKey()).
                            setSource(elasticsearchUtils.builder(user)).
                            get();
                    LOG.debug("Index successfully created for {}: {}", user, response);
                }
            }
            LOG.debug("Indexing groups...");
            for (int page = 1; page <= (groupDAO.count() / AnyDAO.DEFAULT_PAGE_SIZE) + 1; page++) {
                for (Group group : groupDAO.findAll(page, AnyDAO.DEFAULT_PAGE_SIZE)) {
                    IndexResponse response = client.prepareIndex(
                            AuthContextUtils.getDomain().toLowerCase(), AnyTypeKind.GROUP.name(), group.getKey()).
                            setSource(elasticsearchUtils.builder(group)).
                            get();
                    LOG.debug("Index successfully created for {}: {}", group, response);
                }
            }
            LOG.debug("Indexing any objects...");
            for (int page = 1; page <= (anyObjectDAO.count() / AnyDAO.DEFAULT_PAGE_SIZE) + 1; page++) {
                for (AnyObject anyObject : anyObjectDAO.findAll(page, AnyDAO.DEFAULT_PAGE_SIZE)) {
                    IndexResponse response = client.prepareIndex(
                            AuthContextUtils.getDomain().toLowerCase(), AnyTypeKind.ANY_OBJECT.name(), anyObject.getKey()).
                            setSource(elasticsearchUtils.builder(anyObject)).
                            get();
                    LOG.debug("Index successfully created for {}: {}", anyObject, response);
                }
            }
            LOG.debug("Rebuild index {} successfully completed", AuthContextUtils.getDomain().toLowerCase());
        } catch (Exception e) {
            throw new JobExecutionException("While rebuilding index " + AuthContextUtils.getDomain().toLowerCase(), e);
        }
    }
    return "SUCCESS";
}
Also used: Group (org.apache.syncope.core.persistence.api.entity.group.Group), User (org.apache.syncope.core.persistence.api.entity.user.User), DeleteIndexRequest (org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest), JobExecutionException (org.quartz.JobExecutionException), DeleteIndexResponse (org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse), AnyObject (org.apache.syncope.core.persistence.api.entity.anyobject.AnyObject), CreateIndexResponse (org.elasticsearch.action.admin.indices.create.CreateIndexResponse), IndexResponse (org.elasticsearch.action.index.IndexResponse), IndicesExistsResponse (org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsResponse), CreateIndexRequest (org.elasticsearch.action.admin.indices.create.CreateIndexRequest), IndicesExistsRequest (org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsRequest), XContentBuilder (org.elasticsearch.common.xcontent.XContentBuilder)
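
ElasticsearchReindex runs inside Syncope's own task-execution machinery; it is not scheduled directly as a raw org.quartz.Job. As a rough illustration of how the same kind of one-off maintenance work could be wired up with plain Quartz, the sketch below uses only standard Quartz builders; ReindexScheduler and ReindexJob are hypothetical stand-ins, not Syncope classes:

import org.quartz.Job;
import org.quartz.JobBuilder;
import org.quartz.JobDetail;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;
import org.quartz.Scheduler;
import org.quartz.SchedulerException;
import org.quartz.Trigger;
import org.quartz.TriggerBuilder;
import org.quartz.impl.StdSchedulerFactory;

public final class ReindexScheduler {

    // hypothetical stand-in for the reindex logic shown above
    public static class ReindexJob implements Job {

        @Override
        public void execute(final JobExecutionContext context) throws JobExecutionException {
            try {
                // rebuild the index here
            } catch (RuntimeException e) {
                throw new JobExecutionException("While rebuilding index", e);
            }
        }
    }

    public static void main(final String[] args) throws SchedulerException {
        Scheduler scheduler = StdSchedulerFactory.getDefaultScheduler();
        scheduler.start();

        JobDetail job = JobBuilder.newJob(ReindexJob.class).
                withIdentity("reindex", "maintenance").
                build();

        // fire once, immediately; a cron-based trigger could be used for periodic reindexing
        Trigger trigger = TriggerBuilder.newTrigger().
                withIdentity("reindexNow", "maintenance").
                startNow().
                build();

        scheduler.scheduleJob(job, trigger);
        // in a long-running service the scheduler stays up; shut it down once the job is done
    }
}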

Example 80 with JobExecutionException

Use of org.quartz.JobExecutionException in project syncope by apache.

The class AbstractPullResultHandler, method handle().

@Override
public boolean handle(final SyncDelta delta) {
    Provision provision = null;
    try {
        provision = profile.getTask().getResource().getProvision(delta.getObject().getObjectClass()).orElse(null);
        if (provision == null) {
            throw new JobExecutionException("No provision found on " + profile.getTask().getResource() + " for " + delta.getObject().getObjectClass());
        }
        doHandle(delta, provision);
        executor.reportHandled(delta.getObjectClass(), delta.getObject().getName());
        LOG.debug("Successfully handled {}", delta);
        if (profile.getTask().getPullMode() != PullMode.INCREMENTAL) {
            if (executor.wasInterruptRequested()) {
                LOG.debug("Pull interrupted");
                executor.setInterrupted();
                return false;
            }
            return true;
        }
        boolean shouldContinue;
        synchronized (this) {
            shouldContinue = latestResult == Result.SUCCESS;
            this.latestResult = null;
        }
        if (shouldContinue) {
            executor.setLatestSyncToken(delta.getObjectClass(), delta.getToken());
        }
        if (executor.wasInterruptRequested()) {
            LOG.debug("Pull interrupted");
            executor.setInterrupted();
            return false;
        }
        return shouldContinue;
    } catch (IgnoreProvisionException e) {
        ProvisioningReport ignoreResult = new ProvisioningReport();
        ignoreResult.setOperation(ResourceOperation.NONE);
        ignoreResult.setAnyType(provision == null ? getAnyUtils().getAnyTypeKind().name() : provision.getAnyType().getKey());
        ignoreResult.setStatus(ProvisioningReport.Status.IGNORE);
        ignoreResult.setKey(null);
        ignoreResult.setName(delta.getObject().getName().getNameValue());
        profile.getResults().add(ignoreResult);
        LOG.warn("Ignoring during pull", e);
        executor.setLatestSyncToken(delta.getObjectClass(), delta.getToken());
        executor.reportHandled(delta.getObjectClass(), delta.getObject().getName());
        return true;
    } catch (JobExecutionException e) {
        LOG.error("Pull failed", e);
        return false;
    }
}
Also used: Provision (org.apache.syncope.core.persistence.api.entity.resource.Provision), JobExecutionException (org.quartz.JobExecutionException), IgnoreProvisionException (org.apache.syncope.core.provisioning.api.pushpull.IgnoreProvisionException), ProvisioningReport (org.apache.syncope.core.provisioning.api.pushpull.ProvisioningReport)
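
handle() shows the boundary pattern for JobExecutionException in the pull handlers: an unexpected condition (a missing provision) is raised as JobExecutionException, IgnoreProvisionException short-circuits the single delta, and the outer catch translates the failure into a boolean that stops or continues the pull. A stripped-down sketch of the same idea, detached from Syncope; DeltaHandler and its argument are hypothetical:

import org.quartz.JobExecutionException;

public class DeltaHandler {

    /**
     * Same shape as handle() above: raise JobExecutionException for an unexpected
     * condition, then translate it at the boundary into a boolean that tells the
     * caller whether to keep pulling.
     */
    public boolean handle(final String objectClassName) {
        try {
            if (objectClassName == null) {
                throw new JobExecutionException("No provision found");
            }
            // process the delta for the given object class...
            return true;
        } catch (JobExecutionException e) {
            // a real handler would log the failure here
            return false;
        }
    }
}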

Aggregations (usage counts across all matching sources)

JobExecutionException (org.quartz.JobExecutionException): 123
JobDataMap (org.quartz.JobDataMap): 35
ArrayList (java.util.ArrayList): 18
Date (java.util.Date): 16
IgnoreProvisionException (org.apache.syncope.core.provisioning.api.pushpull.IgnoreProvisionException): 16
SchedulerException (org.quartz.SchedulerException): 16
HashMap (java.util.HashMap): 15
ProvisioningReport (org.apache.syncope.core.provisioning.api.pushpull.ProvisioningReport): 15
Test (org.junit.Test): 13
Result (org.apache.syncope.common.lib.types.AuditElements.Result): 12
JobDetail (org.quartz.JobDetail): 12
PullActions (org.apache.syncope.core.provisioning.api.pushpull.PullActions): 11
SQLException (java.sql.SQLException): 10
PropagationException (org.apache.syncope.core.provisioning.api.propagation.PropagationException): 10
DelegatedAdministrationException (org.apache.syncope.core.spring.security.DelegatedAdministrationException): 10
Map (java.util.Map): 9
JobExecutionContext (org.quartz.JobExecutionContext): 9
List (java.util.List): 8
IOException (java.io.IOException): 7
Realm (org.apache.syncope.core.persistence.api.entity.Realm): 7