Use of org.alfresco.repo.batch.BatchProcessWorkProvider in project alfresco-repository by Alfresco.
Class ReEncryptor, method reEncryptProperties:
protected void reEncryptProperties(final List<NodePropertyEntity> properties, final String lockToken)
{
    final Iterator<NodePropertyEntity> it = properties.iterator();

    // TODO use BatchProcessWorkerAdaptor?
    BatchProcessor.BatchProcessWorker<NodePropertyEntity> worker = new BatchProcessor.BatchProcessWorker<NodePropertyEntity>()
    {
        public String getIdentifier(NodePropertyEntity entity)
        {
            return String.valueOf(entity.getNodeId());
        }

        public void beforeProcess() throws Throwable
        {
            refreshLock(lockToken, chunkSize * 100L);
        }

        public void afterProcess() throws Throwable
        {
        }

        public void process(final NodePropertyEntity entity) throws Throwable
        {
            NodePropertyValue nodePropValue = entity.getValue();
            // TODO check that we have the correct type i.e. can be cast to Serializable
            Serializable value = nodePropValue.getSerializableValue();
            if (value instanceof SealedObject)
            {
                SealedObject sealed = (SealedObject) value;

                NodePropertyKey propertyKey = entity.getKey();
                QName propertyQName = qnameDAO.getQName(propertyKey.getQnameId()).getSecond();

                // decrypt...
                Serializable decrypted = metadataEncryptor.decrypt(propertyQName, sealed);

                // ...and then re-encrypt. The new key will be used.
                Serializable resealed = metadataEncryptor.encrypt(propertyQName, decrypted);

                // TODO update resealed using batch update?
                // does the node DAO do batch updating?
                nodeDAO.setNodeProperties(entity.getNodeId(), Collections.singletonMap(propertyQName, resealed));
            }
            else
            {
                NodePropertyKey nodeKey = entity.getKey();
                QName propertyQName = qnameDAO.getQName(nodeKey.getQnameId()).getSecond();
                logger.warn("Encountered an encrypted property that is not a SealedObject, for node id " + entity.getNodeId() + ", property " + propertyQName);
            }
        }
    };

    BatchProcessWorkProvider<NodePropertyEntity> provider = new BatchProcessWorkProvider<NodePropertyEntity>()
    {
        @Override
        public int getTotalEstimatedWorkSize()
        {
            return properties.size();
        }

        @Override
        public Collection<NodePropertyEntity> getNextWork()
        {
            List<NodePropertyEntity> sublist = new ArrayList<NodePropertyEntity>(chunkSize);
            synchronized (it)
            {
                int count = 0;
                while (it.hasNext() && count < chunkSize)
                {
                    sublist.add(it.next());
                    count++;
                }
            }
            return sublist;
        }
    };

    new BatchProcessor<NodePropertyEntity>("Reencryptor", transactionHelper, provider, numThreads, chunkSize,
            applicationContext, logger, 100).process(worker, splitTxns);
}
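The same wiring recurs in every example on this page: a BatchProcessWorkProvider hands out chunks of work, a BatchProcessWorker handles one item at a time, and BatchProcessor drives the threading, the retrying transactions and the progress logging. The following minimal sketch, which is not taken from the Alfresco sources above, distills that pattern for a plain list of strings; the injected fields (transactionHelper, applicationContext, logger), the thread and chunk sizes, and the worker body are illustrative assumptions.

import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;

import org.alfresco.repo.batch.BatchProcessWorkProvider;
import org.alfresco.repo.batch.BatchProcessor;
import org.alfresco.repo.transaction.RetryingTransactionHelper;
import org.apache.commons.logging.Log;
import org.springframework.context.ApplicationContext;

public class SimpleBatchExample
{
    private RetryingTransactionHelper transactionHelper; // assumed to be injected
    private ApplicationContext applicationContext;       // assumed to be injected
    private Log logger;                                   // assumed to be injected
    private int numThreads = 2;                           // illustrative value
    private int chunkSize = 100;                          // illustrative value

    public void processAll(final List<String> items)
    {
        final Iterator<String> it = items.iterator();

        // The provider drains a shared iterator in fixed-size chunks; it can be called
        // from multiple threads, hence the synchronization on the iterator.
        BatchProcessWorkProvider<String> provider = new BatchProcessWorkProvider<String>()
        {
            @Override
            public int getTotalEstimatedWorkSize()
            {
                return items.size();
            }

            @Override
            public Collection<String> getNextWork()
            {
                List<String> chunk = new ArrayList<String>(chunkSize);
                synchronized (it)
                {
                    while (it.hasNext() && chunk.size() < chunkSize)
                    {
                        chunk.add(it.next());
                    }
                }
                // an empty collection signals that there is no more work
                return chunk;
            }
        };

        // The worker handles one item per process() call; beforeProcess()/afterProcess()
        // bracket batches of calls (used above to refresh locks and push/pop authentication).
        BatchProcessor.BatchProcessWorker<String> worker = new BatchProcessor.BatchProcessWorker<String>()
        {
            public String getIdentifier(String item)
            {
                return item;
            }

            public void beforeProcess() throws Throwable
            {
            }

            public void afterProcess() throws Throwable
            {
            }

            public void process(String item) throws Throwable
            {
                // placeholder for real per-item work
                logger.debug("Processing " + item);
            }
        };

        // Constructor arguments mirror the examples on this page: process name, transaction helper,
        // work provider, worker threads, batch size, application context, logger, logging interval.
        // The final boolean argument of process() is the splitTxns flag seen in the first example.
        new BatchProcessor<String>("SimpleBatchExample", transactionHelper, provider, numThreads, chunkSize,
                applicationContext, logger, 100).process(worker, true);
    }
}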
Use of org.alfresco.repo.batch.BatchProcessWorkProvider in project alfresco-repository by Alfresco.
Class FeedNotifierImpl, method executeInternal:
private void executeInternal(final int repeatIntervalMins)
{
    final String emailTemplateRef = getEmailTemplateRef();
    if (emailTemplateRef == null)
    {
        return;
    }

    final String shareUrl = UrlUtil.getShareUrl(sysAdminParams);
    if (logger.isDebugEnabled())
    {
        logger.debug("Share URL configured as: " + shareUrl);
    }

    final AtomicInteger userCnt = new AtomicInteger(0);
    final AtomicInteger feedEntryCnt = new AtomicInteger(0);

    final long startTime = System.currentTimeMillis();

    // local cache for this execution
    final Map<String, String> siteNames = new ConcurrentHashMap<String, String>(10);

    try
    {
        final String currentUser = AuthenticationUtil.getRunAsUser();
        final String tenantDomain = TenantUtil.getCurrentDomain();

        // process the feeds using the batch processor {@link BatchProcessor}
        BatchProcessor.BatchProcessWorker<PersonInfo> worker = new BatchProcessor.BatchProcessWorker<PersonInfo>()
        {
            public String getIdentifier(final PersonInfo person)
            {
                StringBuilder sb = new StringBuilder("Person ");
                sb.append(person.getUserName());
                return sb.toString();
            }

            public void beforeProcess() throws Throwable
            {
                AuthenticationUtil.pushAuthentication();
                AuthenticationUtil.setFullyAuthenticatedUser(currentUser);
            }

            public void afterProcess() throws Throwable
            {
                AuthenticationUtil.popAuthentication();
            }

            public void process(final PersonInfo person) throws Throwable
            {
                final RetryingTransactionHelper txHelper = transactionService.getRetryingTransactionHelper();
                txHelper.setMaxRetries(0);

                TenantUtil.runAsTenant(new TenantRunAsWork<Void>()
                {
                    @Override
                    public Void doWork() throws Exception
                    {
                        txHelper.doInTransaction(new RetryingTransactionCallback<Void>()
                        {
                            public Void execute() throws Throwable
                            {
                                processInternal(person);
                                return null;
                            }
                        }, false, true);

                        return null;
                    }
                }, tenantDomain);
            }

            private void processInternal(final PersonInfo person) throws Exception
            {
                final NodeRef personNodeRef = person.getNodeRef();
                try
                {
                    Pair<Integer, Long> result = userNotifier.notifyUser(personNodeRef, MSG_EMAIL_SUBJECT,
                            new Object[] { ModelUtil.getProductName(repoAdminService) }, siteNames, shareUrl,
                            repeatIntervalMins, emailTemplateRef);
                    if (result != null)
                    {
                        int entryCnt = result.getFirst();
                        final long maxFeedId = result.getSecond();

                        Long currentMaxFeedId = (Long) nodeService.getProperty(personNodeRef, ContentModel.PROP_EMAIL_FEED_ID);
                        if ((currentMaxFeedId == null) || (currentMaxFeedId < maxFeedId))
                        {
                            nodeService.setProperty(personNodeRef, ContentModel.PROP_EMAIL_FEED_ID, maxFeedId);
                        }

                        userCnt.incrementAndGet();
                        feedEntryCnt.addAndGet(entryCnt);
                    }
                }
                catch (InvalidNodeRefException inre)
                {
                    // skip this person - e.g. no longer exists?
                    logger.warn("Skip feed notification for user (" + personNodeRef + "): " + inre.getMessage());
                }
            }
        };

        // grab people for the batch processor in chunks of size batchSize
        BatchProcessWorkProvider<PersonInfo> provider = new BatchProcessWorkProvider<PersonInfo>()
        {
            private int skip = 0;
            private int maxItems = batchSize;
            private boolean hasMore = true;

            @Override
            public int getTotalEstimatedWorkSize()
            {
                return personService.countPeople();
            }

            @Override
            public Collection<PersonInfo> getNextWork()
            {
                if (!hasMore)
                {
                    return Collections.emptyList();
                }

                PagingResults<PersonInfo> people = personService.getPeople(null, null, null, new PagingRequest(skip, maxItems));
                List<PersonInfo> page = people.getPage();
                skip += page.size();
                hasMore = people.hasMoreItems();
                return page;
            }
        };

        final RetryingTransactionHelper txHelper = transactionService.getRetryingTransactionHelper();
        txHelper.setMaxRetries(0);

        new BatchProcessor<PersonInfo>("FeedNotifier", txHelper, provider, numThreads, batchSize,
                applicationContext, logger, 100).process(worker, true);
    }
    catch (Throwable e)
    {
        // If the VM is shutting down, then ignore
        if (vmShutdownListener.isVmShuttingDown())
        {
            // Ignore
        }
        else
        {
            logger.error("Exception during notification of feeds", e);
        }
    }
    finally
    {
        int count = userCnt.get();
        int entryCount = feedEntryCnt.get();

        // assume sends are synchronous - hence bump up to last max feed id
        if (count > 0)
        {
            if (logger.isInfoEnabled())
            {
                // TODO i18n of info message
                StringBuilder sb = new StringBuilder();
                sb.append("Notified ").append(count).append(" user").append(count != 1 ? "s" : "");
                sb.append(" of ").append(entryCount).append(" activity feed entr").append(entryCount != 1 ? "ies" : "y");
                sb.append(" (in ").append(System.currentTimeMillis() - startTime).append(" msecs)");
                logger.info(sb.toString());
            }
        }
        else
        {
            if (logger.isTraceEnabled())
            {
                logger.trace("Nothing to send since no new user activities found");
            }
        }
    }
}
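The worker above combines three concerns: authenticate as the user captured before the batch started, switch to the captured tenant, and run each person's notification in its own non-retrying write transaction. The sketch below, which is not part of the Alfresco sources, pulls that wrapper out into a standalone helper so the shape is easier to see; the class name, method name and the use of a Runnable for the unit of work are illustrative assumptions, and the two boolean arguments of doInTransaction are readOnly and requiresNew.

import org.alfresco.repo.security.authentication.AuthenticationUtil;
import org.alfresco.repo.tenant.TenantUtil;
import org.alfresco.repo.tenant.TenantUtil.TenantRunAsWork;
import org.alfresco.repo.transaction.RetryingTransactionHelper;
import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback;
import org.alfresco.service.transaction.TransactionService;

public class PerItemTxnExample
{
    private TransactionService transactionService; // assumed to be injected

    // Runs one unit of work as the given user, in the given tenant, in its own
    // write transaction and without retries - the shape used by process() above,
    // with the push/pop of authentication folded into the same method.
    public void runAsUserInTenant(final String runAsUser, final String tenantDomain, final Runnable unitOfWork)
    {
        AuthenticationUtil.pushAuthentication();
        AuthenticationUtil.setFullyAuthenticatedUser(runAsUser);
        try
        {
            final RetryingTransactionHelper txHelper = transactionService.getRetryingTransactionHelper();
            txHelper.setMaxRetries(0); // fail fast instead of retrying

            TenantUtil.runAsTenant(new TenantRunAsWork<Void>()
            {
                @Override
                public Void doWork() throws Exception
                {
                    // readOnly = false, requiresNew = true: each item gets its own write transaction
                    txHelper.doInTransaction(new RetryingTransactionCallback<Void>()
                    {
                        public Void execute() throws Throwable
                        {
                            unitOfWork.run();
                            return null;
                        }
                    }, false, true);
                    return null;
                }
            }, tenantDomain);
        }
        finally
        {
            AuthenticationUtil.popAuthentication();
        }
    }
}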
Use of org.alfresco.repo.batch.BatchProcessWorkProvider in project alfresco-repository by Alfresco.
Class LocalFeedGenerator, method generate:
protected boolean generate() throws Exception
{
    final Long maxSequence = getPostDaoService().getMaxActivitySeq();
    final Long minSequence = getPostDaoService().getMinActivitySeq();
    final Integer maxNodeHash = getPostDaoService().getMaxNodeHash();

    if ((maxSequence == null) || (minSequence == null) || (maxNodeHash == null))
    {
        return false;
    }

    // TODO ... or push this up to the job scheduler ... ?
    AuthenticationUtil.runAs(new RunAsWork<Object>()
    {
        public Object doWork()
        {
            getWebScriptsCtx().setTicket(getAuthenticationService().getCurrentTicket());
            return null;
        }
    }, // need web scripts to support System-level authentication ... see RepositoryContainer !
    AuthenticationUtil.getSystemUserName());

    // process the activity posts using the batch processor {@link BatchProcessor}
    BatchProcessor.BatchProcessWorker<JobSettings> worker = new BatchProcessor.BatchProcessWorker<JobSettings>()
    {
        @Override
        public String getIdentifier(final JobSettings js)
        {
            // TODO
            StringBuilder sb = new StringBuilder("JobSettings ");
            sb.append(js);
            return sb.toString();
        }

        @Override
        public void beforeProcess() throws Throwable
        {
        }

        @Override
        public void afterProcess() throws Throwable
        {
        }

        @Override
        public void process(final JobSettings js) throws Throwable
        {
            final RetryingTransactionHelper txHelper = getTransactionService().getRetryingTransactionHelper();
            txHelper.setMaxRetries(0);

            txHelper.doInTransaction(new RetryingTransactionCallback<Void>()
            {
                public Void execute() throws Throwable
                {
                    int jobTaskNode = js.getJobTaskNode();
                    long minSeq = js.getMinSeq();
                    long maxSeq = js.getMaxSeq();
                    RepoCtx webScriptsCtx = js.getWebScriptsCtx();

                    // FeedTaskProcessor takes JobSettings parameters instead of a collection of ActivityPost objects; it could be refactored.
                    feedTaskProcessor.process(jobTaskNode, minSeq, maxSeq, webScriptsCtx);
                    return null;
                }
            }, false, true);
        }
    };

    // provides one JobSettings object per batch
    BatchProcessWorkProvider<JobSettings> provider = new BatchProcessWorkProvider<JobSettings>()
    {
        private Long skip = minSequence;
        private boolean hasMore = true;

        @Override
        public int getTotalEstimatedWorkSize()
        {
            long size = maxSequence - minSequence + 1;
            long remain = size % batchSize;
            long workSize = (remain == 0) ? (size / batchSize) : (size / batchSize + 1);
            return (int) workSize;
        }

        @Override
        public Collection<JobSettings> getNextWork()
        {
            if (!hasMore)
            {
                return Collections.emptyList();
            }

            JobSettings js = new JobSettings();
            js.setMinSeq(skip);
            js.setMaxSeq(skip + batchSize - 1);
            js.setJobTaskNode(maxNodeHash);
            js.setWebScriptsCtx(getWebScriptsCtx());

            skip += batchSize;
            hasMore = skip <= maxSequence;

            // Only one JobSettings object is returned per call, because FeedTaskProcessor fetches the activity posts itself before processing.
            List<JobSettings> result = new ArrayList<JobSettings>(1);
            result.add(js);
            return result;
        }
    };

    final RetryingTransactionHelper txHelper = getTransactionService().getRetryingTransactionHelper();
    txHelper.setMaxRetries(0);

    // The batchSize and loggingInterval parameters are set to 1 because the provider always returns a collection
    // containing a single JobSettings object; FeedTaskProcessor fetches the activity posts itself and only needs
    // the JobSettings parameters. FeedTaskProcessor could be refactored.
    new BatchProcessor<JobSettings>("LocalFeedGenerator", txHelper, provider, numThreads, 1, null, logger, 1).process(worker, true);

    return true;
}
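As a worked example of the provider's chunking: with minSequence = 1, maxSequence = 250 and batchSize = 100, getTotalEstimatedWorkSize() returns 3 (250 posts with a remainder of 50, so 250/100 + 1 chunks), and successive getNextWork() calls yield JobSettings ranges [1, 100], [101, 200] and [201, 300]. The last range extends past maxSequence because maxSeq is not clamped. After the third call skip reaches 301, hasMore becomes false, and the provider returns an empty collection to signal that there is no more work.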