Use of org.commonjava.cdi.util.weft.DrainingExecutorCompletionService in the Commonjava project Indy.
The class PromotionManager, method runPathPromotions.
/**
 * Execute the actual path promotion: copy the pending paths from the source store to the target
 * store in parallel batches, then either report the completed/skipped paths or — on any error —
 * roll back the already-copied paths so promotion stays all-or-nothing.
 *
 * @param request    the promotion request carrying source/target keys and options
 * @param pending    paths still awaiting promotion
 * @param contents   the transfers corresponding to the pending paths
 * @param validation the validation result to attach to the returned promotion result
 * @return a result describing completed, skipped and pending paths, plus any error text
 */
private PathsPromoteResult runPathPromotions(final PathsPromoteRequest request, final Set<String> pending, final List<Transfer> contents, final ValidationResult validation) {
    long begin = System.currentTimeMillis();

    // Bail out early if either the source or the target repository cannot be retrieved.
    PromotionHelper.PromotionRepoRetrievalResult checkResult = promotionHelper.checkAndRetrieveSourceAndTargetRepos(request);
    if (checkResult.hasErrors()) {
        return new PathsPromoteResult(request, pending, emptySet(), emptySet(), StringUtils.join(checkResult.errors, "\n"), validation);
    }

    final ArtifactStore targetStore = checkResult.targetStore;
    StoreKey targetKey = targetStore.getKey();
    logger.info("Run promotion from: {} to: {}, paths: {}", request.getSource(), targetKey, pending);

    // The affected groups are passed into the promotion jobs (and later the NFC cleanup task).
    Set<Group> affectedGroups;
    try {
        affectedGroups = storeManager.query().getGroupsAffectedBy(targetKey);
        logger.info("Calculate affected groups, target: {}, affected-groups: {}", targetKey, affectedGroups);
    } catch (IndyDataException e) {
        logger.error("Get affected groups failed", e);
        return new PathsPromoteResult(request, pending, emptySet(), emptySet(), "Get affected groups failed, " + e.getMessage(), validation);
    }

    DrainingExecutorCompletionService<Set<PathTransferResult>> svc = new DrainingExecutorCompletionService<>(transferService);

    // Split the transfers into batches sized against the executor's core pool so batches run in parallel.
    int corePoolSize = transferService.getCorePoolSize();
    int size = contents.size();
    int batchSize = getParalleledBatchSize(size, corePoolSize);
    logger.info("Execute parallel on collection, size: {}, batch: {}", size, batchSize);
    Collection<Collection<Transfer>> batches = batch(contents, batchSize);

    final List<String> errors = new ArrayList<>();
    try {
        detectOverloadVoid(() -> batches.forEach(batch -> svc.submit(newPathPromotionsJob(batch, targetStore, request, affectedGroups))));
    } catch (IndyWorkflowException e) {
        // might be PoolOverloadException. Log it and continue to revert any completed paths
        String msg = String.format("Failed to submit all path promotion jobs. Error: %s", e.toString());
        logger.error(msg, e);
        errors.add(msg);
    }

    final Set<PathTransferResult> results = new HashSet<>();
    try {
        svc.drain(results::addAll);
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers up the stack can still observe the interruption;
        // then fall through to the rollback path below, same as any other drain failure.
        Thread.currentThread().interrupt();
        String msg = String.format("Error waiting for promotion of: %s to: %s", request.getSource(), request.getTarget());
        logger.error(msg, e);
        errors.add(msg);
    } catch (ExecutionException e) {
        String msg = String.format("Error waiting for promotion of: %s to: %s", request.getSource(), request.getTarget());
        logger.error(msg, e);
        errors.add(msg);
    }

    // Partition per-path results into errors / skipped / completed.
    final Set<String> completed = new HashSet<>();
    final Set<String> skipped = new HashSet<>();
    results.forEach(result -> {
        if (result.error != null) {
            errors.add(result.error);
        } else if (result.skipped) {
            skipped.add(result.path);
        } else {
            completed.add(result.path);
        }
    });

    PathsPromoteResult result;
    if (!errors.isEmpty()) {
        // On any error, revert the paths already copied into the target to keep promotion atomic.
        List<String> rollbackErrors = promotionHelper.deleteFromStore(completed, targetStore);
        errors.addAll(rollbackErrors);
        result = new PathsPromoteResult(request, pending, emptySet(), emptySet(), StringUtils.join(errors, "\n"), validation);
    } else {
        result = new PathsPromoteResult(request, emptySet(), completed, skipped, null, validation);
        // Asynchronously clear NFC (not-found cache) entries for the promoted paths in the target
        // store and its affected groups, so the newly-promoted content becomes visible.
        final String name = String.format("PromoteNFCClean-method(%s)-source(%s)-target(%s)", "runPathPromotions", request.getSource(), targetStore.getKey());
        final String context = String.format("Class: %s, method: %s, source: %s, target: %s", this.getClass().getName(), "runPathPromotions", request.getSource(), targetStore.getKey());
        storeManager.asyncGroupAffectedBy(new StoreDataManager.ContextualTask(name, context, () -> promotionHelper.clearStoreNFC(completed, targetStore, affectedGroups)));
        if (request.isFireEvents()) {
            fireEvent(promoteCompleteEvent, new PathsPromoteCompleteEvent(result));
        }
    }
    logger.info("Promotion completed, promotionId: {}, timeInSeconds: {}", request.getPromotionId(), timeInSeconds(begin));
    return result;
}
Use of org.commonjava.cdi.util.weft.DrainingExecutorCompletionService in the Commonjava project Indy.
The class PromotionValidator, method validate.
/**
 * Run the validation rule-set matching the request's target store, executing each rule on the
 * validation executor in parallel and collecting any rule failures.
 * <p>
 * NOTE: As of Indy 1.2.6, ValidationRequest passed back to enable further post-processing, especially of promotion
 * paths, after promotion takes place. This enables us to avoid re-executing recursive path discovery, for instance.
 *
 * @param request the promotion request whose source/target determine the rule-set to run
 * @param result  collector for the matched rule-set name and any rule validation errors
 * @param baseUrl base URL used when a temporary remote repository must be created for validation
 * @return the ValidationRequest used, enabling post-processing without re-running path discovery
 * @throws PromotionValidationException if the source store can't be loaded, a rule fails, or
 *         rule execution is interrupted or errors out
 * @throws IndyWorkflowException if submitting the rule jobs overloads the validation thread pool
 */
@Measure
public ValidationRequest validate(PromoteRequest request, ValidationResult result, String baseUrl) throws PromotionValidationException, IndyWorkflowException {
    ValidationRuleSet set = validationsManager.getRuleSetMatching(request.getTargetKey());
    ArtifactStore source;
    try {
        source = storeDataMgr.getArtifactStore(request.getSource());
    } catch (IndyDataException e) {
        throw new PromotionValidationException(String.format("Failed to retrieve source ArtifactStore: %s for validation", request.getSource()), e);
    }
    if (set != null) {
        result.setRuleSet(set.getName());
        RequestContextHelper.setContext(PROMOTION_VALIDATION_RULE_SET, set.getName());
        logger.debug("Running validation rule-set for promotion: {}", set.getName());
        List<String> ruleNames = set.getRuleNames();
        if (ruleNames != null && !ruleNames.isEmpty()) {
            // May be the source store itself, or a temporary remote repo created for this validation.
            final ArtifactStore store = getRequestStore(request, baseUrl);
            final ValidationRequest req = new ValidationRequest(request, set, validationTools, store);
            try {
                DrainingExecutorCompletionService<Exception> svc = new DrainingExecutorCompletionService<>(validateService);
                // Each rule runs as its own job; a rule's exception is RETURNED (not thrown) so one
                // failure doesn't abort the drain and all failures can be reported together.
                detectOverloadVoid(() -> {
                    for (String ruleRef : ruleNames) {
                        svc.submit(() -> {
                            RequestContextHelper.setContext(PROMOTION_VALIDATION_RULE, ruleRef);
                            Exception err = null;
                            try {
                                executeValidationRule(ruleRef, req, result, request);
                            } catch (Exception e) {
                                err = e;
                            } finally {
                                RequestContextHelper.clearContext(PROMOTION_VALIDATION_RULE);
                            }
                            return err;
                        });
                    }
                });
                List<String> errors = new ArrayList<>();
                svc.drain(err -> {
                    if (err != null) {
                        logger.error("Promotion validation failure", err);
                        errors.add(err.getMessage());
                    }
                });
                if (!errors.isEmpty()) {
                    throw new PromotionValidationException(format("Failed to do promotion validation: \n\n%s", join(errors, "\n")));
                }
            } catch (InterruptedException e) {
                // Restore the interrupt flag so callers up the stack can still observe the interruption.
                Thread.currentThread().interrupt();
                throw new PromotionValidationException("Failed to do promotion validation: validation execution has been interrupted ", e);
            } catch (ExecutionException e) {
                throw new PromotionValidationException("Failed to execute promotion validations", e);
            } finally {
                // If a temporary remote repo was created for validation, tear it down along with its
                // on-disk content, regardless of the validation outcome.
                if (needTempRepo(request)) {
                    try {
                        final String changeSum = format("Removes the temp remote repo [%s] after promote operation.", store);
                        storeDataMgr.deleteArtifactStore(store.getKey(), new ChangeSummary(ChangeSummary.SYSTEM_USER, changeSum), new EventMetadata().set(ContentManager.SUPPRESS_EVENTS, true));
                        Transfer root = downloadManager.getStoreRootDirectory(store);
                        if (root.exists()) {
                            root.delete(false);
                        }
                        logger.debug("Promotion temporary repo {} has been deleted for {}", store.getKey(), request.getSource());
                    } catch (IndyDataException | IOException e) {
                        logger.warn("Temporary promotion validation repository was NOT removed correctly.", e);
                    }
                }
            }
            return req;
        } else {
            logger.info("No validation rules are defined for: {}", request.getTargetKey());
            return new ValidationRequest(request, set, validationTools, source);
        }
    } else {
        logger.info("No validation rule-sets are defined for: {}", request.getTargetKey());
        return new ValidationRequest(request, set, validationTools, source);
    }
}
Use of org.commonjava.cdi.util.weft.DrainingExecutorCompletionService in the Commonjava project Indy.
The class StoreContentListener, method clearPaths.
/**
* List the paths in target store and clean up the paths in affected groups.
*
* If groups are given, use them (for group update since all members share same group hierarchy). Otherwise,
* query the affected groups (for store deletion and dis/enable event).
*/
/**
 * List the paths in each target store and clean up the matching paths in the affected groups.
 *
 * If groups are given, use them (for group update since all members share same group hierarchy). Otherwise,
 * query the affected groups (for store deletion and dis/enable event).
 */
private void clearPaths(final Set<StoreKey> keys, Predicate<? super String> pathFilter, final Set<Group> groups, final boolean deleteOriginPath) {
    // NOSSUP-76 we still need to use synchronized/drain way to clean the paths now, because sometimes the new used metadata
    // not updated in time when some builds want to consume them as the obsolete metadata not cleared under
    // async way.
    final DrainingExecutorCompletionService<Integer> clearService = new DrainingExecutorCompletionService<>(cleanupExecutor);

    for (final StoreKey storeKey : keys) {
        final ArtifactStore originStore;
        try {
            originStore = storeDataManager.getArtifactStore(storeKey);
        } catch (IndyDataException e) {
            // Skip this store but keep cleaning the others.
            logger.error("Failed to retrieve store: " + storeKey, e);
            continue;
        }

        // Use the caller-supplied groups when present; otherwise look them up per store key.
        Set<Group> affectedGroups = groups;
        if (affectedGroups == null) {
            try {
                affectedGroups = storeDataManager.query().getGroupsAffectedBy(storeKey);
            } catch (IndyDataException e) {
                logger.error("Failed to retrieve groups affected by: " + storeKey, e);
                continue;
            }
        }

        logger.debug("Submit clean job for origin: {}", originStore);
        clearService.submit(clearPathsProcessor(originStore, pathFilter, affectedGroups, deleteOriginPath));
    }

    // Block until all submitted clean jobs complete (see NOSSUP-76 note above).
    drainAndCount(clearService, "stores: " + keys);
}
Use of org.commonjava.cdi.util.weft.DrainingExecutorCompletionService in the Commonjava project Indy.
The class DefaultDirectContentAccess, method listRaw.
/**
 * List the raw store resources under each of the given parent paths, fanning the per-path
 * listings out onto the content-access executor and collecting the results into a map.
 *
 * @param store          the store whose directory listings are requested
 * @param parentPathList the parent paths to list
 * @param eventMetadata  event metadata forwarded to the per-path listing call
 * @return a map of parent path -> listed resources for that path
 * @throws IndyWorkflowException if the parallel listing is interrupted or a listing job fails
 */
@Override
public Map<String, List<StoreResource>> listRaw(ArtifactStore store, List<String> parentPathList, EventMetadata eventMetadata) throws IndyWorkflowException {
    DrainingExecutorCompletionService<StoreListingResult> svc = new DrainingExecutorCompletionService<>(contentAccessService);
    Logger logger = LoggerFactory.getLogger(getClass());

    // Submit one listing job per parent path; detectOverloadVoid translates pool overload
    // into an IndyWorkflowException.
    detectOverloadVoid(() -> {
        for (final String path : parentPathList) {
            logger.trace("Requesting listing of {} in {}", path, store);
            svc.submit(() -> {
                addFieldToActiveSpan("storekey", store.getKey().toString());
                addFieldToActiveSpan("path", path);
                addFieldToActiveSpan("activity", "listRaw");
                logger.trace("Starting listing of {} in {}", path, store);
                List<StoreResource> listRaw = listRaw(store, path, eventMetadata);
                logger.trace("Listing of {} in {} finished", path, store);
                return new StoreListingResult(path, listRaw);
            });
        }
    });

    final Map<String, List<StoreResource>> result = new HashMap<>();
    try {
        svc.drain(slr -> result.put(slr.path, slr.listing));
    } catch (InterruptedException ex) {
        // Restore the interrupt flag before translating to a checked workflow exception.
        Thread.currentThread().interrupt();
        throw new IndyWorkflowException("Listing retrieval in %s was interrupted", ex, store);
    } catch (ExecutionException ex) {
        throw new IndyWorkflowException("There was an error in listing retrieval for %s: %s", ex, store, ex);
    }
    return result;
}
Use of org.commonjava.cdi.util.weft.DrainingExecutorCompletionService in the Commonjava project Indy.
The class MavenMetadataGenerator, method mergeMissing.
/**
 * Download/generate metadata for the group members still missing it, merging each successful
 * result into the master metadata and caching it, and reporting which members remain missing.
 *
 * @param request           (via func) each missing member store is turned into a download/generate job
 * @param group             the group whose member metadata is being merged
 * @param incrementalResult carries the members still missing and the master metadata accumulated so far
 * @param toMergePath       the metadata path being merged
 * @param description       human-readable label for logging (e.g. the phase name)
 * @param func              factory producing the per-store metadata retrieval job
 * @return a new incremental result: members that still failed, members merged in, and the updated master
 * @throws IndyWorkflowException if a metadata retrieval job fails with an execution error
 */
private MetadataIncrementalResult mergeMissing(final Group group, final MetadataIncrementalResult incrementalResult, final String toMergePath, String description, BiFunction<ArtifactStore, String, Callable<MetadataResult>> func) throws IndyWorkflowException {
    Set<ArtifactStore> missing = incrementalResult.missing;
    Metadata master = incrementalResult.result;
    logger.debug("Merge member metadata for {}, {}, missing: {}, size: {}", group.getKey(), description, missing, missing.size());

    DrainingExecutorCompletionService<MetadataResult> svc = new DrainingExecutorCompletionService<>(mavenMDGeneratorService);
    detectOverloadVoid(() -> missing.forEach(store -> svc.submit(func.apply(store, toMergePath))));

    // return stores failed download
    Set<ArtifactStore> resultingMissing = new HashSet<>();
    Set<StoreKey> included = new HashSet<>();
    try {
        svc.drain(mr -> {
            if (mr != null) {
                if (mr.missing) {
                    resultingMissing.add(mr.store);
                } else {
                    included.add(mr.store.getKey());
                    merger.merge(master, mr.metadata, group, toMergePath);
                    putToMetadataCache(mr.store.getKey(), toMergePath, new MetadataInfo(mr.metadata));
                }
            }
        });
    } catch (InterruptedException e) {
        // Deliberately best-effort: return the partial merge, but restore the interrupt flag so
        // callers up the stack can still observe the interruption.
        Thread.currentThread().interrupt();
        logger.debug("Interrupted while merging {} member metadata.", description);
    } catch (ExecutionException e) {
        throw new IndyWorkflowException("Failed to merge downloaded " + description + " member metadata.", e);
    }
    return new MetadataIncrementalResult(resultingMissing, included, master);
}
Aggregations