use of com.google.gerrit.server.update.SubmissionExecutor in project gerrit by GerritCodeReview.
the class MergeOp method merge.
/**
* Merges the given change.
*
* <p>Depending on the server configuration, more changes may be affected, e.g. by submission of a
* topic or via superproject subscriptions. All affected changes are integrated using the project's
* integration strategy.
*
* @param change the change to be merged.
* @param caller the identity of the caller
* @param checkSubmitRules whether the prolog submit rules should be evaluated
* @param submitInput parameters regarding the merge
* @param dryrun whether the submission should only be simulated and not actually be carried out
* @throws RestApiException if an error occurred.
* @throws PermissionBackendException if permissions can't be checked
* @throws IOException if an error occurred while reading from NoteDb.
* @return the merged change
*/
public Change merge(Change change, IdentifiedUser caller, boolean checkSubmitRules, SubmitInput submitInput, boolean dryrun) throws RestApiException, UpdateException, IOException, ConfigInvalidException, PermissionBackendException {
this.submitInput = submitInput;
this.notify = notifyResolver.resolve(firstNonNull(submitInput.notify, NotifyHandling.ALL), submitInput.notifyDetails);
this.dryrun = dryrun;
this.caller = caller;
this.ts = TimeUtil.now();
this.submissionId = new SubmissionId(change);
try (TraceContext traceContext = TraceContext.open().addTag(RequestId.Type.SUBMISSION_ID, new RequestId(submissionId.toString()))) {
openRepoManager();
logger.atFine().log("Beginning integration of %s", change);
try {
ChangeSet indexBackedChangeSet = mergeSuperSet.setMergeOpRepoManager(orm).completeChangeSet(change, caller, /* includingTopicClosure= */ false);
if (!indexBackedChangeSet.ids().contains(change.getId())) {
// indexBackedChangeSet contains only open changes; if the change is missing from this set,
// it may have been submitted concurrently in the meantime.
change = changeDataFactory.create(change).reloadChange();
if (!change.isNew()) {
throw new ResourceConflictException("change is " + ChangeUtil.status(change));
}
throw new IllegalStateException(String.format("change %s missing from %s", change.getId(), indexBackedChangeSet));
}
if (indexBackedChangeSet.furtherHiddenChanges()) {
throw new AuthException("A change to be submitted with " + change.getId() + " is not visible");
}
logger.atFine().log("Calculated to merge %s", indexBackedChangeSet);
// Reload ChangeSet so that we don't rely on (potentially) stale index data for merging
ChangeSet noteDbChangeSet = reloadChanges(indexBackedChangeSet);
// At this point, any change that isn't new can be filtered out since it was only here
// in the first place due to a stale index.
List<ChangeData> filteredChanges = new ArrayList<>();
for (ChangeData changeData : noteDbChangeSet.changes()) {
if (!changeData.change().getStatus().equals(Status.NEW)) {
logger.atFine().log("Change %s has status %s due to stale index, so it is skipped during submit", changeData.getId(), changeData.change().getStatus().name());
continue;
}
filteredChanges.add(changeData);
}
// There are no hidden changes (or else we would have thrown AuthException above).
ChangeSet filteredNoteDbChangeSet = new ChangeSet(filteredChanges, /* hiddenChanges= */ ImmutableList.of());
// Count cross-project submissions outside of the retry loop. The chance of a single project
// failing increases with the number of projects, so the failure count would be inflated if
// this metric were incremented inside of integrateIntoHistory.
int projects = filteredNoteDbChangeSet.projects().size();
if (projects > 1) {
topicMetrics.topicSubmissions.increment();
}
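// The SubmissionExecutor is created once per submission and reused across the retry attempts below.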
SubmissionExecutor submissionExecutor = new SubmissionExecutor(dryrun, superprojectUpdateSubmissionListeners);
RetryTracker retryTracker = new RetryTracker();
retryHelper.changeUpdate("integrateIntoHistory", updateFactory -> {
long attempt = retryTracker.lastAttemptNumber + 1;
boolean isRetry = attempt > 1;
if (isRetry) {
logger.atFine().log("Retrying, attempt #%d; skipping merged changes", attempt);
this.ts = TimeUtil.now();
openRepoManager();
}
this.commitStatus = new CommitStatus(filteredNoteDbChangeSet, isRetry);
if (checkSubmitRules) {
logger.atFine().log("Checking submit rules and state");
checkSubmitRulesAndState(filteredNoteDbChangeSet, isRetry);
} else {
logger.atFine().log("Bypassing submit rules");
bypassSubmitRulesAndRequirements(filteredNoteDbChangeSet);
}
integrateIntoHistory(filteredNoteDbChangeSet, submissionExecutor);
return null;
}).listener(retryTracker).defaultTimeoutMultiplier(filteredNoteDbChangeSet.projects().size() * 2).retryOn(t -> t instanceof RuntimeException).call();
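// Run the post-submission hooks of the registered listeners (e.g. superproject updates) once, after all retry attempts have completed.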
submissionExecutor.afterExecutions(orm);
if (projects > 1) {
topicMetrics.topicSubmissionsCompleted.increment();
}
// Fall back to returning the provided change if there was no update for it (e.g. the caller provided a change that was already merged).
return updatedChanges.containsKey(change.getId()) ? updatedChanges.get(change.getId()) : change;
} catch (IOException e) {
// Anything before the merge attempt is an error
throw new StorageException(e);
}
}
}
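For orientation, the sketch below shows how a caller might drive the method above. It is a hypothetical example, not Gerrit source: the SubmitCaller class, its constructor wiring, and the assumption that MergeOp is AutoCloseable and obtained from an injected Provider (a pattern resembling Gerrit's Submit REST endpoint) are all illustrative.
import com.google.gerrit.entities.Change;
import com.google.gerrit.extensions.api.changes.SubmitInput;
import com.google.gerrit.server.IdentifiedUser;
import com.google.gerrit.server.submit.MergeOp;
import com.google.inject.Provider;
// Hypothetical caller, for illustration only.
class SubmitCaller {
private final Provider<MergeOp> mergeOpProvider; // assumed to be injected by Guice
SubmitCaller(Provider<MergeOp> mergeOpProvider) {
this.mergeOpProvider = mergeOpProvider;
}
Change submit(Change change, IdentifiedUser caller, SubmitInput input) throws Exception {
// throws Exception only to keep the sketch short; merge() declares several checked exceptions.
// checkSubmitRules=true evaluates the submit rules; dryrun=false performs a real submission.
try (MergeOp op = mergeOpProvider.get()) {
return op.merge(change, caller, true, input, false);
}
}
}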
use of com.google.gerrit.server.update.SubmissionExecutor in project gerrit by GerritCodeReview.
the class ReceiveCommits method handleRegularCommands.
private void handleRegularCommands(List<ReceiveCommand> cmds, MultiProgressMonitor progress) throws PermissionBackendException, IOException, NoSuchProjectException {
try (TraceTimer traceTimer = newTimer("handleRegularCommands", Metadata.builder().resourceCount(cmds.size()))) {
result.magicPush(false);
for (ReceiveCommand cmd : cmds) {
parseRegularCommand(cmd);
}
Map<BranchNameKey, ReceiveCommand> branches;
try (BatchUpdate bu = batchUpdateFactory.create(project.getNameKey(), user.materializedCopy(), TimeUtil.now());
ObjectInserter ins = repo.newObjectInserter();
ObjectReader reader = ins.newReader();
RevWalk rw = new RevWalk(reader);
MergeOpRepoManager orm = ormProvider.get()) {
bu.setRepository(repo, rw, ins);
bu.setRefLogMessage("push");
int added = 0;
for (ReceiveCommand cmd : cmds) {
if (cmd.getResult() == NOT_ATTEMPTED) {
bu.addRepoOnlyOp(new UpdateOneRefOp(cmd));
added++;
}
}
logger.atFine().log("Added %d additional ref updates", added);
SubmissionExecutor submissionExecutor = new SubmissionExecutor(false, superprojectUpdateSubmissionListeners);
submissionExecutor.execute(ImmutableList.of(bu));
orm.setContext(TimeUtil.now(), user, NotifyResolver.Result.none());
submissionExecutor.afterExecutions(orm);
branches = bu.getSuccessfullyUpdatedBranches(false);
} catch (UpdateException | RestApiException e) {
throw new StorageException(e);
}
// This could be moved into a SubmissionListener
branches.values().stream().filter(c -> isHead(c) || isConfig(c)).forEach(c -> {
// The auto-close operations below cannot be part of the main BatchUpdate because they involve kicking off an additional BatchUpdate.
switch(c.getType()) {
case CREATE:
case UPDATE:
case UPDATE_NONFASTFORWARD:
Task closeProgress = progress.beginSubTask("closed", UNKNOWN);
autoCloseChanges(c, closeProgress);
closeProgress.end();
break;
case DELETE:
break;
}
});
}
}
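Stripped of the push-specific bookkeeping, the SubmissionExecutor usage in the method above reduces to four calls. The sketch below isolates them; the DirectPushSubmission class and its helper method are hypothetical, the BatchUpdate is assumed to be fully populated by the caller, and the package locations of MergeOpRepoManager and SubmissionListener are assumed from recent Gerrit trees.
import com.google.common.collect.ImmutableList;
import com.google.gerrit.server.IdentifiedUser;
import com.google.gerrit.server.change.NotifyResolver;
import com.google.gerrit.server.submit.MergeOpRepoManager;
import com.google.gerrit.server.update.BatchUpdate;
import com.google.gerrit.server.update.SubmissionExecutor;
import com.google.gerrit.server.update.SubmissionListener;
import com.google.gerrit.server.util.time.TimeUtil;
// Hypothetical helper, for illustration only; throws Exception only to keep the sketch short.
class DirectPushSubmission {
static void executeRefUpdates(BatchUpdate bu, ImmutableList<SubmissionListener> superprojectUpdateSubmissionListeners, MergeOpRepoManager orm, IdentifiedUser user) throws Exception {
// Not a dry run: the ref updates from the push are applied for real.
SubmissionExecutor submissionExecutor = new SubmissionExecutor(false, superprojectUpdateSubmissionListeners);
submissionExecutor.execute(ImmutableList.of(bu));
// Give the repo manager a context before the post-execution hooks run.
orm.setContext(TimeUtil.now(), user, NotifyResolver.Result.none());
submissionExecutor.afterExecutions(orm);
}
}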
use of com.google.gerrit.server.update.SubmissionExecutor in project gerrit by GerritCodeReview.
the class MergeOp method integrateIntoHistory.
private void integrateIntoHistory(ChangeSet cs, SubmissionExecutor submissionExecutor) throws RestApiException, UpdateException {
checkArgument(!cs.furtherHiddenChanges(), "cannot integrate hidden changes into history");
logger.atFine().log("Beginning merge attempt on %s", cs);
Map<BranchNameKey, BranchBatch> toSubmit = new HashMap<>();
ListMultimap<BranchNameKey, ChangeData> cbb;
try {
cbb = cs.changesByBranch();
} catch (StorageException e) {
throw new StorageException("Error reading changes to submit", e);
}
Set<BranchNameKey> branches = cbb.keySet();
for (BranchNameKey branch : branches) {
OpenRepo or = openRepo(branch.project());
if (or != null) {
toSubmit.put(branch, validateChangeList(or, cbb.get(branch)));
}
}
// Done with the checks that don't involve running submit strategies.
commitStatus.maybeFailVerbose();
try {
SubscriptionGraph subscriptionGraph = subscriptionGraphFactory.compute(branches, orm);
SubmoduleCommits submoduleCommits = submoduleCommitsFactory.create(orm);
UpdateOrderCalculator updateOrderCalculator = new UpdateOrderCalculator(subscriptionGraph);
List<SubmitStrategy> strategies = getSubmitStrategies(toSubmit, updateOrderCalculator, submoduleCommits, subscriptionGraph, dryrun);
this.allProjects = updateOrderCalculator.getProjectsInOrder();
List<BatchUpdate> batchUpdates = orm.batchUpdates(allProjects);
// Group batch updates by project
Map<Project.NameKey, BatchUpdate> batchUpdatesByProject = batchUpdates.stream().collect(Collectors.toMap(b -> b.getProject(), Function.identity()));
for (Map.Entry<Change.Id, ChangeData> entry : cs.changesById().entrySet()) {
Project.NameKey project = entry.getValue().project();
Change.Id changeId = entry.getKey();
ChangeData cd = entry.getValue();
batchUpdatesByProject.get(project).addOp(changeId, storeSubmitRequirementsOpFactory.create(cd.submitRequirementsIncludingLegacy().values(), cd));
}
try {
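// Register this attempt's SubmitStrategyListener before executing the BatchUpdates.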
submissionExecutor.setAdditionalBatchUpdateListeners(ImmutableList.of(new SubmitStrategyListener(submitInput, strategies, commitStatus)));
submissionExecutor.execute(batchUpdates);
} finally {
// If the BatchUpdate fails it can be that merging some of the changes was actually
// successful. This is why we must collect the updated changes even when an
// exception was thrown.
strategies.forEach(s -> updatedChanges.putAll(s.getUpdatedChanges()));
// Do not leave executed BatchUpdates in the OpenRepos
if (!dryrun) {
orm.resetUpdates(ImmutableSet.copyOf(this.allProjects));
}
}
} catch (NoSuchProjectException e) {
throw new ResourceNotFoundException(e.getMessage());
} catch (IOException e) {
throw new StorageException(e);
} catch (SubmoduleConflictException e) {
throw new IntegrationConflictException(e.getMessage(), e);
} catch (UpdateException e) {
if (e.getCause() instanceof LockFailureException) {
// Lock failures are a special case: RetryHelper depends on this causal chain to trigger a
// retry, so rethrow as-is. Losing the nicer error message constructed below should be rare enough
// as to be unnoticeable, assuming RetryHelper is retrying sufficiently.
throw e;
}
// An IntegrationConflictException wrapped by the BatchUpdate carries a user-visible message, so
// rethrow it directly; this effectively converts the
// inner IntegrationConflictException to a ResourceConflictException.
if (e.getCause() instanceof IntegrationConflictException) {
throw (IntegrationConflictException) e.getCause();
}
throw new MergeUpdateException(genericMergeError(cs), e);
}
}