Use of com.google.gerrit.entities.BranchNameKey in project gerrit by GerritCodeReview.
The class DeleteRef, method createDeleteCommand:
private ReceiveCommand createDeleteCommand(ProjectState projectState, Repository r, String refName)
    throws IOException, ResourceConflictException, PermissionBackendException {
  Ref ref = r.getRefDatabase().exactRef(refName);
  ReceiveCommand command;
  if (ref == null) {
    command = new ReceiveCommand(ObjectId.zeroId(), ObjectId.zeroId(), refName);
    command.setResult(Result.REJECTED_OTHER_REASON, "it doesn't exist or you do not have permission to delete it");
    return command;
  }
  command = new ReceiveCommand(ref.getObjectId(), ObjectId.zeroId(), ref.getName());

  if (isConfigRef(refName)) {
    // Never allow deleting the meta config branch.
    command.setResult(Result.REJECTED_OTHER_REASON, "not allowed to delete branch " + refName);
  } else {
    try {
      permissionBackend.currentUser().project(projectState.getNameKey()).ref(refName).check(RefPermission.DELETE);
    } catch (AuthException denied) {
      command.setResult(Result.REJECTED_OTHER_REASON, "it doesn't exist or you do not have permission to delete it");
    }
  }

  if (!projectState.statePermitsWrite()) {
    command.setResult(Result.REJECTED_OTHER_REASON, "project state does not permit write");
  }

  if (!refName.startsWith(R_TAGS)) {
    BranchNameKey branchKey = BranchNameKey.create(projectState.getNameKey(), ref.getName());
    if (!queryProvider.get().setLimit(1).byBranchOpen(branchKey).isEmpty()) {
      command.setResult(Result.REJECTED_OTHER_REASON, "it has open changes");
    }
  }

  RefUpdate u = r.updateRef(refName);
  u.setForceUpdate(true);
  u.setExpectedOldObjectId(r.exactRef(refName).getObjectId());
  u.setNewObjectId(ObjectId.zeroId());
  refDeletionValidator.validateRefOperation(
      projectState.getName(), identifiedUser.get(), u, /* pushOptions */ ImmutableListMultimap.of());
  return command;
}
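For reference, BranchNameKey simply pairs a Project.NameKey with a fully qualified ref name, which is why the open-changes guard above can pass ref.getName() straight to BranchNameKey.create. A minimal standalone sketch of that behavior (the project name is illustrative, not taken from the code above):

import com.google.gerrit.entities.BranchNameKey;
import com.google.gerrit.entities.Project;

public class BranchNameKeyDemo {
  public static void main(String[] args) {
    // BranchNameKey.create normalizes a short branch name such as "master" to the full ref
    // "refs/heads/master"; an already fully qualified name is kept as-is.
    Project.NameKey project = Project.nameKey("my-project");
    BranchNameKey branchKey = BranchNameKey.create(project, "master");

    System.out.println(branchKey.project().get()); // my-project
    System.out.println(branchKey.branch()); // refs/heads/master
  }
}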
Use of com.google.gerrit.entities.BranchNameKey in project gerrit by GerritCodeReview.
The class AbstractDaemonTest, method fetchFromBundles:
/**
 * Fetches each bundle into a newly cloned repository, applies the bundle, and returns the
 * resulting tree id.
 *
 * <p>Omits NoteDb meta refs.
 */
protected Map<BranchNameKey, ObjectId> fetchFromBundles(BinaryResult bundles) throws Exception {
  assertThat(bundles.getContentType()).isEqualTo("application/x-zip");

  FileSystem fs = Jimfs.newFileSystem();
  Path previewPath = fs.getPath("preview.zip");
  try (OutputStream out = Files.newOutputStream(previewPath)) {
    bundles.writeTo(out);
  }
  Map<BranchNameKey, ObjectId> ret = new HashMap<>();
  try (FileSystem zipFs = FileSystems.newFileSystem(previewPath, (ClassLoader) null);
      DirectoryStream<Path> dirStream =
          Files.newDirectoryStream(Iterables.getOnlyElement(zipFs.getRootDirectories()))) {
    for (Path p : dirStream) {
      if (!Files.isRegularFile(p)) {
        continue;
      }
      String bundleName = p.getFileName().toString();
      int len = bundleName.length();
      assertThat(bundleName).endsWith(".git");
      String repoName = bundleName.substring(0, len - 4);
      Project.NameKey proj = Project.nameKey(repoName);
      TestRepository<?> localRepo = cloneProject(proj);

      try (InputStream bundleStream = Files.newInputStream(p);
          TransportBundleStream tbs =
              new TransportBundleStream(localRepo.getRepository(), new URIish(bundleName), bundleStream)) {
        FetchResult fr =
            tbs.fetch(NullProgressMonitor.INSTANCE, Arrays.asList(new RefSpec("refs/*:refs/preview/*")));
        for (Ref r : fr.getAdvertisedRefs()) {
          String refName = r.getName();
          if (RefNames.isNoteDbMetaRef(refName)) {
            continue;
          }
          RevCommit c = localRepo.getRevWalk().parseCommit(r.getObjectId());
          ret.put(BranchNameKey.create(proj, refName), c.getTree().copy());
        }
      }
    }
  }
  assertThat(ret).isNotEmpty();
  return ret;
}
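A hedged sketch of how a test built on AbstractDaemonTest typically ties this helper to assertTrees below; createChange comes from AbstractDaemonTest, while submitPreview and submit are assumed to be the helpers provided by Gerrit's submit test base classes rather than part of the excerpt above:

@Test
public void previewMatchesSubmittedTrees() throws Exception {
  // Push a change, then ask the server for a submit preview (assumed helper).
  PushOneCommit.Result change = createChange();
  BinaryResult bundles = submitPreview(change.getChangeId());

  // Turn the zipped bundles into a map of branch -> predicted tree id.
  Map<BranchNameKey, ObjectId> preview = fetchFromBundles(bundles);

  // Submit for real and verify that each previewed branch now has the predicted tree.
  submit(change.getChangeId());
  assertTrees(project, preview);
}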
Use of com.google.gerrit.entities.BranchNameKey in project gerrit by GerritCodeReview.
The class AbstractDaemonTest, method assertTrees:
/**
 * Assert that the given branches have the given tree ids.
 */
protected void assertTrees(Project.NameKey proj, Map<BranchNameKey, ObjectId> trees) throws Exception {
  TestRepository<?> localRepo = cloneProject(proj);
  GitUtil.fetch(localRepo, "refs/*:refs/*");
  Map<BranchNameKey, RevTree> refValues = new HashMap<>();
  for (BranchNameKey b : trees.keySet()) {
    if (!b.project().equals(proj)) {
      continue;
    }
    Ref r = localRepo.getRepository().exactRef(b.branch());
    assertThat(r).isNotNull();
    RevWalk rw = localRepo.getRevWalk();
    RevCommit c = rw.parseCommit(r.getObjectId());
    refValues.put(b, c.getTree());

    assertThat(trees.get(b)).isEqualTo(refValues.get(b));
  }
  assertThat(refValues.keySet()).containsAnyIn(trees.keySet());
}
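Both helpers above rely on BranchNameKey being a value type with proper equals/hashCode, so separately constructed keys can find each other in a HashMap. A small standalone sketch with illustrative names:

import com.google.gerrit.entities.BranchNameKey;
import com.google.gerrit.entities.Project;
import java.util.HashMap;
import java.util.Map;
import org.eclipse.jgit.lib.ObjectId;

public class BranchNameKeyMapDemo {
  public static void main(String[] args) {
    Project.NameKey proj = Project.nameKey("demo");
    Map<BranchNameKey, ObjectId> trees = new HashMap<>();
    trees.put(BranchNameKey.create(proj, "refs/heads/master"), ObjectId.zeroId());

    // A separately constructed key with the same project and ref compares equal, so the lookup
    // succeeds; this is what lets assertTrees correlate its local refs with the preview map.
    BranchNameKey sameBranch = BranchNameKey.create(proj, "refs/heads/master");
    System.out.println(trees.containsKey(sameBranch)); // true
    System.out.println(trees.get(sameBranch).name()); // the all-zero object id
  }
}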
Use of com.google.gerrit.entities.BranchNameKey in project gerrit by GerritCodeReview.
The class ReceiveCommits, method autoCloseChanges:
private void autoCloseChanges(ReceiveCommand cmd, Task progress) {
  try (TraceTimer traceTimer = newTimer("autoCloseChanges")) {
    logger.atFine().log("Starting auto-closing of changes");
    String refName = cmd.getRefName();
    Set<Change.Id> ids = new HashSet<>();
    // TODO(dborowitz): Combine this BatchUpdate with the main one in handleRegularCommands.
    try {
      retryHelper.changeUpdate("autoCloseChanges", updateFactory -> {
        try (BatchUpdate bu = updateFactory.create(projectState.getNameKey(), user, TimeUtil.now());
            ObjectInserter ins = repo.newObjectInserter();
            ObjectReader reader = ins.newReader();
            RevWalk rw = new RevWalk(reader)) {
          if (ObjectId.zeroId().equals(cmd.getOldId())) {
            // The ref is newly created; skip the potentially expensive computation that loops
            // over all commits.
            return null;
          }
          bu.setRepository(repo, rw, ins);
          // TODO(dborowitz): Teach BatchUpdate to ignore missing changes.

          RevCommit newTip = rw.parseCommit(cmd.getNewId());
          BranchNameKey branch = BranchNameKey.create(project.getNameKey(), refName);

          rw.reset();
          rw.sort(RevSort.REVERSE);
          rw.markStart(newTip);
          rw.markUninteresting(rw.parseCommit(cmd.getOldId()));

          Map<Change.Key, ChangeNotes> byKey = null;
          List<ReplaceRequest> replaceAndClose = new ArrayList<>();

          int existingPatchSets = 0;
          int newPatchSets = 0;
          SubmissionId submissionId = null;

          COMMIT:
          for (RevCommit c; (c = rw.next()) != null; ) {
            rw.parseBody(c);

            // Check for existing patch set refs pointing to this commit.
            for (PatchSet.Id psId : receivePackRefCache.patchSetIdsFromObjectId(c.copy())) {
              Optional<ChangeNotes> notes = getChangeNotes(psId.changeId());
              if (notes.isPresent() && notes.get().getChange().getDest().equals(branch)) {
                if (submissionId == null) {
                  submissionId = new SubmissionId(notes.get().getChange());
                }
                existingPatchSets++;
                bu.addOp(notes.get().getChangeId(), setPrivateOpFactory.create(false, null));
                bu.addOp(
                    psId.changeId(),
                    mergedByPushOpFactory.create(
                        requestScopePropagator, psId, submissionId, refName, newTip.getId().getName()));
                continue COMMIT;
              }
            }

            for (String changeId : ChangeUtil.getChangeIdsFromFooter(c, urlFormatter.get())) {
              if (byKey == null) {
                byKey =
                    retryHelper
                        .changeIndexQuery(
                            "queryOpenChangesByKeyByBranch", q -> openChangesByKeyByBranch(q, branch))
                        .call();
              }

              ChangeNotes onto = byKey.get(Change.key(changeId.trim()));
              if (onto != null) {
                newPatchSets++;
                // Hold onto this until we're done with the walk, as the call to
                // req.validate below calls isMergedInto which resets the walk.
                ReplaceRequest req = new ReplaceRequest(onto.getChangeId(), c, cmd, false);
                req.notes = onto;
                replaceAndClose.add(req);
                continue COMMIT;
              }
            }
          }

          for (ReplaceRequest req : replaceAndClose) {
            Change.Id id = req.notes.getChangeId();
            if (!req.validateNewPatchSetForAutoClose()) {
              logger.atFine().log("Not closing %s because validation failed", id);
              continue;
            }
            if (submissionId == null) {
              submissionId = new SubmissionId(req.notes.getChange());
            }
            req.addOps(bu, null);
            bu.addOp(id, setPrivateOpFactory.create(false, null));
            bu.addOp(
                id,
                mergedByPushOpFactory
                    .create(requestScopePropagator, req.psId, submissionId, refName, newTip.getId().getName())
                    .setPatchSetProvider(req.replaceOp::getPatchSet));
            bu.addOp(id, new ChangeProgressOp(progress));
            ids.add(id);
          }

          logger.atFine().log(
              "Auto-closing %d changes with existing patch sets and %d with new patch sets",
              existingPatchSets, newPatchSets);
          bu.execute();
        } catch (IOException | StorageException | PermissionBackendException e) {
          throw new StorageException("Failed to auto-close changes", e);
        }

        // If we are here, we didn't throw UpdateException. Record the result.
        // The ordering is indeterminate due to the HashSet; unfortunately, Change.Id doesn't fit
        // into TreeSet.
        ids.stream().forEach(id -> result.addChange(ReceiveCommitsResult.ChangeStatus.AUTOCLOSED, id));
        return null;
      }).defaultTimeoutMultiplier(5).call();
    } catch (RestApiException e) {
      logger.atSevere().withCause(e).log("Can't insert patchset");
    } catch (UpdateException e) {
      logger.atSevere().withCause(e).log("Failed to auto-close changes");
    } finally {
      logger.atFine().log("Done auto-closing changes");
    }
  }
}
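The walk above resolves Change-Id footers against openChangesByKeyByBranch, a helper that is not part of this excerpt. A plausible sketch of it, assuming it reuses the same byBranchOpen(BranchNameKey) query seen in createDeleteCommand to index the destination branch's open changes by their Change-Id key:

private Map<Change.Key, ChangeNotes> openChangesByKeyByBranch(InternalChangeQuery query, BranchNameKey branch) {
  Map<Change.Key, ChangeNotes> byKey = new HashMap<>();
  // Every open change on the destination branch, keyed by its Change-Id.
  for (ChangeData cd : query.byBranchOpen(branch)) {
    byKey.put(cd.change().getKey(), cd.notes());
  }
  return byKey;
}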
Use of com.google.gerrit.entities.BranchNameKey in project gerrit by GerritCodeReview.
The class ReceiveCommits, method handleRegularCommands:
private void handleRegularCommands(List<ReceiveCommand> cmds, MultiProgressMonitor progress)
    throws PermissionBackendException, IOException, NoSuchProjectException {
  try (TraceTimer traceTimer =
      newTimer("handleRegularCommands", Metadata.builder().resourceCount(cmds.size()))) {
    result.magicPush(false);
    for (ReceiveCommand cmd : cmds) {
      parseRegularCommand(cmd);
    }

    Map<BranchNameKey, ReceiveCommand> branches;
    try (BatchUpdate bu =
            batchUpdateFactory.create(project.getNameKey(), user.materializedCopy(), TimeUtil.now());
        ObjectInserter ins = repo.newObjectInserter();
        ObjectReader reader = ins.newReader();
        RevWalk rw = new RevWalk(reader);
        MergeOpRepoManager orm = ormProvider.get()) {
      bu.setRepository(repo, rw, ins);
      bu.setRefLogMessage("push");

      int added = 0;
      for (ReceiveCommand cmd : cmds) {
        if (cmd.getResult() == NOT_ATTEMPTED) {
          bu.addRepoOnlyOp(new UpdateOneRefOp(cmd));
          added++;
        }
      }
      logger.atFine().log("Added %d additional ref updates", added);

      SubmissionExecutor submissionExecutor =
          new SubmissionExecutor(false, superprojectUpdateSubmissionListeners);
      submissionExecutor.execute(ImmutableList.of(bu));

      orm.setContext(TimeUtil.now(), user, NotifyResolver.Result.none());
      submissionExecutor.afterExecutions(orm);

      branches = bu.getSuccessfullyUpdatedBranches(false);
    } catch (UpdateException | RestApiException e) {
      throw new StorageException(e);
    }

    // This could be moved into a SubmissionListener
    branches.values().stream()
        .filter(c -> isHead(c) || isConfig(c))
        .forEach(c -> {
          // These updates can't happen within the same BatchUpdate because they involve kicking
          // off an additional BatchUpdate.
          switch (c.getType()) {
            case CREATE:
            case UPDATE:
            case UPDATE_NONFASTFORWARD:
              Task closeProgress = progress.beginSubTask("closed", UNKNOWN);
              autoCloseChanges(c, closeProgress);
              closeProgress.end();
              break;
            case DELETE:
              break;
          }
        });
  }
}
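Because getSuccessfullyUpdatedBranches keys its result by BranchNameKey, callers could also report per-branch outcomes directly; a hypothetical illustration, not part of the original method:

branches.forEach(
    (branch, command) ->
        logger.atFine().log(
            "updated %s in %s (%s)", branch.branch(), branch.project(), command.getType()));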