Use of org.locationtech.geogig.storage.GraphDatabase in project GeoGig by boundlessgeo.
The class RebuildGraphOp, method _call().
/**
* Executes the {@code RebuildGraphOp} operation.
*
* @return a list of {@link ObjectId}s that were found to be missing or incomplete
*/
@Override
protected ImmutableList<ObjectId> _call() {
    Repository repository = repository();
    Preconditions.checkState(!repository.isSparse(),
            "Cannot rebuild the graph of a sparse repository.");
    List<ObjectId> updated = new LinkedList<ObjectId>();
    ImmutableList<Ref> branches = command(BranchListOp.class).setLocal(true).setRemotes(true)
            .call();
    GraphDatabase graphDb = repository.graphDatabase();
    for (Ref ref : branches) {
        Iterator<RevCommit> commits = command(LogOp.class).setUntil(ref.getObjectId()).call();
        while (commits.hasNext()) {
            RevCommit next = commits.next();
            if (graphDb.put(next.getId(), next.getParentIds())) {
                updated.add(next.getId());
            }
        }
    }
    return ImmutableList.copyOf(updated);
}
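
For orientation, a minimal sketch of how this operation might be invoked through the command locator; the geogig variable is a hypothetical handle (e.g. an open GeoGIG facade) and is not part of the listing above:

// Rebuild the graph database and report which commits had missing or incomplete
// graph entries; assumes `geogig` is an open handle on a non-sparse repository.
ImmutableList<ObjectId> updated = geogig.command(RebuildGraphOp.class).call();
System.out.println("Rebuilt graph entries for " + updated.size() + " commits");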
Use of org.locationtech.geogig.storage.GraphDatabase in project GeoGig by boundlessgeo.
The class SquashOp, method _call().
/**
* Executes the squash operation.
*
* @return the new head after modifying the history by squashing the commits
* @see org.locationtech.geogig.api.AbstractGeoGigOp#call()
*/
@Override
protected ObjectId _call() {
    Preconditions.checkNotNull(since);
    Preconditions.checkNotNull(until);
    GraphDatabase graphDb = graphDatabase();
    Repository repository = repository();
    Platform platform = platform();
    final Optional<Ref> currHead = command(RefParse.class).setName(Ref.HEAD).call();
    Preconditions.checkState(currHead.isPresent(), "Repository has no HEAD, can't squash.");
    Preconditions.checkState(currHead.get() instanceof SymRef, "Can't squash from detached HEAD");
    final SymRef headRef = (SymRef) currHead.get();
    final String currentBranch = headRef.getTarget();
    Preconditions.checkState(index().isClean() && workingTree().isClean(),
            "You must have a clean working tree and index to perform a squash.");
    Optional<ObjectId> ancestor = command(FindCommonAncestor.class).setLeft(since).setRight(until)
            .call();
    Preconditions.checkArgument(ancestor.isPresent(),
            "'since' and 'until' commits do not have a common ancestor");
    Preconditions.checkArgument(ancestor.get().equals(since.getId()),
            "Commits provided in wrong order");
    Preconditions.checkArgument(!since.getParentIds().isEmpty(), "'since' commit has no parents");
    // we get a list of commits to apply on top of the squashed commits
    List<RevCommit> commits = getCommitsAfterUntil();
    ImmutableSet<Ref> refs = command(ForEachRef.class).setPrefixFilter(Ref.HEADS_PREFIX).call();
    // we create a list of all parents of those squashed commits, in case they are
    // merge commits. The resulting commit will have all these parents
    //
    // While iterating the set of commits to squash, we check that there are no branch starting
    // points among them. Any commit with more than one child causes an exception to be thrown,
    // since the squash operation does not support squashing those commits
    Iterator<RevCommit> toSquash = command(LogOp.class).setSince(since.getParentIds().get(0))
            .setUntil(until.getId()).setFirstParentOnly(true).call();
    List<ObjectId> firstParents = Lists.newArrayList();
    List<ObjectId> secondaryParents = Lists.newArrayList();
    final List<ObjectId> squashedIds = Lists.newArrayList();
    RevCommit commitToSquash = until;
    while (toSquash.hasNext()) {
        commitToSquash = toSquash.next();
        squashedIds.add(commitToSquash.getId());
        Preconditions.checkArgument(graphDb.getChildren(commitToSquash.getId()).size() < 2,
                "The commits to squash include a branch starting point. Squashing that type of commit is not supported.");
        for (Ref ref : refs) {
            // In case a branch has been created but no commit has been made on it and the
            // starting commit has just one child
            Preconditions.checkArgument(
                    !ref.getObjectId().equals(commitToSquash.getId())
                            || ref.getObjectId().equals(currHead.get().getObjectId())
                            || commitToSquash.getParentIds().size() > 1,
                    "The commits to squash include a branch starting point. Squashing that type of commit is not supported.");
        }
        ImmutableList<ObjectId> parentIds = commitToSquash.getParentIds();
        for (int i = 1; i < parentIds.size(); i++) {
            secondaryParents.add(parentIds.get(i));
        }
        firstParents.add(parentIds.get(0));
    }
    Preconditions.checkArgument(since.equals(commitToSquash),
            "Cannot reach 'since' from 'until' commit through first parentage");
    // We do the same check in the children commits
    for (RevCommit commit : commits) {
        Preconditions.checkArgument(graphDb.getChildren(commit.getId()).size() < 2,
                "The commits after the ones to squash include a branch starting point. This scenario is not supported.");
        for (Ref ref : refs) {
            // In case a branch has been created but no commit has been made on it
            Preconditions.checkArgument(
                    !ref.getObjectId().equals(commit.getId())
                            || ref.getObjectId().equals(currHead.get().getObjectId())
                            || commit.getParentIds().size() > 1,
                    "The commits after the ones to squash include a branch starting point. This scenario is not supported.");
        }
    }
    ObjectId newHead;
    // rewind the head
    newHead = since.getParentIds().get(0);
    command(ResetOp.class).setCommit(Suppliers.ofInstance(newHead)).setMode(ResetMode.HARD).call();
    // add the current HEAD as first parent of the resulting commit
    // parents.add(0, newHead);
    // Create new commit
    List<ObjectId> parents = Lists.newArrayList();
    parents.addAll(firstParents);
    parents.addAll(secondaryParents);
    ObjectId endTree = until.getTreeId();
    CommitBuilder builder = new CommitBuilder(until);
    Collection<ObjectId> filteredParents = Collections2.filter(parents,
            new Predicate<ObjectId>() {
                @Override
                public boolean apply(@Nullable ObjectId id) {
                    return !squashedIds.contains(id);
                }
            });
    builder.setParentIds(Lists.newArrayList(filteredParents));
    builder.setTreeId(endTree);
    if (message == null) {
        message = since.getMessage();
    }
    long timestamp = platform.currentTimeMillis();
    builder.setMessage(message);
    builder.setCommitter(resolveCommitter());
    builder.setCommitterEmail(resolveCommitterEmail());
    builder.setCommitterTimestamp(timestamp);
    builder.setCommitterTimeZoneOffset(platform.timeZoneOffset(timestamp));
    builder.setAuthorTimestamp(until.getAuthor().getTimestamp());
    RevCommit newCommit = builder.build();
    repository.objectDatabase().put(newCommit);
    newHead = newCommit.getId();
    ObjectId newTreeId = newCommit.getTreeId();
    command(UpdateRef.class).setName(currentBranch).setNewValue(newHead).call();
    command(UpdateSymRef.class).setName(Ref.HEAD).setNewValue(currentBranch).call();
    workingTree().updateWorkHead(newTreeId);
    index().updateStageHead(newTreeId);
    // now put the other commits after the squashed one
    newHead = addCommits(commits, currentBranch, newHead);
    return newHead;
}
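
As a point of reference, a minimal sketch of running the squash through the command locator; geogig, since, until and the setter names are assumptions inferred from the fields used above, not a verified API listing:

// Squash every commit from `since` up to `until` (following first parents) into one commit.
ObjectId newHead = geogig.command(SquashOp.class)
        .setSince(since)                  // hypothetical setter for the `since` field
        .setUntil(until)                  // hypothetical setter for the `until` field
        .setMessage("squashed history")   // optional; defaults to the message of `since`
        .call();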
Use of org.locationtech.geogig.storage.GraphDatabase in project GeoGig by boundlessgeo.
The class FindCommonAncestor, method findLowestCommonAncestor().
/**
* Finds the lowest common ancestor of two commits.
*
* @param leftId the commit id of the left commit
* @param rightId the commit id of the right commit
* @return An {@link Optional} of the lowest common ancestor of the two commits, or
* {@link Optional#absent()} if a common ancestor could not be found.
*/
public Optional<ObjectId> findLowestCommonAncestor(ObjectId leftId, ObjectId rightId) {
    Set<GraphNode> leftSet = new HashSet<GraphNode>();
    Set<GraphNode> rightSet = new HashSet<GraphNode>();
    Queue<GraphNode> leftQueue = new LinkedList<GraphNode>();
    Queue<GraphNode> rightQueue = new LinkedList<GraphNode>();
    GraphDatabase graphDb = graphDatabase();
    GraphNode leftNode = graphDb.getNode(leftId);
    leftQueue.add(leftNode);
    GraphNode rightNode = graphDb.getNode(rightId);
    rightQueue.add(rightNode);
    List<GraphNode> potentialCommonAncestors = new LinkedList<GraphNode>();
    while (!leftQueue.isEmpty() || !rightQueue.isEmpty()) {
        if (!leftQueue.isEmpty()) {
            GraphNode commit = leftQueue.poll();
            if (processCommit(commit, leftQueue, leftSet, rightQueue, rightSet)) {
                potentialCommonAncestors.add(commit);
            }
        }
        if (!rightQueue.isEmpty()) {
            GraphNode commit = rightQueue.poll();
            if (processCommit(commit, rightQueue, rightSet, leftQueue, leftSet)) {
                potentialCommonAncestors.add(commit);
            }
        }
    }
    verifyAncestors(potentialCommonAncestors, leftSet, rightSet);
    Optional<ObjectId> ancestor = Optional.absent();
    if (potentialCommonAncestors.size() > 0) {
        ancestor = Optional.of(potentialCommonAncestors.get(0).getIdentifier());
    }
    return ancestor;
}
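
The SquashOp listing above already shows the usual way to run this search as an operation; a minimal sketch, assuming leftCommit and rightCommit are RevCommit instances previously loaded from the repository:

// Find the lowest common ancestor of two commits through the operation API.
Optional<ObjectId> ancestor = geogig.command(FindCommonAncestor.class)
        .setLeft(leftCommit)
        .setRight(rightCommit)
        .call();
if (ancestor.isPresent()) {
    System.out.println("Common ancestor: " + ancestor.get());
}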
Use of org.locationtech.geogig.storage.GraphDatabase in project GeoGig by boundlessgeo.
The class AbstractMappedRemoteRepo, method fetchSparseCommit().
/**
* This function takes all of the changes introduced by the specified commit and filters them
* based on the repository filter. It then uses the filtered results to construct a new commit
* that is the descendant of commits that the original's parents are mapped to.
*
* @param commitId the commit id of the original, non-sparse commit
* @param allowEmpty allow the function to create an empty sparse commit
*/
private void fetchSparseCommit(ObjectId commitId, boolean allowEmpty) {
    Optional<RevObject> object = getObject(commitId);
    if (object.isPresent() && object.get().getType().equals(TYPE.COMMIT)) {
        RevCommit commit = (RevCommit) object.get();
        FilteredDiffIterator changes = getFilteredChanges(commit);
        GraphDatabase graphDatabase = localRepository.graphDatabase();
        ObjectDatabase objectDatabase = localRepository.objectDatabase();
        graphDatabase.put(commit.getId(), commit.getParentIds());
        RevTree rootTree = RevTree.EMPTY;
        if (commit.getParentIds().size() > 0) {
            // Map this commit to the last "sparse" commit in my ancestry
            ObjectId mappedCommit = graphDatabase.getMapping(commit.getParentIds().get(0));
            graphDatabase.map(commit.getId(), mappedCommit);
            Optional<ObjectId> treeId = localRepository.command(ResolveTreeish.class)
                    .setTreeish(mappedCommit).call();
            if (treeId.isPresent()) {
                rootTree = localRepository.getTree(treeId.get());
            }
        } else {
            graphDatabase.map(commit.getId(), ObjectId.NULL);
        }
        Iterator<DiffEntry> it = Iterators.filter(changes, new Predicate<DiffEntry>() {
            @Override
            public boolean apply(DiffEntry e) {
                return true;
            }
        });
        if (it.hasNext()) {
            // Create new commit
            WriteTree writeTree = localRepository.command(WriteTree.class)
                    .setOldRoot(Suppliers.ofInstance(rootTree))
                    .setDiffSupplier(Suppliers.ofInstance((Iterator<DiffEntry>) it));
            if (changes.isAutoIngesting()) {
                // the iterator already ingests objects into the ObjectDatabase
                writeTree.dontMoveObjects();
            }
            ObjectId newTreeId = writeTree.call();
            CommitBuilder builder = new CommitBuilder(commit);
            List<ObjectId> newParents = new LinkedList<ObjectId>();
            for (ObjectId parentCommitId : commit.getParentIds()) {
                newParents.add(graphDatabase.getMapping(parentCommitId));
            }
            builder.setParentIds(newParents);
            builder.setTreeId(newTreeId);
            RevCommit mapped = builder.build();
            objectDatabase.put(mapped);
            if (changes.wasFiltered()) {
                graphDatabase.setProperty(mapped.getId(), GraphDatabase.SPARSE_FLAG, "true");
            }
            graphDatabase.map(mapped.getId(), commit.getId());
            // Replace the old mapping with the new commit Id.
            graphDatabase.map(commit.getId(), mapped.getId());
        } else if (allowEmpty) {
            CommitBuilder builder = new CommitBuilder(commit);
            List<ObjectId> newParents = new LinkedList<ObjectId>();
            for (ObjectId parentCommitId : commit.getParentIds()) {
                newParents.add(graphDatabase.getMapping(parentCommitId));
            }
            builder.setParentIds(newParents);
            builder.setTreeId(rootTree.getId());
            builder.setMessage(PLACEHOLDER_COMMIT_MESSAGE);
            RevCommit mapped = builder.build();
            objectDatabase.put(mapped);
            graphDatabase.setProperty(mapped.getId(), GraphDatabase.SPARSE_FLAG, "true");
            graphDatabase.map(mapped.getId(), commit.getId());
            // Replace the old mapping with the new commit Id.
            graphDatabase.map(commit.getId(), mapped.getId());
        } else {
            // Mark the mapped commit as sparse, since it won't have these changes
            graphDatabase.setProperty(graphDatabase.getMapping(commit.getId()),
                    GraphDatabase.SPARSE_FLAG, "true");
        }
    }
}
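
Since fetchSparseCommit is private it is not called directly; for orientation, a hypothetical helper summarizing the GraphDatabase bookkeeping performed above (the method name and parameters are illustrative, only the GraphDatabase calls come from the listing):

// Record ancestry for the original commit, map original <-> sparse ids, and
// flag the sparse commit so later fetches know it was filtered.
private void recordSparseMapping(GraphDatabase graphDb, RevCommit original, RevCommit sparse) {
    graphDb.put(original.getId(), original.getParentIds()); // ancestry edges for the original commit
    graphDb.map(sparse.getId(), original.getId());          // sparse -> original
    graphDb.map(original.getId(), sparse.getId());          // original -> sparse, replacing the old mapping
    graphDb.setProperty(sparse.getId(), GraphDatabase.SPARSE_FLAG, "true"); // mark as filtered
}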