Use of org.locationtech.geogig.storage.ObjectDatabase in project GeoGig by boundlessgeo.
The class HttpRemoteRepo, method sendPackedObjects:
private void sendPackedObjects(final List<ObjectId> toSend, final Set<ObjectId> roots,
        Deduplicator deduplicator, final ProgressListener progress) {
    Set<ObjectId> sent = new HashSet<ObjectId>();
    while (!toSend.isEmpty()) {
        try {
            // As objects are serialized, drop sent commits from the pending list and
            // advance the "have" frontier so the next batch does not resend them.
            BinaryPackedObjects.Callback callback = new BinaryPackedObjects.Callback() {

                @Override
                public void callback(Supplier<RevObject> supplier) {
                    RevObject object = supplier.get();
                    progress.setProgress(progress.getProgress() + 1);
                    if (object instanceof RevCommit) {
                        RevCommit commit = (RevCommit) object;
                        toSend.remove(commit.getId());
                        roots.removeAll(commit.getParentIds());
                        roots.add(commit.getId());
                    }
                }
            };

            ObjectDatabase database = localRepository.objectDatabase();
            BinaryPackedObjects packer = new BinaryPackedObjects(database);
            ImmutableList<ObjectId> have = ImmutableList.copyOf(roots);
            final boolean traverseCommits = false;

            Stopwatch sw = Stopwatch.createStarted();
            ObjectSerializingFactory serializer = DataStreamSerializationFactoryV1.INSTANCE;
            SendObjectsConnectionFactory outFactory;
            ObjectFunnel objectFunnel;
            outFactory = new SendObjectsConnectionFactory(repositoryURL);
            int pushBytesLimit = parsePushLimit();
            objectFunnel = ObjectFunnels.newFunnel(outFactory, serializer, pushBytesLimit);
            final long writtenObjectsCount = packer.write(objectFunnel, toSend, have, sent,
                    callback, traverseCommits, deduplicator);
            objectFunnel.close();
            sw.stop();

            long compressedSize = outFactory.compressedSize;
            long uncompressedSize = outFactory.uncompressedSize;
            LOGGER.info(String.format(
                    "HttpRemoteRepo: Written %,d objects. Time to process: %s. "
                            + "Compressed size: %,d bytes. Uncompressed size: %,d bytes.",
                    writtenObjectsCount, sw, compressedSize, uncompressedSize));
        } catch (IOException e) {
            Throwables.propagate(e);
        }
    }
}
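The outer while loop terminates because of the callback's bookkeeping: every commit that reaches the funnel is removed from toSend and folded into roots, so each batch's "have" list advances past what has already been transferred. Below is a minimal sketch of that pattern in isolation, reusing the types from the method above; commitsToPush and remoteHeads are assumed inputs that do not appear in the original code.

    // Hypothetical sketch: record which commits have already been packed so a later
    // batch (or a retry) does not resend them or their ancestors.
    final List<ObjectId> toSend = new LinkedList<ObjectId>(commitsToPush); // assumed input
    final Set<ObjectId> roots = new HashSet<ObjectId>(remoteHeads); // assumed input
    BinaryPackedObjects.Callback callback = new BinaryPackedObjects.Callback() {

        @Override
        public void callback(Supplier<RevObject> supplier) {
            RevObject object = supplier.get();
            if (object instanceof RevCommit) {
                RevCommit commit = (RevCommit) object;
                toSend.remove(commit.getId()); // no longer pending
                roots.removeAll(commit.getParentIds()); // parents are implied by the commit
                roots.add(commit.getId()); // the "have" frontier now includes this commit
            }
        }
    };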
Use of org.locationtech.geogig.storage.ObjectDatabase in project GeoGig by boundlessgeo.
The class JEObjectDatabaseTest, method testMultipleInstances:
public void testMultipleInstances() {
    ObjectDatabase db1 = createDb();
    ObjectDatabase db2 = createDb();

    RevObject obj = RevTree.EMPTY;

    assertTrue(db1.put(obj));
    db1.close();

    assertFalse(db2.put(obj));
    db2.close();

    RevObject revObject = db.get(obj.getId());
    assertEquals(obj, revObject);
}
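The test relies on two ObjectDatabase handles over the same environment sharing storage: the put through db1 makes the object a duplicate for db2, so the second put returns false. The basic contract exercised here is put/exists/get keyed by ObjectId; a short hypothetical sketch follows, where db is an assumed, already-open ObjectDatabase.

    // Hypothetical sketch of the basic ObjectDatabase contract: put() returns true only
    // for newly stored objects, exists() checks presence, get() retrieves by id.
    RevObject obj = RevTree.EMPTY; // any RevObject works; the empty tree is a handy constant
    boolean added = db.put(obj); // true the first time, false if already stored
    boolean present = db.exists(obj.getId()); // true once the object is in the database
    RevObject read = db.get(obj.getId()); // the stored object, equal to obj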
Use of org.locationtech.geogig.storage.ObjectDatabase in project GeoGig by boundlessgeo.
The class WriteBack, method _call:
/**
 * Executes the write back operation.
 *
 * @return the {@link ObjectId id} of the resulting new ancestor tree.
 */
@Override
protected ObjectId _call() {
    checkNotNull(tree, "child tree not set");
    checkNotNull(childPath, "child tree path not set");

    String ancestorPath = resolveAncestorPath();
    checkArgument(NodeRef.isChild(ancestorPath, childPath), String.format(
            "child path '%s' is not a child of ancestor path '%s'", childPath, ancestorPath));

    RevTree tree = this.tree.get();
    checkState(null != tree, "child tree supplier returned null");

    ObjectDatabase targetDb = indexDb ? stagingDatabase() : objectDatabase();
    RevTreeBuilder root = resolveAncestor();
    return writeBack(root, ancestorPath, tree, childPath, targetDb, metadataId.or(ObjectId.NULL));
}
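For context, WriteBack is meant to be invoked through the same command(...) infrastructure as the other operations on this page. The sketch below is hypothetical: the setter names (setAncestor, setChildPath, setTree) are assumptions inferred from the fields the method reads, so verify them against the actual WriteBack API before use.

    // Hypothetical call site; setter names are assumed rather than taken from the
    // WriteBack source. The operation writes newChildTree at "roads/highway" under the
    // ancestor tree and returns the id of the resulting new ancestor tree.
    ObjectId newAncestorId = command(WriteBack.class)
            .setAncestor(ancestorTreeBuilder) // RevTreeBuilder for the ancestor tree
            .setChildPath("roads/highway") // path of the child inside the ancestor
            .setTree(newChildTree) // RevTree to store at that path
            .call();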
Use of org.locationtech.geogig.storage.ObjectDatabase in project GeoGig by boundlessgeo.
The class WriteTree2, method applyChanges:
private RevTree applyChanges(@Nullable final NodeRef leftTreeRef,
        @Nullable final NodeRef rightTreeRef) {
    Preconditions.checkArgument(leftTreeRef != null || rightTreeRef != null,
            "either left or right tree shall be non null");

    final ObjectDatabase repositoryDatabase = objectDatabase();
    final String treePath = rightTreeRef == null ? leftTreeRef.path() : rightTreeRef.path();
    final List<String> strippedPathFilters = stripParentAndFiltersThatDontApply(
            this.pathFilters, treePath);

    // find the diffs that apply to the path filters
    final ObjectId leftTreeId = leftTreeRef == null ? RevTree.EMPTY_TREE_ID
            : leftTreeRef.objectId();
    final ObjectId rightTreeId = rightTreeRef == null ? RevTree.EMPTY_TREE_ID
            : rightTreeRef.objectId();

    // skip tree/bucket objects that already exist in the repository database
    final Predicate<Bounded> existsFilter = new Predicate<Bounded>() {

        private final ObjectDatabase targetDb = repositoryDatabase;

        @Override
        public boolean apply(Bounded input) {
            ObjectId id = null;
            if (input instanceof Node && TYPE.TREE.equals(((Node) input).getType())) {
                id = ((Node) input).getObjectId();
            } else if (input instanceof Bucket) {
                Bucket b = (Bucket) input;
                id = b.id();
            }
            if (id != null) {
                if (targetDb.exists(id)) {
                    LOGGER.trace("Ignoring {}. Already exists in target database.", input);
                    return false;
                }
            }
            return true;
        }
    };

    DiffTree diffs = command(DiffTree.class).setRecursive(false).setReportTrees(false)
            .setOldTree(leftTreeId).setNewTree(rightTreeId).setPathFilter(strippedPathFilters)
            .setCustomFilter(existsFilter);

    // move new blobs from the index to the repository (note: this could be parallelized)
    Supplier<Iterator<Node>> nodesToMove = asNodeSupplierOfNewContents(diffs, strippedPathFilters);
    command(DeepMove.class).setObjects(nodesToMove).call();

    final StagingDatabase stagingDatabase = stagingDatabase();
    final RevTree currentLeftTree = stagingDatabase.getTree(leftTreeId);
    final RevTreeBuilder builder = currentLeftTree.builder(repositoryDatabase);

    // remove the exists filter; the new trees must be created taking all nodes into account
    diffs.setCustomFilter(null);
    Iterator<DiffEntry> iterator = diffs.get();
    if (!strippedPathFilters.isEmpty()) {
        final Set<String> expected = Sets.newHashSet(strippedPathFilters);
        iterator = Iterators.filter(iterator, new Predicate<DiffEntry>() {
            @Override
            public boolean apply(DiffEntry input) {
                boolean applies;
                if (input.isDelete()) {
                    applies = expected.contains(input.oldName());
                } else {
                    applies = expected.contains(input.newName());
                }
                return applies;
            }
        });
    }

    // replay the diffs onto the tree builder and persist the result
    while (iterator.hasNext()) {
        final DiffEntry diff = iterator.next();
        if (diff.isDelete()) {
            builder.remove(diff.oldName());
        } else {
            NodeRef newObject = diff.getNewObject();
            Node node = newObject.getNode();
            builder.put(node);
        }
    }
    final RevTree newTree = builder.build();
    repositoryDatabase.put(newTree);
    return newTree;
}
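The final loop is a generic "replay diffs onto a tree builder" step. A compact hypothetical helper showing just that step is sketched below; the method name applyDiffs is made up, while the DiffEntry, RevTreeBuilder and ObjectDatabase calls are the ones already used above.

    // Hypothetical helper: replay a stream of DiffEntry changes onto a RevTreeBuilder
    // and persist the rebuilt tree in the given ObjectDatabase.
    private RevTree applyDiffs(Iterator<DiffEntry> diffs, RevTreeBuilder builder,
            ObjectDatabase targetDb) {
        while (diffs.hasNext()) {
            DiffEntry diff = diffs.next();
            if (diff.isDelete()) {
                builder.remove(diff.oldName()); // deletions are keyed by the old name
            } else {
                builder.put(diff.getNewObject().getNode()); // additions and modifications
            }
        }
        RevTree newTree = builder.build();
        targetDb.put(newTree); // persist the rebuilt tree
        return newTree;
    }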
Use of org.locationtech.geogig.storage.ObjectDatabase in project GeoGig by boundlessgeo.
The class WriteTree2, method _call:
/**
 * Executes the write tree operation.
 *
 * @return the new root tree id, the current HEAD tree id if there are no differences between
 *         the index and the HEAD, or {@code null} if the operation has been cancelled (as
 *         indicated by the {@link #getProgressListener() progress listener}).
 */
@Override
protected ObjectId _call() {
    final ProgressListener progress = getProgressListener();

    TreeDifference treeDifference = computeTreeDifference();
    if (treeDifference.areEqual()) {
        MutableTree leftTree = treeDifference.getLeftTree();
        Node leftNode = leftTree.getNode();
        ObjectId leftOid = leftNode.getObjectId();
        return leftOid;
    }

    final MutableTree oldLeftTree = treeDifference.getLeftTree().clone();
    Preconditions.checkState(oldLeftTree.equals(treeDifference.getLeftTree()));

    // handle renames before new and deleted trees for the computation of new and deleted to be
    // accurate
    Set<String> ignoreList = Sets.newHashSet();
    handleRenames(treeDifference, ignoreList);
    handlePureMetadataChanges(treeDifference, ignoreList);
    handleNewTrees(treeDifference, ignoreList);
    handleDeletedTrees(treeDifference, ignoreList);
    handleRemainingDifferences(treeDifference, ignoreList);
    progress.complete();

    MutableTree newLeftTree = treeDifference.getLeftTree();
    final ObjectDatabase repositoryDatabase = objectDatabase();
    final RevTree newRoot = newLeftTree.build(stagingDatabase(), repositoryDatabase);
    if (newRoot.trees().isPresent()) {
        for (Node n : newRoot.trees().get()) {
            if (n.getMetadataId().isPresent()) {
                deepMove(n.getMetadataId().get());
            }
        }
    }
    ObjectId newRootId = newRoot.getId();
    return newRootId;
}
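Tying this back to the page topic: the id returned by _call() refers to a root tree that should now be stored in the repository's ObjectDatabase, since the new trees and their metadata were deep-moved there before the method returned. A small hypothetical verification sketch follows, assuming a context (such as another operation) where command(...) and objectDatabase() are available.

    // Hypothetical check: the root id produced by WriteTree2 should resolve against the
    // repository's ObjectDatabase (ignoring the cancelled case, where the id is null).
    ObjectId newRootId = command(WriteTree2.class).call(); // assumed call site
    ObjectDatabase odb = objectDatabase();
    Preconditions.checkState(odb.exists(newRootId), "new root tree was not persisted");
    RevObject newRoot = odb.get(newRootId); // the stored root tree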