Use of org.locationtech.geogig.api.Bucket in the GeoGig project by boundlessgeo:
the readTree method of the FormatCommonV2 class.
/**
 * Deserializes a {@link RevTree} from the given stream in format-v2 layout.
 * <p>
 * The wire format is read strictly in order: total size, child tree count, the feature node
 * list, the subtree node list, and finally the bucket entries. A tree may contain either
 * buckets or direct nodes, never both.
 *
 * @param id the object id the resulting tree will be identified by
 * @param in the stream positioned at the start of the tree body
 * @return the deserialized tree (a node tree if it has buckets or is empty, a leaf tree
 *         otherwise)
 * @throws IOException if reading from the stream fails
 */
public static RevTree readTree(ObjectId id, DataInput in) throws IOException {
    // header: total recursive size and number of child trees
    final long totalSize = readUnsignedVarLong(in);
    final int childTreeCount = readUnsignedVarInt(in);

    final ImmutableList.Builder<Node> featureNodes = new ImmutableList.Builder<Node>();
    final ImmutableList.Builder<Node> treeNodes = new ImmutableList.Builder<Node>();
    final SortedMap<Integer, Bucket> bucketsByIndex = new TreeMap<Integer, Bucket>();

    // feature node section
    final int featureCount = readUnsignedVarInt(in);
    for (int f = 0; f < featureCount; f++) {
        Node node = readNode(in);
        checkState(RevObject.TYPE.FEATURE.equals(node.getType()), "Non-feature node in tree's feature list.");
        featureNodes.add(node);
    }

    // subtree node section
    final int subtreeCount = readUnsignedVarInt(in);
    for (int t = 0; t < subtreeCount; t++) {
        Node node = readNode(in);
        checkState(RevObject.TYPE.TREE.equals(node.getType()), "Non-tree node in tree's subtree list.");
        treeNodes.add(node);
    }

    // bucket section: each entry is a varint index followed by the bucket body
    final int bucketCount = readUnsignedVarInt(in);
    for (int b = 0; b < bucketCount; b++) {
        final int bucketIndex = readUnsignedVarInt(in);
        final Integer key = Integer.valueOf(bucketIndex);
        checkState(!bucketsByIndex.containsKey(key), "duplicate bucket index: %s", key);
        // checkState(bucketIndex < RevTree.MAX_BUCKETS, "Illegal bucket index: %s", key);
        Bucket bucket = readBucketBody(in);
        bucketsByIndex.put(key, bucket);
    }
    checkState(bucketCount == bucketsByIndex.size(), "expected %s buckets, got %s", bucketCount, bucketsByIndex.size());

    ImmutableList<Node> trees = treeNodes.build();
    ImmutableList<Node> features = featureNodes.build();
    // a tree stores either buckets or direct child nodes, never both
    checkArgument(bucketsByIndex.isEmpty() || (trees.isEmpty() && features.isEmpty()), "Tree has mixed buckets and nodes; this is not supported.");
    if (trees.isEmpty() && features.isEmpty()) {
        return RevTreeImpl.createNodeTree(id, totalSize, childTreeCount, bucketsByIndex);
    }
    return RevTreeImpl.createLeafTree(id, totalSize, features, trees);
}
Use of org.locationtech.geogig.api.Bucket in the GeoGig project by boundlessgeo:
the copyNewObjects method of the LocalRemoteRepo class.
/**
 * Copies every object reachable from {@code newTree} that is not already present in
 * {@code toDb}, by walking the diff between {@code oldTree} and {@code newTree}.
 * <p>
 * Object ids are accumulated into a set and flushed to {@code toDb} in bulk batches of
 * 10,000 via {@code copy(...)}; a final flush after the walk copies any remainder.
 * Traversal is aborted early (via the filter returning {@code false}) when
 * {@code progress} is cancelled.
 *
 * @param oldTree the tree representing the previously-known state
 * @param newTree the tree whose new objects must be transferred
 * @param fromDb the database to read objects from (used for both sides of the diff)
 * @param toDb the database to copy objects into
 * @param progress listener used both for progress reporting and for cancellation
 */
private void copyNewObjects(RevTree oldTree, RevTree newTree, final ObjectDatabase fromDb, final ObjectDatabase toDb, final ProgressListener progress) {
checkNotNull(oldTree);
checkNotNull(newTree);
checkNotNull(fromDb);
checkNotNull(toDb);
checkNotNull(progress);
// the diff walk uses fromDb as both left and right data source since we're comparing what
// we have in the "origin" database against trees on the same repository
PostOrderDiffWalk diffWalk = new PostOrderDiffWalk(oldTree, newTree, fromDb, fromDb);
// holds object ids that need to be copied to the target db. Pruned when it reaches a
// threshold.
final Set<ObjectId> ids = new HashSet<ObjectId>();
// This filter further refines the post order diff walk by making it ignore trees/buckets
// that are already present in the target db
Predicate<Bounded> filter = new Predicate<Bounded>() {
@Override
public boolean apply(@Nullable Bounded b) {
if (b == null) {
return false;
}
if (progress.isCanceled()) {
// abort traversal
return false;
}
ObjectId id;
if (b instanceof Node) {
Node node = (Node) b;
if (RevObject.TYPE.TREE.equals(node.getType())) {
// check of existence of trees only. For features the diff filtering is good
// enough and checking for existence on each feature would be killer
// performance wise
id = node.getObjectId();
} else {
return true;
}
} else {
// not a Node, so it must be a Bucket
id = ((Bucket) b).id();
}
// skip subtrees/buckets already queued for copy or already stored in the target db
boolean exists = ids.contains(id) || toDb.exists(id);
return !exists;
}
};
// receives notifications of feature/bucket/tree diffs. Only interested in the "new"/right
// side of the comparisons
Consumer consumer = new Consumer() {
// flush the accumulated ids to the target db once this many have been gathered
final int bulkSize = 10_000;
@Override
public void feature(@Nullable Node left, Node right) {
add(left);
add(right);
}
@Override
public void tree(@Nullable Node left, Node right) {
add(left);
add(right);
}
// queues a node's object id (and its metadata id, if any) for copying
private void add(@Nullable Node node) {
if (node == null) {
return;
}
ids.add(node.getObjectId());
Optional<ObjectId> metadataId = node.getMetadataId();
if (metadataId.isPresent()) {
ids.add(metadataId.get());
}
checkLimitAndCopy();
}
@Override
public void bucket(int bucketIndex, int bucketDepth, @Nullable Bucket left, Bucket right) {
if (left != null) {
ids.add(left.id());
}
if (right != null) {
ids.add(right.id());
}
checkLimitAndCopy();
}
// bulk-copies and clears the pending id set once it reaches bulkSize
private void checkLimitAndCopy() {
if (ids.size() >= bulkSize) {
copy(ids, fromDb, toDb, progress);
ids.clear();
}
}
};
diffWalk.walk(filter, consumer);
// copy remaining objects
copy(ids, fromDb, toDb, progress);
}
Use of org.locationtech.geogig.api.Bucket in the GeoGig project by boundlessgeo:
the bucket method of the DiffCountConsumer class.
/**
 * Handles a bucket comparison during the diff traversal.
 * <p>
 * When the bucket exists on only one side, its whole subtree is counted as added or
 * removed (via {@code addTreeFeatures}) and the traversal is pruned; when both sides are
 * present the walk descends into the bucket's children.
 *
 * @return {@code true} to recurse into the bucket's contents, {@code false} to prune
 */
@Override
public boolean bucket(int bucketIndex, int bucketDepth, Bucket left, Bucket right) {
    final boolean hasLeft = left != null;
    final boolean hasRight = right != null;
    if (hasLeft && hasRight) {
        // bucket present on both sides: keep walking to find the actual differences
        return true;
    }
    // only one side present: account for the whole subtree and stop descending
    Bucket present = hasLeft ? left : right;
    addTreeFeatures(present.id(), hasLeft, hasRight);
    return false;
}
Use of org.locationtech.geogig.api.Bucket in the GeoGig project by boundlessgeo:
the testRoundTripBucketsFull method of the RevTreeSerializationTest class.
/**
 * Verifies that a node tree holding a full set of 32 buckets survives a
 * serialize/deserialize round trip unchanged.
 */
@Test
public void testRoundTripBucketsFull() {
    final ObjectId treeId = ObjectId.forString("fake");
    final long treeSize = 100000000;
    final int subtreeCount = 0;
    Map<Integer, Bucket> fullBuckets = createBuckets(32);

    final RevTreeImpl original = RevTreeImpl.createNodeTree(treeId, treeSize, subtreeCount, fullBuckets);
    RevTree deserialized = read(original.getId(), write(original));
    assertTreesAreEqual(original, deserialized);
}
Use of org.locationtech.geogig.api.Bucket in the GeoGig project by boundlessgeo:
the createBuckets method of the RevTreeSerializationTest class.
/**
 * Builds {@code count} test buckets keyed 0..count-1, each with a deterministic fake id
 * ("b" + index) and an envelope derived from its index.
 *
 * @param count how many buckets to create
 * @return a mutable map of bucket index to bucket
 */
private Map<Integer, Bucket> createBuckets(int count) {
    Map<Integer, Bucket> result = Maps.newHashMap();
    int index = 0;
    while (index < count) {
        ObjectId fakeId = ObjectId.forString("b" + index);
        Envelope bounds = new Envelope(index, index * 2, index, index * 2);
        result.put(index, Bucket.create(fakeId, bounds));
        index++;
    }
    return result;
}
Aggregations