Example usage of org.apache.accumulo.server.replication.proto.Replication.Status in the Apache Accumulo project: the GarbageCollectionTest method bulkImportReplicationRecordsPreventDeletion.
@Test
public void bulkImportReplicationRecordsPreventDeletion() throws Exception {
  GarbageCollectionAlgorithm gca = new GarbageCollectionAlgorithm();
  TestGCE gce = new TestGCE();
  // One file has an outstanding replication record, the other does not.
  String fileAwaitingReplication = "hdfs://foo.com:6000/accumulo/tables/1/t-00001/A000001.rf";
  String unreferencedFile = "hdfs://foo.com:6000/accumulo/tables/2/t-00002/A000002.rf";
  gce.candidates.add(fileAwaitingReplication);
  gce.candidates.add(unreferencedFile);
  // A closed record with an infinite end and unknown length models the
  // bulk-import case: the file still needs to be replicated.
  Status status = Status.newBuilder().setInfiniteEnd(true).setBegin(0).setClosed(true).build();
  gce.filesToReplicate.put(fileAwaitingReplication, status);
  gca.collect(gce);
  // Only the file with no pending replication may be removed; the
  // bulk-imported file must be retained until replication completes.
  Assert.assertEquals(1, gce.deletes.size());
  Assert.assertEquals(unreferencedFile, gce.deletes.get(0));
}
Example usage of org.apache.accumulo.server.replication.proto.Replication.Status in the Apache Accumulo project: the GarbageCollectionTest method finishedReplicationRecordsDontPreventDeletion.
@Test
public void finishedReplicationRecordsDontPreventDeletion() throws Exception {
  GarbageCollectionAlgorithm gca = new GarbageCollectionAlgorithm();
  TestGCE gce = new TestGCE();
  String firstCandidate = "hdfs://foo.com:6000/accumulo/tables/1/t-00001/A000001.rf";
  String secondCandidate = "hdfs://foo.com:6000/accumulo/tables/2/t-00002/A000002.rf";
  gce.candidates.add(firstCandidate);
  gce.candidates.add(secondCandidate);
  // Closed record whose begin has caught up to end: replication is finished.
  Status finished = Status.newBuilder().setClosed(true).setEnd(100).setBegin(100).build();
  gce.filesToReplicate.put(firstCandidate, finished);
  gca.collect(gce);
  // A000002.rf has no references at all, and A000001.rf's replication record
  // is closed and complete, so neither file should block deletion.
  Assert.assertEquals(2, gce.deletes.size());
}
Example usage of org.apache.accumulo.server.replication.proto.Replication.Status in the Apache Accumulo project: the StatusCombinerTest method commutativeWithMultipleUpdates.
@Test
public void commutativeWithMultipleUpdates() {
  // A freshly created file plus two ingest updates and two replication updates.
  Status newFile = StatusUtil.fileCreated(100);
  Status update1 = StatusUtil.ingestedUntil(100);
  Status update2 = StatusUtil.ingestedUntil(200);
  Status repl1 = StatusUtil.replicated(50);
  Status repl2 = StatusUtil.replicated(150);
  // Baseline ordering: ingest and replication updates interleaved.
  Status order1 = combiner.typedReduce(key, Arrays.asList(newFile, update1, repl1, update2, repl2).iterator());
  // Each replication update arrives before its matching ingest update.
  Status permutation = combiner.typedReduce(key, Arrays.asList(newFile, repl1, update1, repl2, update2).iterator());
  Assert.assertEquals(order1, permutation);
  // Every replication update before every ingest update.
  permutation = combiner.typedReduce(key, Arrays.asList(newFile, repl1, repl2, update1, update2).iterator());
  Assert.assertEquals(order1, permutation);
  // Ingest updates first, then replications, with the ingest updates repeated
  // at the end — also exercises tolerance of duplicate entries.
  permutation = combiner.typedReduce(key, Arrays.asList(newFile, update1, update2, repl1, repl2, update1, update2).iterator());
  Assert.assertEquals(order1, permutation);
}
Example usage of org.apache.accumulo.server.replication.proto.Replication.Status in the Apache Accumulo project: the StatusCombinerTest method duplicateStatuses.
@Test
public void duplicateStatuses() {
  Status newFile = StatusUtil.fileCreated(100);
  Status update1 = StatusUtil.ingestedUntil(builder, 100);
  Status update2 = StatusUtil.ingestedUntil(builder, 200);
  Status repl1 = StatusUtil.replicated(builder, 50);
  Status repl2 = StatusUtil.replicated(builder, 150);
  // Reduce once without duplicates to establish the expected result.
  Status order1 = combiner.typedReduce(key, Arrays.asList(newFile, update1, repl1, update2, repl2).iterator());
  // Feeding the same updates multiple times must not change the outcome.
  Status permutation = combiner.typedReduce(key, Arrays.asList(newFile, repl1, update1, update1, repl2, update2, update2).iterator());
  Assert.assertEquals(order1, permutation);
}
Example usage of org.apache.accumulo.server.replication.proto.Replication.Status in the Apache Accumulo project: the StatusCombinerTest method commutativeWithClose.
@Test
public void commutativeWithClose() {
  Status newFile = StatusUtil.fileCreated(100);
  Status closed = StatusUtil.fileClosed();
  Status secondSync = StatusUtil.ingestedUntil(200);
  // The close record and a later ingest update must combine to the same
  // result regardless of the order in which they are reduced.
  Status order1 = combiner.typedReduce(key, Arrays.asList(newFile, closed, secondSync).iterator());
  Status order2 = combiner.typedReduce(key, Arrays.asList(newFile, secondSync, closed).iterator());
  Assert.assertEquals(order1, order2);
}
Aggregations