
Example 11 with Status

Use of org.apache.accumulo.server.replication.proto.Replication.Status in project accumulo by apache.

From the class GarbageCollectionTest, method bulkImportReplicationRecordsPreventDeletion.

@Test
public void bulkImportReplicationRecordsPreventDeletion() throws Exception {
    GarbageCollectionAlgorithm gca = new GarbageCollectionAlgorithm();
    TestGCE gce = new TestGCE();
    gce.candidates.add("hdfs://foo.com:6000/accumulo/tables/1/t-00001/A000001.rf");
    gce.candidates.add("hdfs://foo.com:6000/accumulo/tables/2/t-00002/A000002.rf");
    // Some file of unknown length has no replication yet (representative of the bulk-import case)
    Status status = Status.newBuilder().setInfiniteEnd(true).setBegin(0).setClosed(true).build();
    gce.filesToReplicate.put("hdfs://foo.com:6000/accumulo/tables/1/t-00001/A000001.rf", status);
    gca.collect(gce);
    // That one file still needs to be replicated, so it must not be deleted.
    Assert.assertEquals(1, gce.deletes.size());
    Assert.assertEquals("hdfs://foo.com:6000/accumulo/tables/2/t-00002/A000002.rf", gce.deletes.get(0));
}
Also used : Status(org.apache.accumulo.server.replication.proto.Replication.Status) Test(org.junit.Test)
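
The Status built above is open-ended (infiniteEnd) with begin still at 0, meaning nothing has been replicated yet, so the collector has to keep the file. Below is a minimal sketch of that decision using only the generated protobuf getters; the helper name is made up for illustration and is not part of Accumulo's API.

// Illustrative only: a record still needs replication while its replicated-through offset
// (begin) has not caught up with its end (or with Long.MAX_VALUE for an open-ended record).
static boolean needsReplication(Status status) {
    if (status.getInfiniteEnd()) {
        return status.getBegin() < Long.MAX_VALUE;
    }
    return status.getBegin() < status.getEnd();
}
// For the bulk-import Status above, needsReplication(status) is true, so A000001.rf is kept.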

Example 12 with Status

Use of org.apache.accumulo.server.replication.proto.Replication.Status in project accumulo by apache.

From the class GarbageCollectionTest, method finishedReplicationRecordsDontPreventDeletion.

@Test
public void finishedReplicationRecordsDontPreventDeletion() throws Exception {
    GarbageCollectionAlgorithm gca = new GarbageCollectionAlgorithm();
    TestGCE gce = new TestGCE();
    gce.candidates.add("hdfs://foo.com:6000/accumulo/tables/1/t-00001/A000001.rf");
    gce.candidates.add("hdfs://foo.com:6000/accumulo/tables/2/t-00002/A000002.rf");
    Status status = Status.newBuilder().setClosed(true).setEnd(100).setBegin(100).build();
    gce.filesToReplicate.put("hdfs://foo.com:6000/accumulo/tables/1/t-00001/A000001.rf", status);
    gca.collect(gce);
    // No refs to A000002.rf, and a closed, finished repl for A000001.rf should not preclude
    // it from being deleted
    Assert.assertEquals(2, gce.deletes.size());
}
Also used : Status(org.apache.accumulo.server.replication.proto.Replication.Status) Test(org.junit.Test)
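
The mirror image of the previous example: a closed record whose begin offset has caught up with its end carries no outstanding replication work, so the file is deletable. Again only a sketch under the same assumptions; the helper is hypothetical, not Accumulo's API.

// Illustrative only: closed plus a fully caught-up begin offset means nothing is left to replicate.
static boolean safeToDelete(Status status) {
    boolean fullyReplicated = status.getInfiniteEnd()
            ? status.getBegin() == Long.MAX_VALUE
            : status.getBegin() >= status.getEnd();
    return status.getClosed() && fullyReplicated;
}
// The Status above has begin == end == 100 and closed == true, so safeToDelete(status) is true.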

Example 13 with Status

Use of org.apache.accumulo.server.replication.proto.Replication.Status in project accumulo by apache.

From the class StatusCombinerTest, method commutativeWithMultipleUpdates.

@Test
public void commutativeWithMultipleUpdates() {
    Status newFile = StatusUtil.fileCreated(100), update1 = StatusUtil.ingestedUntil(100),
        update2 = StatusUtil.ingestedUntil(200), repl1 = StatusUtil.replicated(50),
        repl2 = StatusUtil.replicated(150);
    Status order1 = combiner.typedReduce(key, Arrays.asList(newFile, update1, repl1, update2, repl2).iterator());
    // Got all replication updates before ingest updates
    Status permutation = combiner.typedReduce(key, Arrays.asList(newFile, repl1, update1, repl2, update2).iterator());
    Assert.assertEquals(order1, permutation);
    // All replications before updates
    permutation = combiner.typedReduce(key, Arrays.asList(newFile, repl1, repl2, update1, update2).iterator());
    Assert.assertEquals(order1, permutation);
    // All updates before replications (the trailing repeated updates should not change the result)
    permutation = combiner.typedReduce(key, Arrays.asList(newFile, update1, update2, repl1, repl2, update1, update2).iterator());
    Assert.assertEquals(order1, permutation);
}
Also used : Status(org.apache.accumulo.server.replication.proto.Replication.Status) Test(org.junit.Test)
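
The commutativity asserted here follows from combining every field with an order-insensitive operation. A simplified two-way merge in that spirit is sketched below; it illustrates the idea only and is not the exact field-by-field logic of Accumulo's StatusCombiner.

// Sketch: max and logical OR are commutative and associative, so reducing any permutation
// of the same updates yields the same combined Status.
static Status merge(Status a, Status b) {
    return Status.newBuilder()
            .setBegin(Math.max(a.getBegin(), b.getBegin()))      // replication progress
            .setEnd(Math.max(a.getEnd(), b.getEnd()))            // ingest progress
            .setClosed(a.getClosed() || b.getClosed())
            .setInfiniteEnd(a.getInfiniteEnd() || b.getInfiniteEnd())
            .build();
}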

Example 14 with Status

Use of org.apache.accumulo.server.replication.proto.Replication.Status in project accumulo by apache.

From the class StatusCombinerTest, method duplicateStatuses.

@Test
public void duplicateStatuses() {
    Status newFile = StatusUtil.fileCreated(100), update1 = StatusUtil.ingestedUntil(builder, 100),
        update2 = StatusUtil.ingestedUntil(builder, 200), repl1 = StatusUtil.replicated(builder, 50),
        repl2 = StatusUtil.replicated(builder, 150);
    Status order1 = combiner.typedReduce(key, Arrays.asList(newFile, update1, repl1, update2, repl2).iterator());
    // Repeat the same thing more than once
    Status permutation = combiner.typedReduce(key, Arrays.asList(newFile, repl1, update1, update1, repl2, update2, update2).iterator());
    Assert.assertEquals(order1, permutation);
}
Also used : Status(org.apache.accumulo.server.replication.proto.Replication.Status) Test(org.junit.Test)
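
Duplicate tolerance falls out of the same algebra: max and OR are idempotent, so seeing the same update twice cannot move the result. A quick check built on the merge sketch from Example 13 (illustrative, not part of the project's tests):

Status update1 = StatusUtil.ingestedUntil(100), update2 = StatusUtil.ingestedUntil(200);
// Folding update2 in twice gives the same result as folding it in once.
Assert.assertEquals(merge(update1, update2), merge(merge(update1, update2), update2));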

Example 15 with Status

Use of org.apache.accumulo.server.replication.proto.Replication.Status in project accumulo by apache.

From the class StatusCombinerTest, method commutativeWithClose.

@Test
public void commutativeWithClose() {
    Status newFile = StatusUtil.fileCreated(100), closed = StatusUtil.fileClosed(), secondSync = StatusUtil.ingestedUntil(200);
    Status order1 = combiner.typedReduce(key, Arrays.asList(newFile, closed, secondSync).iterator()),
        order2 = combiner.typedReduce(key, Arrays.asList(newFile, secondSync, closed).iterator());
    Assert.assertEquals(order1, order2);
}
Also used : Status(org.apache.accumulo.server.replication.proto.Replication.Status) Test(org.junit.Test)
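
The closed flag is the simplest case: it combines with a logical OR, so it does not matter whether the close or the later ingest update is seen first. A one-line check against the same merge sketch (illustrative):

Status closed = StatusUtil.fileClosed(), secondSync = StatusUtil.ingestedUntil(200);
// OR and max are symmetric in their arguments, so both orders reduce to the same Status.
Assert.assertEquals(merge(closed, secondSync), merge(secondSync, closed));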

Aggregations

Status (org.apache.accumulo.server.replication.proto.Replication.Status) 77
Test (org.junit.Test) 57
Mutation (org.apache.accumulo.core.data.Mutation) 30
Text (org.apache.hadoop.io.Text) 29
BatchWriter (org.apache.accumulo.core.client.BatchWriter) 28
Key (org.apache.accumulo.core.data.Key) 27
Value (org.apache.accumulo.core.data.Value) 26
Scanner (org.apache.accumulo.core.client.Scanner) 21
ReplicationTarget (org.apache.accumulo.core.replication.ReplicationTarget) 20
Path (org.apache.hadoop.fs.Path) 17
HashMap (java.util.HashMap) 14
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig) 14
Table (org.apache.accumulo.core.client.impl.Table) 14
ReplicationTable (org.apache.accumulo.core.replication.ReplicationTable) 13
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException) 12
AccumuloException (org.apache.accumulo.core.client.AccumuloException) 11
Connector (org.apache.accumulo.core.client.Connector) 11
InvalidProtocolBufferException (com.google.protobuf.InvalidProtocolBufferException) 10
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException) 10
DataInputStream (java.io.DataInputStream) 9