
Example 6 with ReplicationTarget

Use of org.apache.accumulo.core.replication.ReplicationTarget in project accumulo by apache.

From class UnorderedWorkAssignerIT, method workNotReAdded.

@Test
public void workNotReAdded() throws Exception {
    Set<String> queuedWork = new HashSet<>();
    assigner.setQueuedWork(queuedWork);
    ReplicationTarget target = new ReplicationTarget("cluster1", "table1", Table.ID.of("1"));
    String serializedTarget = target.getPeerName() + DistributedWorkQueueWorkAssignerHelper.KEY_SEPARATOR
        + target.getRemoteIdentifier() + DistributedWorkQueueWorkAssignerHelper.KEY_SEPARATOR
        + target.getSourceTableId();
    queuedWork.add("wal1|" + serializedTarget);
    // Create a status mutation for the file whose work is already queued
    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
    String file1 = "/accumulo/wal/tserver+port/wal1";
    Mutation m = new Mutation(file1);
    WorkSection.add(m, target.toText(), StatusUtil.openWithUnknownLengthValue());
    bw.addMutation(m);
    bw.close();
    DistributedWorkQueue workQueue = createMock(DistributedWorkQueue.class);
    assigner.setWorkQueue(workQueue);
    assigner.setMaxQueueSize(Integer.MAX_VALUE);
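    // No expectations are recorded before replay(), so the mock work queue
    // will fail the test if createWork() tries to re-queue the file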
    replay(workQueue);
    assigner.createWork();
    verify(workQueue);
}
Also used: ReplicationTarget (org.apache.accumulo.core.replication.ReplicationTarget), BatchWriter (org.apache.accumulo.core.client.BatchWriter), Mutation (org.apache.accumulo.core.data.Mutation), DistributedWorkQueue (org.apache.accumulo.server.zookeeper.DistributedWorkQueue), HashSet (java.util.HashSet), Test (org.junit.Test)
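
The queue key assembled by hand above follows the work assigner's serialized format: the WAL name and the ReplicationTarget's fields joined by KEY_SEPARATOR. A minimal round-trip sketch, assuming the getQueueKey and fromQueueKey helpers on DistributedWorkQueueWorkAssignerHelper mirror the test's manual concatenation:

ReplicationTarget target = new ReplicationTarget("cluster1", "table1", Table.ID.of("1"));
// Assumed helper: joins the WAL name and the target's fields with KEY_SEPARATOR
String queueKey = DistributedWorkQueueWorkAssignerHelper.getQueueKey("wal1", target);
// Assumed helper: splits the key back into the filename and the parsed target
Entry<String, ReplicationTarget> parsed = DistributedWorkQueueWorkAssignerHelper.fromQueueKey(queueKey);
Assert.assertEquals("wal1", parsed.getKey());
Assert.assertEquals(target, parsed.getValue());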

Example 7 with ReplicationTarget

Use of org.apache.accumulo.core.replication.ReplicationTarget in project accumulo by apache.

From class WorkMakerIT, method singleUnitMultipleTargets.

@Test
public void singleUnitMultipleTargets() throws Exception {
    String table = testName.getMethodName();
    conn.tableOperations().create(table);
    Table.ID tableId = Table.ID.of(conn.tableOperations().tableIdMap().get(table));
    String file = "hdfs://localhost:8020/accumulo/wal/123456-1234-1234-12345678";
    Mutation m = new Mutation(new Path(file).toString());
    m.put(StatusSection.NAME, new Text(tableId.getUtf8()), StatusUtil.fileCreatedValue(System.currentTimeMillis()));
    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
    bw.addMutation(m);
    bw.flush();
    Set<ReplicationTarget> expectedTargets = new HashSet<>();
    try (Scanner s = ReplicationTable.getScanner(conn)) {
        StatusSection.limit(s);
        // Assert that we have one record in the status section
        Assert.assertEquals(1, Iterables.size(s));
        MockWorkMaker workMaker = new MockWorkMaker(conn);
        Map<String, String> targetClusters = ImmutableMap.of("remote_cluster_1", "4", "remote_cluster_2", "6", "remote_cluster_3", "8");
        for (Entry<String, String> cluster : targetClusters.entrySet()) {
            expectedTargets.add(new ReplicationTarget(cluster.getKey(), cluster.getValue(), tableId));
        }
        workMaker.setBatchWriter(bw);
        workMaker.addWorkRecord(new Text(file), StatusUtil.fileCreatedValue(System.currentTimeMillis()), targetClusters, tableId);
    }
    try (Scanner s = ReplicationTable.getScanner(conn)) {
        WorkSection.limit(s);
        Set<ReplicationTarget> actualTargets = new HashSet<>();
        for (Entry<Key, Value> entry : s) {
            Assert.assertEquals(file, entry.getKey().getRow().toString());
            Assert.assertEquals(WorkSection.NAME, entry.getKey().getColumnFamily());
            ReplicationTarget target = ReplicationTarget.from(entry.getKey().getColumnQualifier());
            actualTargets.add(target);
        }
        for (ReplicationTarget expected : expectedTargets) {
            Assert.assertTrue("Did not find expected target: " + expected, actualTargets.contains(expected));
            actualTargets.remove(expected);
        }
        Assert.assertTrue("Found extra replication work entries: " + actualTargets, actualTargets.isEmpty());
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Scanner (org.apache.accumulo.core.client.Scanner), Table (org.apache.accumulo.core.client.impl.Table), ReplicationTable (org.apache.accumulo.core.replication.ReplicationTable), Text (org.apache.hadoop.io.Text), ReplicationTarget (org.apache.accumulo.core.replication.ReplicationTarget), Value (org.apache.accumulo.core.data.Value), Mutation (org.apache.accumulo.core.data.Mutation), BatchWriter (org.apache.accumulo.core.client.BatchWriter), Key (org.apache.accumulo.core.data.Key), HashSet (java.util.HashSet), Test (org.junit.Test)
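
Both examples above serialize a ReplicationTarget into a Text column qualifier and parse it back while scanning. A minimal round-trip sketch using only the toText() and from() calls already shown in the tests:

ReplicationTarget expected = new ReplicationTarget("remote_cluster_1", "4", Table.ID.of("2"));
// toText() packs the peer name, remote identifier, and source table id into a Text
Text serialized = expected.toText();
// from() decodes a work-section column qualifier, as WorkMakerIT does above
ReplicationTarget actual = ReplicationTarget.from(serialized);
Assert.assertEquals(expected, actual);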

Example 8 with ReplicationTarget

Use of org.apache.accumulo.core.replication.ReplicationTarget in project accumulo by apache.

From class FinishedWorkUpdaterIT, method recordsWithProgressUpdateBothTables.

@Test
public void recordsWithProgressUpdateBothTables() throws Exception {
    conn.securityOperations().grantTablePermission(conn.whoami(), ReplicationTable.NAME, TablePermission.READ);
    conn.securityOperations().grantTablePermission(conn.whoami(), ReplicationTable.NAME, TablePermission.WRITE);
    ReplicationTable.setOnline(conn);
    String file = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    Status stat = Status.newBuilder().setBegin(100).setEnd(200).setClosed(true).setInfiniteEnd(false).build();
    ReplicationTarget target = new ReplicationTarget("peer", "table1", Table.ID.of("1"));
    // Create a single work record for a file to some peer
    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
    Mutation m = new Mutation(file);
    WorkSection.add(m, target.toText(), ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    bw.close();
    updater.run();
    try (Scanner s = ReplicationTable.getScanner(conn)) {
        s.setRange(Range.exact(file));
        StatusSection.limit(s);
        Entry<Key, Value> entry = Iterables.getOnlyElement(s);
        Assert.assertEquals(StatusSection.NAME, entry.getKey().getColumnFamily());
        Assert.assertEquals(target.getSourceTableId().canonicalID(), entry.getKey().getColumnQualifier().toString());
        // Only the begin offset is expected to change, so that is all we verify
        Status actual = Status.parseFrom(entry.getValue().get());
        Assert.assertEquals(stat.getBegin(), actual.getBegin());
    }
}
Also used: Status (org.apache.accumulo.server.replication.proto.Replication.Status), Scanner (org.apache.accumulo.core.client.Scanner), ReplicationTarget (org.apache.accumulo.core.replication.ReplicationTarget), Value (org.apache.accumulo.core.data.Value), BatchWriter (org.apache.accumulo.core.client.BatchWriter), Mutation (org.apache.accumulo.core.data.Mutation), Key (org.apache.accumulo.core.data.Key), Test (org.junit.Test)
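
The Status protobuf travels through the replication table as an Accumulo Value. A minimal sketch of that round trip, using the same ProtobufUtil.toValue and Status.parseFrom calls as the test above:

Status stat = Status.newBuilder().setBegin(100).setEnd(200).setClosed(true).setInfiniteEnd(false).build();
// Serialize the protobuf into a Value, as when adding a work record
Value value = ProtobufUtil.toValue(stat);
// Parse the raw bytes back, as when reading the status section
Status roundTrip = Status.parseFrom(value.get());
Assert.assertEquals(stat.getBegin(), roundTrip.getBegin());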

Example 9 with ReplicationTarget

Use of org.apache.accumulo.core.replication.ReplicationTarget in project accumulo by apache.

From class FinishedWorkUpdaterIT, method chooseMinimumBeginOffset.

@Test
public void chooseMinimumBeginOffset() throws Exception {
    conn.securityOperations().grantTablePermission(conn.whoami(), ReplicationTable.NAME, TablePermission.READ);
    conn.securityOperations().grantTablePermission(conn.whoami(), ReplicationTable.NAME, TablePermission.WRITE);
    ReplicationTable.setOnline(conn);
    String file = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    // @formatter:off
    Status stat1 = Status.newBuilder().setBegin(100).setEnd(1000).setClosed(true).setInfiniteEnd(false).build(),
           stat2 = Status.newBuilder().setBegin(500).setEnd(1000).setClosed(true).setInfiniteEnd(false).build(),
           stat3 = Status.newBuilder().setBegin(1).setEnd(1000).setClosed(true).setInfiniteEnd(false).build();
    ReplicationTarget target1 = new ReplicationTarget("peer1", "table1", Table.ID.of("1")),
                      target2 = new ReplicationTarget("peer2", "table2", Table.ID.of("1")),
                      target3 = new ReplicationTarget("peer3", "table3", Table.ID.of("1"));
    // @formatter:on
    // Create three work records for the same file, one per peer
    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
    Mutation m = new Mutation(file);
    WorkSection.add(m, target1.toText(), ProtobufUtil.toValue(stat1));
    WorkSection.add(m, target2.toText(), ProtobufUtil.toValue(stat2));
    WorkSection.add(m, target3.toText(), ProtobufUtil.toValue(stat3));
    bw.addMutation(m);
    bw.close();
    updater.run();
    try (Scanner s = ReplicationTable.getScanner(conn)) {
        s.setRange(Range.exact(file));
        StatusSection.limit(s);
        Entry<Key, Value> entry = Iterables.getOnlyElement(s);
        Assert.assertEquals(StatusSection.NAME, entry.getKey().getColumnFamily());
        Assert.assertEquals(target1.getSourceTableId().canonicalID(), entry.getKey().getColumnQualifier().toString());
        // Only the begin offset is expected to change, so that is all we verify
        Status actual = Status.parseFrom(entry.getValue().get());
        Assert.assertEquals(1, actual.getBegin());
    }
}
Also used: Status (org.apache.accumulo.server.replication.proto.Replication.Status), Scanner (org.apache.accumulo.core.client.Scanner), ReplicationTarget (org.apache.accumulo.core.replication.ReplicationTarget), Value (org.apache.accumulo.core.data.Value), BatchWriter (org.apache.accumulo.core.client.BatchWriter), Mutation (org.apache.accumulo.core.data.Mutation), Key (org.apache.accumulo.core.data.Key), Test (org.junit.Test)
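
The assertion above expects the updater to keep the smallest begin offset across the three work entries, since replication is only complete up to the slowest peer's position. A hypothetical sketch of that aggregation, written to match what the test verifies rather than the updater's actual implementation:

// Replication has only finished up to the minimum begin across all peers
long minBegin = Long.MAX_VALUE;
for (Status status : java.util.Arrays.asList(stat1, stat2, stat3)) {
    minBegin = Math.min(minBegin, status.getBegin());
}
// Begins of 100, 500, and 1 collapse to 1, matching the assertion above
Assert.assertEquals(1, minBegin);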

Example 10 with ReplicationTarget

Use of org.apache.accumulo.core.replication.ReplicationTarget in project accumulo by apache.

From class ReplicationUtil, method getPendingReplications.

public Map<ReplicationTarget, Long> getPendingReplications() {
    final Map<ReplicationTarget, Long> counts = new HashMap<>();
    // Read over the queued work
    BatchScanner bs;
    try {
        bs = context.getConnector().createBatchScanner(ReplicationTable.NAME, Authorizations.EMPTY, 4);
    } catch (TableNotFoundException | AccumuloException | AccumuloSecurityException e) {
        log.debug("No replication table exists", e);
        return counts;
    }
    bs.setRanges(Collections.singleton(new Range()));
    WorkSection.limit(bs);
    try {
        Text buffer = new Text();
        for (Entry<Key, Value> entry : bs) {
            Key k = entry.getKey();
            k.getColumnQualifier(buffer);
            ReplicationTarget target = ReplicationTarget.from(buffer);
            // TODO ACCUMULO-2835 once explicit lengths are tracked, we can give size-based estimates instead of just file-based
            Long count = counts.get(target);
            if (null == count) {
                counts.put(target, 1L);
            } else {
                counts.put(target, count + 1);
            }
        }
    } finally {
        bs.close();
    }
    return counts;
}
Also used: AccumuloException (org.apache.accumulo.core.client.AccumuloException), HashMap (java.util.HashMap), BatchScanner (org.apache.accumulo.core.client.BatchScanner), Text (org.apache.hadoop.io.Text), Range (org.apache.accumulo.core.data.Range), TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException), ReplicationTarget (org.apache.accumulo.core.replication.ReplicationTarget), Value (org.apache.accumulo.core.data.Value), AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException), Key (org.apache.accumulo.core.data.Key)
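
The get-then-put counting in the loop predates the Java 8 Map API; on Java 8+, counts.merge expresses the same logic in one line (a sketch of an equivalent, not the project's code as written):

// Start a new target's count at 1L, otherwise increment it
counts.merge(target, 1L, Long::sum);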

Aggregations

ReplicationTarget (org.apache.accumulo.core.replication.ReplicationTarget): 42 uses
Test (org.junit.Test): 31 uses
Status (org.apache.accumulo.server.replication.proto.Replication.Status): 22 uses
Text (org.apache.hadoop.io.Text): 19 uses
Mutation (org.apache.accumulo.core.data.Mutation): 18 uses
HashMap (java.util.HashMap): 16 uses
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 15 uses
Value (org.apache.accumulo.core.data.Value): 15 uses
Path (org.apache.hadoop.fs.Path): 15 uses
Table (org.apache.accumulo.core.client.impl.Table): 13 uses
Key (org.apache.accumulo.core.data.Key): 13 uses
HashSet (java.util.HashSet): 12 uses
ReplicationTable (org.apache.accumulo.core.replication.ReplicationTable): 12 uses
DistributedWorkQueue (org.apache.accumulo.server.zookeeper.DistributedWorkQueue): 9 uses
DataInputStream (java.io.DataInputStream): 8 uses
Scanner (org.apache.accumulo.core.client.Scanner): 8 uses
AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration): 8 uses
ConfigurationCopy (org.apache.accumulo.core.conf.ConfigurationCopy): 8 uses
ByteArrayInputStream (java.io.ByteArrayInputStream): 6 uses
Map (java.util.Map): 5 uses