
Example 11 with ReplicationTarget

Use of org.apache.accumulo.core.replication.ReplicationTarget in project accumulo by apache.

From the class ReplicationUtil, method getReplicationTargets:

public Set<ReplicationTarget> getReplicationTargets() {
    // The total set of configured targets
    final Set<ReplicationTarget> allConfiguredTargets = new HashSet<>();
    final Map<String, Table.ID> tableNameToId = Tables.getNameToIdMap(context.getInstance());
    for (String table : tableNameToId.keySet()) {
        if (MetadataTable.NAME.equals(table) || RootTable.NAME.equals(table)) {
            continue;
        }
        Table.ID localId = tableNameToId.get(table);
        if (null == localId) {
            log.trace("Could not determine ID for {}", table);
            continue;
        }
        TableConfiguration tableConf = context.getServerConfigurationFactory().getTableConfiguration(localId);
        if (null == tableConf) {
            log.trace("Could not get configuration for table {} (it no longer exists)", table);
            continue;
        }
        for (Entry<String, String> prop : tableConf.getAllPropertiesWithPrefix(Property.TABLE_REPLICATION_TARGET).entrySet()) {
            String peerName = prop.getKey().substring(Property.TABLE_REPLICATION_TARGET.getKey().length());
            String remoteIdentifier = prop.getValue();
            ReplicationTarget target = new ReplicationTarget(peerName, remoteIdentifier, localId);
            allConfiguredTargets.add(target);
        }
    }
    return allConfiguredTargets;
}
Also used: MetadataTable(org.apache.accumulo.core.metadata.MetadataTable), RootTable(org.apache.accumulo.core.metadata.RootTable), Table(org.apache.accumulo.core.client.impl.Table), ReplicationTable(org.apache.accumulo.core.replication.ReplicationTable), ReplicationTarget(org.apache.accumulo.core.replication.ReplicationTarget), TableConfiguration(org.apache.accumulo.server.conf.TableConfiguration), HashSet(java.util.HashSet)
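
For context, here is a minimal usage sketch. It is not part of the original example: it assumes a constructed ReplicationUtil instance and the ReplicationTarget accessors getSourceTableId(), getPeerName() and getRemoteIdentifier().

// Hypothetical caller: print every configured replication target.
static void printConfiguredTargets(ReplicationUtil replicationUtil) {
    for (ReplicationTarget target : replicationUtil.getReplicationTargets()) {
        System.out.println("table " + target.getSourceTableId() + " -> peer " + target.getPeerName() + " (remote table " + target.getRemoteIdentifier() + ")");
    }
}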

Example 12 with ReplicationTarget

Use of org.apache.accumulo.core.replication.ReplicationTarget in project accumulo by apache.

From the class DistributedWorkQueueWorkAssignerHelper, method fromQueueKey:

/**
 * @param queueKey
 *          Key from the work queue
 * @return Components which created the queue key
 */
public static Entry<String, ReplicationTarget> fromQueueKey(String queueKey) {
    requireNonNull(queueKey);
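    // Queue keys are laid out as filename, peer name, remote identifier and source table id, joined by KEY_SEPARATOR (the order the components are parsed below)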
    int index = queueKey.indexOf(KEY_SEPARATOR);
    if (-1 == index) {
        throw new IllegalArgumentException("Could not find expected separator in queue key '" + queueKey + "'");
    }
    String filename = queueKey.substring(0, index);
    int secondIndex = queueKey.indexOf(KEY_SEPARATOR, index + 1);
    if (-1 == secondIndex) {
        throw new IllegalArgumentException("Could not find expected separator in queue key '" + queueKey + "'");
    }
    int thirdIndex = queueKey.indexOf(KEY_SEPARATOR, secondIndex + 1);
    if (-1 == thirdIndex) {
        throw new IllegalArgumentException("Could not find expected seperator in queue key '" + queueKey + "'");
    }
    return Maps.immutableEntry(filename, new ReplicationTarget(queueKey.substring(index + 1, secondIndex), queueKey.substring(secondIndex + 1, thirdIndex), Table.ID.of(queueKey.substring(thirdIndex + 1))));
}
Also used: ReplicationTarget(org.apache.accumulo.core.replication.ReplicationTarget)
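
A short round-trip sketch for illustration (an assumption, not from the page): it presumes the helper's companion getQueueKey(String, ReplicationTarget) method, which joins the filename, peer name, remote identifier and source table id with KEY_SEPARATOR in the same order fromQueueKey parses them.

// Build a queue key for a (hypothetical) WAL filename and parse it back into its components.
ReplicationTarget target = new ReplicationTarget("peer", "2", Table.ID.of("1"));
String queueKey = DistributedWorkQueueWorkAssignerHelper.getQueueKey("wal-0001", target);
Entry<String, ReplicationTarget> parts = DistributedWorkQueueWorkAssignerHelper.fromQueueKey(queueKey);
assert "wal-0001".equals(parts.getKey());
assert target.equals(parts.getValue());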

Example 13 with ReplicationTarget

Use of org.apache.accumulo.core.replication.ReplicationTarget in project accumulo by apache.

From the class AccumuloReplicaSystemTest, method dontSendEmptyDataToPeer:

@Test
public void dontSendEmptyDataToPeer() throws Exception {
    Client replClient = createMock(Client.class);
    AccumuloReplicaSystem ars = createMock(AccumuloReplicaSystem.class);
    WalEdits edits = new WalEdits(Collections.emptyList());
    WalReplication walReplication = new WalReplication(edits, 0, 0, 0);
    ReplicationTarget target = new ReplicationTarget("peer", "2", Table.ID.of("1"));
    DataInputStream input = null;
    Path p = new Path("/accumulo/wals/tserver+port/" + UUID.randomUUID().toString());
    Status status = null;
    long sizeLimit = Long.MAX_VALUE;
    String remoteTableId = target.getRemoteIdentifier();
    TCredentials tcreds = null;
    Set<Integer> tids = new HashSet<>();
    WalClientExecReturn walClientExec = ars.new WalClientExecReturn(target, input, p, status, sizeLimit, remoteTableId, tcreds, tids);
    expect(ars.getWalEdits(target, input, p, status, sizeLimit, tids)).andReturn(walReplication);
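    // No expectations are set on replClient: because the WAL replication carries zero edits, nothing should be sent to the peer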
    replay(replClient, ars);
    ReplicationStats stats = walClientExec.execute(replClient);
    verify(replClient, ars);
    Assert.assertEquals(new ReplicationStats(0L, 0L, 0L), stats);
}
Also used: Path(org.apache.hadoop.fs.Path), Status(org.apache.accumulo.server.replication.proto.Replication.Status), TCredentials(org.apache.accumulo.core.security.thrift.TCredentials), WalReplication(org.apache.accumulo.tserver.replication.AccumuloReplicaSystem.WalReplication), WalClientExecReturn(org.apache.accumulo.tserver.replication.AccumuloReplicaSystem.WalClientExecReturn), DataInputStream(java.io.DataInputStream), WalEdits(org.apache.accumulo.core.replication.thrift.WalEdits), ReplicationTarget(org.apache.accumulo.core.replication.ReplicationTarget), ReplicationStats(org.apache.accumulo.tserver.replication.AccumuloReplicaSystem.ReplicationStats), Client(org.apache.accumulo.core.replication.thrift.ReplicationServicer.Client), HashSet(java.util.HashSet), Test(org.junit.Test)

Example 14 with ReplicationTarget

Use of org.apache.accumulo.core.replication.ReplicationTarget in project accumulo by apache.

From the class AccumuloReplicaSystemTest, method mutationsNotReReplicatedToPeers:

@Test
public void mutationsNotReReplicatedToPeers() throws Exception {
    AccumuloReplicaSystem ars = new AccumuloReplicaSystem();
    Map<String, String> confMap = new HashMap<>();
    confMap.put(Property.REPLICATION_NAME.getKey(), "source");
    AccumuloConfiguration conf = new ConfigurationCopy(confMap);
    ars.setConf(conf);
    LogFileValue value = new LogFileValue();
    value.mutations = new ArrayList<>();
    Mutation m = new Mutation("row");
    m.put("", "", new Value(new byte[0]));
    value.mutations.add(m);
    m = new Mutation("row2");
    m.put("", "", new Value(new byte[0]));
    m.addReplicationSource("peer");
    value.mutations.add(m);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(baos);
    // Replicate our 2 mutations to "peer", from tableid 1 to tableid 1
    ars.writeValueAvoidingReplicationCycles(out, value, new ReplicationTarget("peer", "1", Table.ID.of("1")));
    out.close();
    ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
    DataInputStream in = new DataInputStream(bais);
    int numMutations = in.readInt();
    Assert.assertEquals(1, numMutations);
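    // Only the first mutation was written back out; the second already listed "peer" as a replication source, so replicating it to "peer" again would create a cycle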
    m = new Mutation();
    m.readFields(in);
    Assert.assertEquals("row", new String(m.getRow()));
    Assert.assertEquals(1, m.getReplicationSources().size());
    Assert.assertTrue("Expected source cluster to be listed in mutation replication source", m.getReplicationSources().contains("source"));
}
Also used: ConfigurationCopy(org.apache.accumulo.core.conf.ConfigurationCopy), HashMap(java.util.HashMap), DataOutputStream(java.io.DataOutputStream), ByteArrayOutputStream(java.io.ByteArrayOutputStream), DataInputStream(java.io.DataInputStream), ReplicationTarget(org.apache.accumulo.core.replication.ReplicationTarget), ByteArrayInputStream(java.io.ByteArrayInputStream), LogFileValue(org.apache.accumulo.tserver.logger.LogFileValue), Value(org.apache.accumulo.core.data.Value), Mutation(org.apache.accumulo.core.data.Mutation), ServerMutation(org.apache.accumulo.server.data.ServerMutation), AccumuloConfiguration(org.apache.accumulo.core.conf.AccumuloConfiguration), Test(org.junit.Test)

Example 15 with ReplicationTarget

Use of org.apache.accumulo.core.replication.ReplicationTarget in project accumulo by apache.

From the class AccumuloReplicaSystemTest, method endOfFileExceptionOnClosedWalImpliesFullyReplicated:

@Test
public void endOfFileExceptionOnClosedWalImpliesFullyReplicated() throws Exception {
    Map<String, String> confMap = new HashMap<>();
    confMap.put(Property.REPLICATION_NAME.getKey(), "source");
    AccumuloConfiguration conf = new ConfigurationCopy(confMap);
    AccumuloReplicaSystem ars = new AccumuloReplicaSystem();
    ars.setConf(conf);
    // Setting the file to be closed with the infinite end implies that we need to bump the begin up to Long.MAX_VALUE
    // If it were still open, more data could be appended that we need to process
    Status status = Status.newBuilder().setBegin(100).setEnd(0).setInfiniteEnd(true).setClosed(true).build();
    DataInputStream dis = new DataInputStream(new ByteArrayInputStream(new byte[0]));
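    // The stream is empty, so reading edits hits end-of-file immediately; for a WAL marked closed that means the file has been fully replicated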
    WalReplication repl = ars.getWalEdits(new ReplicationTarget("peer", "1", Table.ID.of("1")), dis, new Path("/accumulo/wals/tserver+port/wal"), status, Long.MAX_VALUE, new HashSet<>());
    // We stopped because we got to the end of the file
    Assert.assertEquals(Long.MAX_VALUE, repl.entriesConsumed);
    Assert.assertEquals(0, repl.walEdits.getEditsSize());
    Assert.assertEquals(0, repl.sizeInRecords);
    Assert.assertEquals(0, repl.sizeInBytes);
}
Also used: Status(org.apache.accumulo.server.replication.proto.Replication.Status), Path(org.apache.hadoop.fs.Path), ConfigurationCopy(org.apache.accumulo.core.conf.ConfigurationCopy), HashMap(java.util.HashMap), WalReplication(org.apache.accumulo.tserver.replication.AccumuloReplicaSystem.WalReplication), DataInputStream(java.io.DataInputStream), ReplicationTarget(org.apache.accumulo.core.replication.ReplicationTarget), ByteArrayInputStream(java.io.ByteArrayInputStream), AccumuloConfiguration(org.apache.accumulo.core.conf.AccumuloConfiguration), Test(org.junit.Test)

Aggregations

ReplicationTarget (org.apache.accumulo.core.replication.ReplicationTarget): 42 uses
Test (org.junit.Test): 31 uses
Status (org.apache.accumulo.server.replication.proto.Replication.Status): 22 uses
Text (org.apache.hadoop.io.Text): 19 uses
Mutation (org.apache.accumulo.core.data.Mutation): 18 uses
HashMap (java.util.HashMap): 16 uses
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 15 uses
Value (org.apache.accumulo.core.data.Value): 15 uses
Path (org.apache.hadoop.fs.Path): 15 uses
Table (org.apache.accumulo.core.client.impl.Table): 13 uses
Key (org.apache.accumulo.core.data.Key): 13 uses
HashSet (java.util.HashSet): 12 uses
ReplicationTable (org.apache.accumulo.core.replication.ReplicationTable): 12 uses
DistributedWorkQueue (org.apache.accumulo.server.zookeeper.DistributedWorkQueue): 9 uses
DataInputStream (java.io.DataInputStream): 8 uses
Scanner (org.apache.accumulo.core.client.Scanner): 8 uses
AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration): 8 uses
ConfigurationCopy (org.apache.accumulo.core.conf.ConfigurationCopy): 8 uses
ByteArrayInputStream (java.io.ByteArrayInputStream): 6 uses
Map (java.util.Map): 5 uses