Example usage of org.apache.accumulo.core.replication.ReplicationTarget in the Apache Accumulo project.
From class ReplicationUtil, method getReplicationTargets.
/**
 * Computes the full set of replication targets configured on any table.
 * <p>
 * The metadata and root tables are skipped. Tables whose ID or configuration cannot be resolved
 * (e.g. the table was deleted concurrently) are logged at trace level and ignored rather than
 * failing the whole scan.
 *
 * @return every ReplicationTarget declared via {@code table.replication.target.*} properties
 */
public Set<ReplicationTarget> getReplicationTargets() {
  // The total set of configured targets
  final Set<ReplicationTarget> allConfiguredTargets = new HashSet<>();
  final Map<String, Table.ID> tableNameToId = Tables.getNameToIdMap(context.getInstance());
  // Iterate the entrySet directly: keySet() + get(key) probes the map twice per table
  for (Entry<String, Table.ID> tableEntry : tableNameToId.entrySet()) {
    String table = tableEntry.getKey();
    if (MetadataTable.NAME.equals(table) || RootTable.NAME.equals(table)) {
      continue;
    }
    Table.ID localId = tableEntry.getValue();
    if (null == localId) {
      log.trace("Could not determine ID for {}", table);
      continue;
    }
    TableConfiguration tableConf = context.getServerConfigurationFactory().getTableConfiguration(localId);
    if (null == tableConf) {
      log.trace("Could not get configuration for table {} (it no longer exists)", table);
      continue;
    }
    // Each property key is TABLE_REPLICATION_TARGET prefix + peer name; the value is the
    // identifier of the table on the remote peer.
    for (Entry<String, String> prop : tableConf.getAllPropertiesWithPrefix(Property.TABLE_REPLICATION_TARGET).entrySet()) {
      String peerName = prop.getKey().substring(Property.TABLE_REPLICATION_TARGET.getKey().length());
      String remoteIdentifier = prop.getValue();
      ReplicationTarget target = new ReplicationTarget(peerName, remoteIdentifier, localId);
      allConfiguredTargets.add(target);
    }
  }
  return allConfiguredTargets;
}
Example usage of org.apache.accumulo.core.replication.ReplicationTarget in the Apache Accumulo project.
From class DistributedWorkQueueWorkAssignerHelper, method fromQueueKey.
/**
 * Decomposes a work-queue key of the form
 * {@code filename SEP peerName SEP remoteIdentifier SEP localTableId}.
 *
 * @param queueKey
 *          Key from the work queue
 * @return Components which created the queue key: the filename mapped to the reconstructed
 *         {@link ReplicationTarget}
 * @throws IllegalArgumentException
 *           if the key does not contain the three expected separators
 */
public static Entry<String, ReplicationTarget> fromQueueKey(String queueKey) {
  requireNonNull(queueKey);
  int index = requireSeparator(queueKey, 0);
  String filename = queueKey.substring(0, index);
  int secondIndex = requireSeparator(queueKey, index + 1);
  int thirdIndex = requireSeparator(queueKey, secondIndex + 1);
  return Maps.immutableEntry(filename,
      new ReplicationTarget(queueKey.substring(index + 1, secondIndex),
          queueKey.substring(secondIndex + 1, thirdIndex),
          Table.ID.of(queueKey.substring(thirdIndex + 1))));
}

/**
 * Returns the index of the next {@code KEY_SEPARATOR} at or after {@code fromIndex}, throwing if
 * none is found. (Also fixes the "seperator" typo the third message previously carried.)
 */
private static int requireSeparator(String queueKey, int fromIndex) {
  int index = queueKey.indexOf(KEY_SEPARATOR, fromIndex);
  if (-1 == index) {
    throw new IllegalArgumentException("Could not find expected separator in queue key '" + queueKey + "'");
  }
  return index;
}
Example usage of org.apache.accumulo.core.replication.ReplicationTarget in the Apache Accumulo project.
From class AccumuloReplicaSystemTest, method dontSendEmptyDataToPeer.
/**
 * Verifies that a WAL with zero edits produces zero replication traffic: the client executor
 * returns empty stats and never ships data to the peer.
 */
@Test
public void dontSendEmptyDataToPeer() throws Exception {
  Client replClient = createMock(Client.class);
  AccumuloReplicaSystem ars = createMock(AccumuloReplicaSystem.class);
  // An empty edit list: there is nothing to replicate
  WalEdits edits = new WalEdits(Collections.emptyList());
  WalReplication walReplication = new WalReplication(edits, 0, 0, 0);
  ReplicationTarget target = new ReplicationTarget("peer", "2", Table.ID.of("1"));
  DataInputStream input = null;
  Path p = new Path("/accumulo/wals/tserver+port/" + UUID.randomUUID().toString());
  Status status = null;
  long sizeLimit = Long.MAX_VALUE;
  String remoteTableId = target.getRemoteIdentifier();
  TCredentials tcreds = null;
  Set<Integer> tids = new HashSet<>();
  WalClientExecReturn walClientExec = ars.new WalClientExecReturn(target, input, p, status, sizeLimit, remoteTableId, tcreds, tids);
  // Only getWalEdits is expected; no replication RPC should be made against replClient
  expect(ars.getWalEdits(target, input, p, status, sizeLimit, tids)).andReturn(walReplication);
  replay(replClient, ars);
  ReplicationStats stats = walClientExec.execute(replClient);
  verify(replClient, ars);
  // Use the upper-case L suffix: a lower-case 'l' is easily mistaken for the digit '1'
  Assert.assertEquals(new ReplicationStats(0L, 0L, 0L), stats);
}
Example usage of org.apache.accumulo.core.replication.ReplicationTarget in the Apache Accumulo project.
From class AccumuloReplicaSystemTest, method mutationsNotReReplicatedToPeers.
/**
 * A mutation that already lists the peer as a replication source must be filtered out when
 * replicating back to that peer; the surviving mutation must carry this cluster's name as a
 * replication source.
 */
@Test
public void mutationsNotReReplicatedToPeers() throws Exception {
  // Configure a replica system whose local cluster is named "source"
  Map<String, String> siteConfig = new HashMap<>();
  siteConfig.put(Property.REPLICATION_NAME.getKey(), "source");
  AccumuloReplicaSystem replicaSystem = new AccumuloReplicaSystem();
  replicaSystem.setConf(new ConfigurationCopy(siteConfig));

  // Two mutations: one fresh, one already tagged as having originated from "peer"
  Mutation fresh = new Mutation("row");
  fresh.put("", "", new Value(new byte[0]));
  Mutation alreadyFromPeer = new Mutation("row2");
  alreadyFromPeer.put("", "", new Value(new byte[0]));
  alreadyFromPeer.addReplicationSource("peer");

  LogFileValue logValue = new LogFileValue();
  logValue.mutations = new ArrayList<>();
  logValue.mutations.add(fresh);
  logValue.mutations.add(alreadyFromPeer);

  ByteArrayOutputStream buffer = new ByteArrayOutputStream();
  DataOutputStream dataOut = new DataOutputStream(buffer);
  // Replicate our 2 mutations to "peer", from tableid 1 to tableid 1
  replicaSystem.writeValueAvoidingReplicationCycles(dataOut, logValue, new ReplicationTarget("peer", "1", Table.ID.of("1")));
  dataOut.close();

  DataInputStream dataIn = new DataInputStream(new ByteArrayInputStream(buffer.toByteArray()));
  // Only the fresh mutation should have been serialized
  Assert.assertEquals(1, dataIn.readInt());
  Mutation roundTripped = new Mutation();
  roundTripped.readFields(dataIn);
  Assert.assertEquals("row", new String(roundTripped.getRow()));
  Assert.assertEquals(1, roundTripped.getReplicationSources().size());
  Assert.assertTrue("Expected source cluster to be listed in mutation replication source", roundTripped.getReplicationSources().contains("source"));
}
Example usage of org.apache.accumulo.core.replication.ReplicationTarget in the Apache Accumulo project.
From class AccumuloReplicaSystemTest, method endOfFileExceptionOnClosedWalImpliesFullyReplicated.
/**
 * Hitting end-of-file on a WAL that is marked closed (with an infinite end) means the file is
 * fully consumed, so entriesConsumed should be bumped to Long.MAX_VALUE with no edits produced.
 */
@Test
public void endOfFileExceptionOnClosedWalImpliesFullyReplicated() throws Exception {
  AccumuloReplicaSystem replicaSystem = new AccumuloReplicaSystem();
  Map<String, String> siteConfig = new HashMap<>();
  siteConfig.put(Property.REPLICATION_NAME.getKey(), "source");
  replicaSystem.setConf(new ConfigurationCopy(siteConfig));

  // A closed file with the infinite end implies the begin must be bumped up to Long.MAX_VALUE;
  // if the file were still open, more data could be appended that we would need to process.
  Status closedStatus = Status.newBuilder().setBegin(100).setEnd(0).setInfiniteEnd(true).setClosed(true).build();

  DataInputStream emptyWal = new DataInputStream(new ByteArrayInputStream(new byte[0]));
  WalReplication result = replicaSystem.getWalEdits(new ReplicationTarget("peer", "1", Table.ID.of("1")), emptyWal,
      new Path("/accumulo/wals/tserver+port/wal"), closedStatus, Long.MAX_VALUE, new HashSet<>());

  // We stopped because we got to the end of the file: everything counts as consumed, nothing shipped
  Assert.assertEquals(Long.MAX_VALUE, result.entriesConsumed);
  Assert.assertEquals(0, result.walEdits.getEditsSize());
  Assert.assertEquals(0, result.sizeInRecords);
  Assert.assertEquals(0, result.sizeInBytes);
}
Aggregations