Use of org.apache.accumulo.core.replication.ReplicationTarget in project accumulo by apache.
From the class UnorderedWorkAssignerIT, method workNotReAdded:
@Test
public void workNotReAdded() throws Exception {
  Set<String> queuedWork = new HashSet<>();
  assigner.setQueuedWork(queuedWork);
  ReplicationTarget target = new ReplicationTarget("cluster1", "table1", Table.ID.of("1"));
  String serializedTarget = target.getPeerName() + DistributedWorkQueueWorkAssignerHelper.KEY_SEPARATOR
      + target.getRemoteIdentifier() + DistributedWorkQueueWorkAssignerHelper.KEY_SEPARATOR + target.getSourceTableId();
  queuedWork.add("wal1|" + serializedTarget);
  // Create a mutation for a file that needs replication work done
  BatchWriter bw = ReplicationTable.getBatchWriter(conn);
  String file1 = "/accumulo/wal/tserver+port/wal1";
  Mutation m = new Mutation(file1);
  WorkSection.add(m, target.toText(), StatusUtil.openWithUnknownLengthValue());
  bw.addMutation(m);
  bw.close();
  // The mock expects no calls: the work is already tracked in queuedWork, so createWork must not re-queue it
  DistributedWorkQueue workQueue = createMock(DistributedWorkQueue.class);
  assigner.setWorkQueue(workQueue);
  assigner.setMaxQueueSize(Integer.MAX_VALUE);
  replay(workQueue);
  assigner.createWork();
  verify(workQueue);
}
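For reference, the queuedWork entry above hand-builds the key format the assigner uses for the distributed work queue: the WAL filename followed by the separator-joined target fields. Below is a minimal sketch of composing and parsing such a key, assuming KEY_SEPARATOR is the "|" character implied by the "wal1|" literal in the test; the two helper methods are hypothetical illustrations, not Accumulo API:

// Sketch: compose and decompose a work-queue key in the format the test builds by hand.
// Assumes DistributedWorkQueueWorkAssignerHelper.KEY_SEPARATOR is "|" (hypothetical helpers, not Accumulo API).
static String queueKey(String filename, ReplicationTarget target) {
  return filename + "|" + target.getPeerName() + "|" + target.getRemoteIdentifier()
      + "|" + target.getSourceTableId().canonicalID();
}

static ReplicationTarget targetFromQueueKey(String key) {
  String[] parts = key.split("\\|", 4); // filename, peer name, remote identifier, source table id
  return new ReplicationTarget(parts[1], parts[2], Table.ID.of(parts[3]));
}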
Use of org.apache.accumulo.core.replication.ReplicationTarget in project accumulo by apache.
From the class WorkMakerIT, method singleUnitMultipleTargets:
@Test
public void singleUnitMultipleTargets() throws Exception {
  String table = testName.getMethodName();
  conn.tableOperations().create(table);
  Table.ID tableId = Table.ID.of(conn.tableOperations().tableIdMap().get(table));
  String file = "hdfs://localhost:8020/accumulo/wal/123456-1234-1234-12345678";
  Mutation m = new Mutation(new Path(file).toString());
  m.put(StatusSection.NAME, new Text(tableId.getUtf8()), StatusUtil.fileCreatedValue(System.currentTimeMillis()));
  BatchWriter bw = ReplicationTable.getBatchWriter(conn);
  bw.addMutation(m);
  bw.flush();
  // Assert that we have one record in the status section
  Set<ReplicationTarget> expectedTargets = new HashSet<>();
  try (Scanner s = ReplicationTable.getScanner(conn)) {
    StatusSection.limit(s);
    Assert.assertEquals(1, Iterables.size(s));
    MockWorkMaker workMaker = new MockWorkMaker(conn);
    Map<String, String> targetClusters = ImmutableMap.of("remote_cluster_1", "4", "remote_cluster_2", "6", "remote_cluster_3", "8");
    for (Entry<String, String> cluster : targetClusters.entrySet()) {
      expectedTargets.add(new ReplicationTarget(cluster.getKey(), cluster.getValue(), tableId));
    }
    workMaker.setBatchWriter(bw);
    workMaker.addWorkRecord(new Text(file), StatusUtil.fileCreatedValue(System.currentTimeMillis()), targetClusters, tableId);
  }
  try (Scanner s = ReplicationTable.getScanner(conn)) {
    WorkSection.limit(s);
    Set<ReplicationTarget> actualTargets = new HashSet<>();
    for (Entry<Key, Value> entry : s) {
      Assert.assertEquals(file, entry.getKey().getRow().toString());
      Assert.assertEquals(WorkSection.NAME, entry.getKey().getColumnFamily());
      ReplicationTarget target = ReplicationTarget.from(entry.getKey().getColumnQualifier());
      actualTargets.add(target);
    }
    for (ReplicationTarget expected : expectedTargets) {
      Assert.assertTrue("Did not find expected target: " + expected, actualTargets.contains(expected));
      actualTargets.remove(expected);
    }
    Assert.assertTrue("Found extra replication work entries: " + actualTargets, actualTargets.isEmpty());
  }
}
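The test above round-trips each target through the WorkSection column qualifier: toText() serializes, ReplicationTarget.from() parses, and equals/hashCode make the HashSet comparison work. The same round trip in isolation, as a sketch using only calls that appear in these tests:

// Serialize a target to the Text used as a WorkSection column qualifier, then rebuild it
ReplicationTarget original = new ReplicationTarget("remote_cluster_1", "4", tableId);
Text qualifier = original.toText();
ReplicationTarget roundTripped = ReplicationTarget.from(qualifier);
Assert.assertEquals(original, roundTripped);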
Use of org.apache.accumulo.core.replication.ReplicationTarget in project accumulo by apache.
From the class FinishedWorkUpdaterIT, method recordsWithProgressUpdateBothTables:
@Test
public void recordsWithProgressUpdateBothTables() throws Exception {
  conn.securityOperations().grantTablePermission(conn.whoami(), ReplicationTable.NAME, TablePermission.READ);
  conn.securityOperations().grantTablePermission(conn.whoami(), ReplicationTable.NAME, TablePermission.WRITE);
  ReplicationTable.setOnline(conn);
  String file = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
  Status stat = Status.newBuilder().setBegin(100).setEnd(200).setClosed(true).setInfiniteEnd(false).build();
  ReplicationTarget target = new ReplicationTarget("peer", "table1", Table.ID.of("1"));
  // Create a single work record for a file to some peer
  BatchWriter bw = ReplicationTable.getBatchWriter(conn);
  Mutation m = new Mutation(file);
  WorkSection.add(m, target.toText(), ProtobufUtil.toValue(stat));
  bw.addMutation(m);
  bw.close();
  updater.run();
  try (Scanner s = ReplicationTable.getScanner(conn)) {
    s.setRange(Range.exact(file));
    StatusSection.limit(s);
    Entry<Key, Value> entry = Iterables.getOnlyElement(s);
    Assert.assertEquals(StatusSection.NAME, entry.getKey().getColumnFamily());
    Assert.assertEquals(target.getSourceTableId().canonicalID(), entry.getKey().getColumnQualifier().toString());
    // We should only rely on the correct begin attribute being returned
    Status actual = Status.parseFrom(entry.getValue().get());
    Assert.assertEquals(stat.getBegin(), actual.getBegin());
  }
}
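Status records are protocol buffers stored as Accumulo Values: ProtobufUtil.toValue() serializes on write, and Status.parseFrom() on the Value's raw bytes deserializes on read. A standalone sketch of the conversion the test performs:

// Build a Status, convert it to a Value for the mutation, and parse it back from raw bytes
Status written = Status.newBuilder().setBegin(100).setEnd(200).setClosed(true).setInfiniteEnd(false).build();
Value asValue = ProtobufUtil.toValue(written);
Status read = Status.parseFrom(asValue.get());
Assert.assertEquals(written.getBegin(), read.getBegin());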
Use of org.apache.accumulo.core.replication.ReplicationTarget in project accumulo by apache.
From the class FinishedWorkUpdaterIT, method chooseMinimumBeginOffset:
@Test
public void chooseMinimumBeginOffset() throws Exception {
  conn.securityOperations().grantTablePermission(conn.whoami(), ReplicationTable.NAME, TablePermission.READ);
  conn.securityOperations().grantTablePermission(conn.whoami(), ReplicationTable.NAME, TablePermission.WRITE);
  ReplicationTable.setOnline(conn);
  String file = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
  // @formatter:off
  Status stat1 = Status.newBuilder().setBegin(100).setEnd(1000).setClosed(true).setInfiniteEnd(false).build(),
      stat2 = Status.newBuilder().setBegin(500).setEnd(1000).setClosed(true).setInfiniteEnd(false).build(),
      stat3 = Status.newBuilder().setBegin(1).setEnd(1000).setClosed(true).setInfiniteEnd(false).build();
  ReplicationTarget target1 = new ReplicationTarget("peer1", "table1", Table.ID.of("1")),
      target2 = new ReplicationTarget("peer2", "table2", Table.ID.of("1")),
      target3 = new ReplicationTarget("peer3", "table3", Table.ID.of("1"));
  // @formatter:on
  // Create work records for the same file to three peers, each with a different begin offset
  BatchWriter bw = ReplicationTable.getBatchWriter(conn);
  Mutation m = new Mutation(file);
  WorkSection.add(m, target1.toText(), ProtobufUtil.toValue(stat1));
  WorkSection.add(m, target2.toText(), ProtobufUtil.toValue(stat2));
  WorkSection.add(m, target3.toText(), ProtobufUtil.toValue(stat3));
  bw.addMutation(m);
  bw.close();
  updater.run();
  try (Scanner s = ReplicationTable.getScanner(conn)) {
    s.setRange(Range.exact(file));
    StatusSection.limit(s);
    Entry<Key, Value> entry = Iterables.getOnlyElement(s);
    Assert.assertEquals(StatusSection.NAME, entry.getKey().getColumnFamily());
    Assert.assertEquals(target1.getSourceTableId().canonicalID(), entry.getKey().getColumnQualifier().toString());
    // We should only rely on the correct begin attribute being returned
    Status actual = Status.parseFrom(entry.getValue().get());
    Assert.assertEquals(1, actual.getBegin());
  }
}
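The final assertion encodes the updater's contract: when several targets have replicated the same file to different offsets, the combined status record keeps the smallest begin, here stat3's value of 1. A sketch of that reduction, mirroring the asserted behavior rather than FinishedWorkUpdater's actual implementation:

// Fold the per-target statuses down to the minimum begin offset, as the test expects
// (requires java.util.Arrays; illustrative only)
long minBegin = Long.MAX_VALUE;
for (Status s : Arrays.asList(stat1, stat2, stat3)) {
  minBegin = Math.min(minBegin, s.getBegin());
}
// minBegin is now 1, matching the value asserted against the combined status record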
Use of org.apache.accumulo.core.replication.ReplicationTarget in project accumulo by apache.
From the class ReplicationUtil, method getPendingReplications:
public Map<ReplicationTarget, Long> getPendingReplications() {
  final Map<ReplicationTarget, Long> counts = new HashMap<>();
  // Read over the queued work
  BatchScanner bs;
  try {
    bs = context.getConnector().createBatchScanner(ReplicationTable.NAME, Authorizations.EMPTY, 4);
  } catch (TableNotFoundException | AccumuloException | AccumuloSecurityException e) {
    log.debug("No replication table exists", e);
    return counts;
  }
  bs.setRanges(Collections.singleton(new Range()));
  WorkSection.limit(bs);
  try {
    Text buffer = new Text();
    for (Entry<Key, Value> entry : bs) {
      Key k = entry.getKey();
      k.getColumnQualifier(buffer);
      ReplicationTarget target = ReplicationTarget.from(buffer);
      // TODO ACCUMULO-2835 once explicit lengths are tracked, we can give size-based estimates instead of just file-based
      Long count = counts.get(target);
      if (null == count) {
        counts.put(target, 1L);
      } else {
        counts.put(target, count + 1);
      }
    }
  } finally {
    bs.close();
  }
  return counts;
}
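The null-check increment in the loop is the pre-Java 8 counting idiom; on Java 8 and later the same accumulation collapses to a single merge call. An equivalent sketch for the loop body:

// Equivalent to the get/put null check above: start at 1L, add 1 on each subsequent hit
counts.merge(target, 1L, Long::sum);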