Use of org.apache.accumulo.core.replication.ReplicationTarget in project accumulo by apache.
Class RemoveCompleteReplicationRecordsIT, method replicatedClosedRowsAreRemoved().
@Test
public void replicatedClosedRowsAreRemoved() throws Exception {
  BatchWriter replBw = ReplicationTable.getBatchWriter(conn);
  int numRecords = 3;
  Status.Builder builder = Status.newBuilder();
  builder.setClosed(false);
  builder.setEnd(10000);
  builder.setInfiniteEnd(false);
  long time = System.currentTimeMillis();
  // Write out numRecords entries to both replication and metadata tables, none of which are fully replicated
  for (int i = 0; i < numRecords; i++) {
    builder.setCreatedTime(time++);
    String file = "/accumulo/wal/tserver+port/" + UUID.randomUUID();
    Mutation m = new Mutation(file);
    Value v = ProtobufUtil.toValue(builder.setBegin(1000 * (i + 1)).build());
    StatusSection.add(m, createTableId(i), v);
    replBw.addMutation(m);
    m = OrderSection.createMutation(file, time);
    OrderSection.add(m, createTableId(i), v);
    replBw.addMutation(m);
  }
  Set<String> filesToRemove = new HashSet<>();
  // We created two mutations for each file
  numRecords *= 2;
  int finalNumRecords = numRecords;
  // Add two records that we can delete
  String fileToRemove = "/accumulo/wal/tserver+port/" + UUID.randomUUID();
  filesToRemove.add(fileToRemove);
  Mutation m = new Mutation(fileToRemove);
  ReplicationTarget target = new ReplicationTarget("peer1", "5", Table.ID.of("5"));
  Value value = ProtobufUtil.toValue(builder.setBegin(10000).setEnd(10000).setClosed(true).setCreatedTime(time).build());
  StatusSection.add(m, Table.ID.of("5"), value);
  WorkSection.add(m, target.toText(), value);
  replBw.addMutation(m);
  m = OrderSection.createMutation(fileToRemove, time);
  OrderSection.add(m, Table.ID.of("5"), value);
  replBw.addMutation(m);
  time++;
  numRecords += 3;
  fileToRemove = "/accumulo/wal/tserver+port/" + UUID.randomUUID();
  filesToRemove.add(fileToRemove);
  m = new Mutation(fileToRemove);
  value = ProtobufUtil.toValue(builder.setBegin(10000).setEnd(10000).setClosed(true).setCreatedTime(time).build());
  target = new ReplicationTarget("peer1", "6", Table.ID.of("6"));
  StatusSection.add(m, Table.ID.of("6"), value);
  WorkSection.add(m, target.toText(), value);
  replBw.addMutation(m);
  m = OrderSection.createMutation(fileToRemove, time);
  OrderSection.add(m, Table.ID.of("6"), value);
  replBw.addMutation(m);
  time++;
  numRecords += 3;
  replBw.flush();
  // Make sure that we have the expected number of records in both tables
  Assert.assertEquals(numRecords, Iterables.size(ReplicationTable.getScanner(conn)));
  // We should remove the two fully completed records we inserted
  try (BatchScanner bs = ReplicationTable.getBatchScanner(conn, 1)) {
    bs.setRanges(Collections.singleton(new Range()));
    StatusSection.limit(bs);
    WorkSection.limit(bs);
    IteratorSetting cfg = new IteratorSetting(50, WholeRowIterator.class);
    bs.addScanIterator(cfg);
    try {
      Assert.assertEquals(4L, rcrr.removeCompleteRecords(conn, bs, replBw));
    } finally {
      replBw.close();
    }
    int actualRecords = 0;
    for (Entry<Key, Value> entry : ReplicationTable.getScanner(conn)) {
      Assert.assertFalse(filesToRemove.contains(entry.getKey().getRow().toString()));
      actualRecords++;
    }
    Assert.assertEquals(finalNumRecords, actualRecords);
  }
}
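The snippet above leans on a few members of the enclosing test class that are not shown here: conn, the rcrr object under test, and the createTableId helper used inside the loop. A minimal sketch of what they might look like, with names and bodies that are assumptions rather than the verbatim source:

// Hypothetical supporting members for the test above; the real IT class may differ.
private Connector conn;                          // connection supplied by the test harness
private RemoveCompleteReplicationRecords rcrr;   // the class under test

// Derive a distinct, syntactically valid table id for loop iteration i (assumption).
private Table.ID createTableId(int i) {
  return Table.ID.of(Integer.toString(i + 1));
}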
Use of org.apache.accumulo.core.replication.ReplicationTarget in project accumulo by apache.
Class RemoveCompleteReplicationRecordsIT, method partiallyReplicatedEntriesPrecludeRowDeletion().
@Test
public void partiallyReplicatedEntriesPrecludeRowDeletion() throws Exception {
  BatchWriter replBw = ReplicationTable.getBatchWriter(conn);
  int numRecords = 3;
  Status.Builder builder = Status.newBuilder();
  builder.setClosed(false);
  builder.setEnd(10000);
  builder.setInfiniteEnd(false);
  // Write out numRecords entries to both replication and metadata tables, none of which are fully replicated
  for (int i = 0; i < numRecords; i++) {
    String file = "/accumulo/wal/tserver+port/" + UUID.randomUUID();
    Mutation m = new Mutation(file);
    StatusSection.add(m, createTableId(i), ProtobufUtil.toValue(builder.setBegin(1000 * (i + 1)).build()));
    replBw.addMutation(m);
  }
  // Add a record with a status entry and three work entries, one of which is not yet fully replicated
  String fileToRemove = "/accumulo/wal/tserver+port/" + UUID.randomUUID();
  Mutation m = new Mutation(fileToRemove);
  ReplicationTarget target = new ReplicationTarget("peer1", "5", Table.ID.of("5"));
  Value value = ProtobufUtil.toValue(builder.setBegin(10000).setEnd(10000).setClosed(true).build());
  StatusSection.add(m, Table.ID.of("5"), value);
  WorkSection.add(m, target.toText(), value);
  target = new ReplicationTarget("peer2", "5", Table.ID.of("5"));
  WorkSection.add(m, target.toText(), value);
  target = new ReplicationTarget("peer3", "5", Table.ID.of("5"));
  WorkSection.add(m, target.toText(), ProtobufUtil.toValue(builder.setClosed(false).build()));
  replBw.addMutation(m);
  numRecords += 4;
  replBw.flush();
  // Make sure that we have the expected number of records in both tables
  Assert.assertEquals(numRecords, Iterables.size(ReplicationTable.getScanner(conn)));
  // Nothing should be removed because the row still contains an unreplicated work entry
  try (BatchScanner bs = ReplicationTable.getBatchScanner(conn, 1)) {
    bs.setRanges(Collections.singleton(new Range()));
    IteratorSetting cfg = new IteratorSetting(50, WholeRowIterator.class);
    bs.addScanIterator(cfg);
    try {
      Assert.assertEquals(0L, rcrr.removeCompleteRecords(conn, bs, replBw));
    } finally {
      replBw.close();
    }
  }
}
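The assertion of zero removals follows from the per-row criterion the remover applies: every Status value grouped into a row by the WholeRowIterator must be finished before any part of the row is deleted. A rough sketch of that check, assuming the criterion is "closed, and replication has caught up to the end offset"; the parsing and the exact condition here are assumptions for illustration:

// Sketch only: decide whether an entire replication row may be deleted.
static boolean rowIsRemovable(Iterable<Value> statuses) throws InvalidProtocolBufferException {
  for (Value v : statuses) {
    Status status = Status.parseFrom(v.get());
    boolean finished = status.getClosed()
        && (status.getInfiniteEnd() ? status.getBegin() == Long.MAX_VALUE : status.getBegin() >= status.getEnd());
    if (!finished) {
      return false; // one unfinished target (e.g. peer3 above) precludes deleting the row
    }
  }
  return true;
}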
Use of org.apache.accumulo.core.replication.ReplicationTarget in project accumulo by apache.
Class ReplicationIT, method correctClusterNameInWorkEntry().
@Test
public void correctClusterNameInWorkEntry() throws Exception {
  Connector conn = getConnector();
  String table1 = "table1";
  // replication shouldn't be online when we begin
  Assert.assertFalse(ReplicationTable.isOnline(conn));
  // Create the table
  conn.tableOperations().create(table1);
  int attempts = 5;
  while (attempts > 0) {
    try {
      // Enable replication on table1
      conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION.getKey(), "true");
      // Replicate table1 to cluster1 in the table with id of '4'
      conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "4");
      attempts = 0;
    } catch (Exception e) {
      attempts--;
      if (attempts <= 0) {
        throw e;
      }
      sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
    }
  }
  // Write some data to table1
  writeSomeData(conn, table1, 2000, 50);
  conn.tableOperations().flush(table1, null, null, true);
  Table.ID tableId = Table.ID.of(conn.tableOperations().tableIdMap().get(table1));
  Assert.assertNotNull("Table ID was null", tableId);
  // Make sure the replication table exists at this point
  while (!ReplicationTable.isOnline(conn)) {
    sleepUninterruptibly(MILLIS_BETWEEN_REPLICATION_TABLE_ONLINE_CHECKS, TimeUnit.MILLISECONDS);
  }
  Assert.assertTrue("Replication table did not exist", ReplicationTable.isOnline(conn));
  for (int i = 0; i < 5 && !conn.securityOperations().hasTablePermission("root", ReplicationTable.NAME, TablePermission.READ); i++) {
    Thread.sleep(1000);
  }
  Assert.assertTrue(conn.securityOperations().hasTablePermission("root", ReplicationTable.NAME, TablePermission.READ));
  boolean notFound = true;
  for (int i = 0; i < 10 && notFound; i++) {
    try (Scanner s = ReplicationTable.getScanner(conn)) {
      WorkSection.limit(s);
      try {
        Entry<Key, Value> e = Iterables.getOnlyElement(s);
        Text expectedColqual = new ReplicationTarget("cluster1", "4", tableId).toText();
        Assert.assertEquals(expectedColqual, e.getKey().getColumnQualifier());
        notFound = false;
      } catch (NoSuchElementException e) {
        // No work entry yet; fall through and retry
      } catch (IllegalArgumentException e) {
        try (Scanner s2 = ReplicationTable.getScanner(conn)) {
          for (Entry<Key, Value> content : s2) {
            log.info("{} => {}", content.getKey().toStringNoTruncate(), content.getValue());
          }
          Assert.fail("Found more than one work section entry");
        }
      }
      Thread.sleep(500);
    }
  }
  if (notFound) {
    try (Scanner s = ReplicationTable.getScanner(conn)) {
      for (Entry<Key, Value> content : s) {
        log.info("{} => {}", content.getKey().toStringNoTruncate(), content.getValue());
      }
      Assert.assertFalse("Did not find the work entry for the status entry", notFound);
    }
  }
}
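The equality check on the column qualifier works because ReplicationTarget serializes the peer name, remote identifier, and source table id into a single Text value. A small illustration of that round trip, a sketch that assumes the class's from(Text) factory is available:

// Illustration of the qualifier round trip the assertion relies on (sketch).
ReplicationTarget expected = new ReplicationTarget("cluster1", "4", tableId);
Text qualifier = expected.toText();                   // what WorkSection stores as the column qualifier
ReplicationTarget roundTripped = ReplicationTarget.from(qualifier);
Assert.assertEquals(expected, roundTripped);          // peer name, remote id, and source table id all survive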
Use of org.apache.accumulo.core.replication.ReplicationTarget in project accumulo by apache.
Class SequentialWorkAssignerIT, method workAcrossTablesHappensConcurrently().
@Test
public void workAcrossTablesHappensConcurrently() throws Exception {
  ReplicationTarget target1 = new ReplicationTarget("cluster1", "table1", Table.ID.of("1"));
  Text serializedTarget1 = target1.toText();
  ReplicationTarget target2 = new ReplicationTarget("cluster1", "table2", Table.ID.of("2"));
  Text serializedTarget2 = target2.toText();
  // Create two mutations, both of which need replication work done
  BatchWriter bw = ReplicationTable.getBatchWriter(conn);
  // We want the name of file2 to sort before file1
  String filename1 = "z_file1", filename2 = "a_file1";
  String file1 = "/accumulo/wal/tserver+port/" + filename1, file2 = "/accumulo/wal/tserver+port/" + filename2;
  // File1 was closed before file2, however
  Status stat1 = Status.newBuilder().setBegin(0).setEnd(100).setClosed(true).setInfiniteEnd(false).setCreatedTime(250).build();
  Status stat2 = Status.newBuilder().setBegin(0).setEnd(100).setClosed(true).setInfiniteEnd(false).setCreatedTime(500).build();
  Mutation m = new Mutation(file1);
  WorkSection.add(m, serializedTarget1, ProtobufUtil.toValue(stat1));
  bw.addMutation(m);
  m = new Mutation(file2);
  WorkSection.add(m, serializedTarget2, ProtobufUtil.toValue(stat2));
  bw.addMutation(m);
  m = OrderSection.createMutation(file1, stat1.getCreatedTime());
  OrderSection.add(m, target1.getSourceTableId(), ProtobufUtil.toValue(stat1));
  bw.addMutation(m);
  m = OrderSection.createMutation(file2, stat2.getCreatedTime());
  OrderSection.add(m, target2.getSourceTableId(), ProtobufUtil.toValue(stat2));
  bw.addMutation(m);
  bw.close();
  DistributedWorkQueue workQueue = createMock(DistributedWorkQueue.class);
  Map<String, Map<Table.ID, String>> queuedWork = new HashMap<>();
  assigner.setQueuedWork(queuedWork);
  assigner.setWorkQueue(workQueue);
  assigner.setMaxQueueSize(Integer.MAX_VALUE);
  // Make sure we expect the invocations in the correct order (accumulo is sorted)
  workQueue.addWork(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename1, target1), file1);
  expectLastCall().once();
  workQueue.addWork(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename2, target2), file2);
  expectLastCall().once();
  // Both files are queued because they replicate different source tables
  replay(workQueue);
  assigner.createWork();
  verify(workQueue);
  Assert.assertEquals(1, queuedWork.size());
  Assert.assertTrue(queuedWork.containsKey("cluster1"));
  Map<Table.ID, String> cluster1Work = queuedWork.get("cluster1");
  Assert.assertEquals(2, cluster1Work.size());
  Assert.assertTrue(cluster1Work.containsKey(target1.getSourceTableId()));
  Assert.assertEquals(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename1, target1), cluster1Work.get(target1.getSourceTableId()));
  Assert.assertTrue(cluster1Work.containsKey(target2.getSourceTableId()));
  Assert.assertEquals(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename2, target2), cluster1Work.get(target2.getSourceTableId()));
}
Use of org.apache.accumulo.core.replication.ReplicationTarget in project accumulo by apache.
Class SequentialWorkAssignerIT, method createWorkForFilesInCorrectOrder().
@Test
public void createWorkForFilesInCorrectOrder() throws Exception {
  ReplicationTarget target = new ReplicationTarget("cluster1", "table1", Table.ID.of("1"));
  Text serializedTarget = target.toText();
  // Create two mutations, both of which need replication work done
  BatchWriter bw = ReplicationTable.getBatchWriter(conn);
  // We want the name of file2 to sort before file1
  String filename1 = "z_file1", filename2 = "a_file1";
  String file1 = "/accumulo/wal/tserver+port/" + filename1, file2 = "/accumulo/wal/tserver+port/" + filename2;
  // File1 was closed before file2, however
  Status stat1 = Status.newBuilder().setBegin(0).setEnd(100).setClosed(true).setInfiniteEnd(false).setCreatedTime(250).build();
  Status stat2 = Status.newBuilder().setBegin(0).setEnd(100).setClosed(true).setInfiniteEnd(false).setCreatedTime(500).build();
  Mutation m = new Mutation(file1);
  WorkSection.add(m, serializedTarget, ProtobufUtil.toValue(stat1));
  bw.addMutation(m);
  m = new Mutation(file2);
  WorkSection.add(m, serializedTarget, ProtobufUtil.toValue(stat2));
  bw.addMutation(m);
  m = OrderSection.createMutation(file1, stat1.getCreatedTime());
  OrderSection.add(m, target.getSourceTableId(), ProtobufUtil.toValue(stat1));
  bw.addMutation(m);
  m = OrderSection.createMutation(file2, stat2.getCreatedTime());
  OrderSection.add(m, target.getSourceTableId(), ProtobufUtil.toValue(stat2));
  bw.addMutation(m);
  bw.close();
  DistributedWorkQueue workQueue = createMock(DistributedWorkQueue.class);
  Map<String, Map<Table.ID, String>> queuedWork = new HashMap<>();
  assigner.setQueuedWork(queuedWork);
  assigner.setWorkQueue(workQueue);
  assigner.setMaxQueueSize(Integer.MAX_VALUE);
  // Make sure we expect the invocations in the correct order (accumulo is sorted)
  workQueue.addWork(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename1, target), file1);
  expectLastCall().once();
  // file2 is *not* queued because file1 must be replicated first
  replay(workQueue);
  assigner.createWork();
  verify(workQueue);
  Assert.assertEquals(1, queuedWork.size());
  Assert.assertTrue(queuedWork.containsKey("cluster1"));
  Map<Table.ID, String> cluster1Work = queuedWork.get("cluster1");
  Assert.assertEquals(1, cluster1Work.size());
  Assert.assertTrue(cluster1Work.containsKey(target.getSourceTableId()));
  Assert.assertEquals(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename1, target), cluster1Work.get(target.getSourceTableId()));
}
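Contrasting with the previous test, both files here target the same source table, so only the earliest-created one is queued. A condensed sketch (names and structure are assumptions, not the assigner's actual code) of the gate the sequential assigner effectively applies before queueing work for a file:

// Sketch only: queue a file for a target unless that (peer, source table) slot is already occupied.
Map<Table.ID, String> perPeer = queuedWork.computeIfAbsent(target.getPeerName(), p -> new HashMap<>());
if (!perPeer.containsKey(target.getSourceTableId())) {
  String key = DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename1, target);
  workQueue.addWork(key, file1);                 // hand the earliest file to the distributed queue
  perPeer.put(target.getSourceTableId(), key);   // mark the slot as in flight
}
// a_file1 (created later per the OrderSection) stays pending until z_file1 finishes replicating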