Use of org.apache.accumulo.core.data.Mutation in project accumulo by apache.
From the class SequentialWorkAssignerIT, method workAcrossTablesHappensConcurrently.
@Test
public void workAcrossTablesHappensConcurrently() throws Exception {
ReplicationTarget target1 = new ReplicationTarget("cluster1", "table1", Table.ID.of("1"));
Text serializedTarget1 = target1.toText();
ReplicationTarget target2 = new ReplicationTarget("cluster1", "table2", Table.ID.of("2"));
Text serializedTarget2 = target2.toText();
// Create two mutations, both of which need replication work done
BatchWriter bw = ReplicationTable.getBatchWriter(conn);
// We want the name of file2 to sort before file1
String filename1 = "z_file1", filename2 = "a_file2";
String file1 = "/accumulo/wal/tserver+port/" + filename1, file2 = "/accumulo/wal/tserver+port/" + filename2;
// File1 was closed before file2, however
Status stat1 = Status.newBuilder().setBegin(0).setEnd(100).setClosed(true).setInfiniteEnd(false).setCreatedTime(250).build();
Status stat2 = Status.newBuilder().setBegin(0).setEnd(100).setClosed(true).setInfiniteEnd(false).setCreatedTime(500).build();
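// stat1 has the earlier createdTime (250 vs. 500), so its order record sorts ahead of stat2's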
Mutation m = new Mutation(file1);
WorkSection.add(m, serializedTarget1, ProtobufUtil.toValue(stat1));
bw.addMutation(m);
m = new Mutation(file2);
WorkSection.add(m, serializedTarget2, ProtobufUtil.toValue(stat2));
bw.addMutation(m);
m = OrderSection.createMutation(file1, stat1.getCreatedTime());
OrderSection.add(m, target1.getSourceTableId(), ProtobufUtil.toValue(stat1));
bw.addMutation(m);
m = OrderSection.createMutation(file2, stat2.getCreatedTime());
OrderSection.add(m, target2.getSourceTableId(), ProtobufUtil.toValue(stat2));
bw.addMutation(m);
bw.close();
DistributedWorkQueue workQueue = createMock(DistributedWorkQueue.class);
Map<String, Map<Table.ID, String>> queuedWork = new HashMap<>();
assigner.setQueuedWork(queuedWork);
assigner.setWorkQueue(workQueue);
assigner.setMaxQueueSize(Integer.MAX_VALUE);
// Expect the invocations in the correct order (Accumulo returns keys in sorted order)
workQueue.addWork(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename1, target1), file1);
expectLastCall().once();
workQueue.addWork(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename2, target2), file2);
expectLastCall().once();
// Both files are queued: they replicate to different tables, so neither waits on the other (contrast with createWorkForFilesInCorrectOrder below)
replay(workQueue);
assigner.createWork();
verify(workQueue);
Assert.assertEquals(1, queuedWork.size());
Assert.assertTrue(queuedWork.containsKey("cluster1"));
Map<Table.ID, String> cluster1Work = queuedWork.get("cluster1");
Assert.assertEquals(2, cluster1Work.size());
Assert.assertTrue(cluster1Work.containsKey(target1.getSourceTableId()));
Assert.assertEquals(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename1, target1), cluster1Work.get(target1.getSourceTableId()));
Assert.assertTrue(cluster1Work.containsKey(target2.getSourceTableId()));
Assert.assertEquals(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename2, target2), cluster1Work.get(target2.getSourceTableId()));
}
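The queue keys asserted above are built by DistributedWorkQueueWorkAssignerHelper.getQueueKey. A minimal sketch of their shape, assuming KEY_SEPARATOR is the "|" literal seen in workNotReAdded further down; the helper name here is hypothetical, not the authoritative implementation:
// Hedged sketch: mirrors the key composition used by getQueueKey.
static String sketchQueueKey(String filename, ReplicationTarget target) {
  // filename|peerName|remoteIdentifier|sourceTableId, e.g. "z_file1|cluster1|table1|1"
  return filename + "|" + target.getPeerName() + "|" + target.getRemoteIdentifier() + "|" + target.getSourceTableId();
}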
Use of org.apache.accumulo.core.data.Mutation in project accumulo by apache.
From the class SequentialWorkAssignerIT, method createWorkForFilesInCorrectOrder.
@Test
public void createWorkForFilesInCorrectOrder() throws Exception {
ReplicationTarget target = new ReplicationTarget("cluster1", "table1", Table.ID.of("1"));
Text serializedTarget = target.toText();
// Create two mutations, both of which need replication work done
BatchWriter bw = ReplicationTable.getBatchWriter(conn);
// We want the name of file2 to sort before file1
String filename1 = "z_file1", filename2 = "a_file2";
String file1 = "/accumulo/wal/tserver+port/" + filename1, file2 = "/accumulo/wal/tserver+port/" + filename2;
// File1 was closed before file2, however
Status stat1 = Status.newBuilder().setBegin(0).setEnd(100).setClosed(true).setInfiniteEnd(false).setCreatedTime(250).build();
Status stat2 = Status.newBuilder().setBegin(0).setEnd(100).setClosed(true).setInfiniteEnd(false).setCreatedTime(500).build();
Mutation m = new Mutation(file1);
WorkSection.add(m, serializedTarget, ProtobufUtil.toValue(stat1));
bw.addMutation(m);
m = new Mutation(file2);
WorkSection.add(m, serializedTarget, ProtobufUtil.toValue(stat2));
bw.addMutation(m);
m = OrderSection.createMutation(file1, stat1.getCreatedTime());
OrderSection.add(m, target.getSourceTableId(), ProtobufUtil.toValue(stat1));
bw.addMutation(m);
m = OrderSection.createMutation(file2, stat2.getCreatedTime());
OrderSection.add(m, target.getSourceTableId(), ProtobufUtil.toValue(stat2));
bw.addMutation(m);
bw.close();
DistributedWorkQueue workQueue = createMock(DistributedWorkQueue.class);
Map<String, Map<Table.ID, String>> queuedWork = new HashMap<>();
assigner.setQueuedWork(queuedWork);
assigner.setWorkQueue(workQueue);
assigner.setMaxQueueSize(Integer.MAX_VALUE);
// Expect the invocations in the correct order (Accumulo returns keys in sorted order)
workQueue.addWork(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename1, target), file1);
expectLastCall().once();
// file2 is *not* queued because file1 must be replicated first
replay(workQueue);
assigner.createWork();
verify(workQueue);
Assert.assertEquals(1, queuedWork.size());
Assert.assertTrue(queuedWork.containsKey("cluster1"));
Map<Table.ID, String> cluster1Work = queuedWork.get("cluster1");
Assert.assertEquals(1, cluster1Work.size());
Assert.assertTrue(cluster1Work.containsKey(target.getSourceTableId()));
Assert.assertEquals(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename1, target), cluster1Work.get(target.getSourceTableId()));
}
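The behavior under test: with both files replicating to the same peer and source table, only the file with the earliest order record is queued, and file2 waits until file1 finishes. A minimal sketch of that gate over the same queuedWork structure the assertions inspect (the helper name is hypothetical; the real logic lives in SequentialWorkAssigner):
// Hedged sketch of the per-(peer, source table) gating.
static boolean shouldQueueSketch(Map<String, Map<Table.ID, String>> queuedWork, ReplicationTarget target) {
  Map<Table.ID, String> peerWork = queuedWork.get(target.getPeerName());
  // Queue a file only when nothing is already in flight for this peer and source table
  return peerWork == null || !peerWork.containsKey(target.getSourceTableId());
}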
Use of org.apache.accumulo.core.data.Mutation in project accumulo by apache.
From the class StatusMakerIT, method orderRecordsCreatedWithNoCreatedTime.
@Test
public void orderRecordsCreatedWithNoCreatedTime() throws Exception {
String sourceTable = testName.getMethodName();
conn.tableOperations().create(sourceTable);
ReplicationTableUtil.configureMetadataTable(conn, sourceTable);
BatchWriter bw = conn.createBatchWriter(sourceTable, new BatchWriterConfig());
String walPrefix = "hdfs://localhost:8020/accumulo/wals/tserver+port/";
List<String> files = Arrays.asList(walPrefix + UUID.randomUUID(), walPrefix + UUID.randomUUID(), walPrefix + UUID.randomUUID(), walPrefix + UUID.randomUUID());
Map<String, Long> fileToTableId = new HashMap<>();
Status.Builder statBuilder = Status.newBuilder().setBegin(0).setEnd(0).setInfiniteEnd(true).setClosed(true);
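// Deliberately no setCreatedTime(...): the test verifies that StatusMaker derives one from the WAL's modification time (mocked below)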
Map<String, Long> statuses = new HashMap<>();
long index = 1;
for (String file : files) {
Mutation m = new Mutation(ReplicationSection.getRowPrefix() + file);
m.put(ReplicationSection.COLF, new Text(Long.toString(index)), ProtobufUtil.toValue(statBuilder.build()));
bw.addMutation(m);
fileToTableId.put(file, index);
FileStatus status = EasyMock.mock(FileStatus.class);
EasyMock.expect(status.getModificationTime()).andReturn(index);
EasyMock.replay(status);
statuses.put(file, index);
EasyMock.expect(fs.exists(new Path(file))).andReturn(true);
EasyMock.expect(fs.getFileStatus(new Path(file))).andReturn(status);
index++;
}
EasyMock.replay(fs);
bw.close();
StatusMaker statusMaker = new StatusMaker(conn, fs);
statusMaker.setSourceTableName(sourceTable);
statusMaker.run();
Scanner s = conn.createScanner(sourceTable, Authorizations.EMPTY);
s.setRange(ReplicationSection.getRange());
s.fetchColumnFamily(ReplicationSection.COLF);
Assert.assertEquals(0, Iterables.size(s));
s = ReplicationTable.getScanner(conn);
OrderSection.limit(s);
Iterator<Entry<Key, Value>> iter = s.iterator();
Assert.assertTrue("Found no order records in replication table", iter.hasNext());
Iterator<String> expectedFiles = files.iterator();
Text buff = new Text();
while (expectedFiles.hasNext() && iter.hasNext()) {
String file = expectedFiles.next();
Entry<Key, Value> entry = iter.next();
Assert.assertEquals(file, OrderSection.getFile(entry.getKey(), buff));
OrderSection.getTableId(entry.getKey(), buff);
Assert.assertEquals(fileToTableId.get(file).intValue(), Integer.parseInt(buff.toString()));
Status status = Status.parseFrom(entry.getValue().get());
Assert.assertTrue(status.hasCreatedTime());
Assert.assertEquals((long) statuses.get(file), status.getCreatedTime());
}
Assert.assertFalse("Found more files unexpectedly", expectedFiles.hasNext());
Assert.assertFalse("Found more entries in replication table unexpectedly", iter.hasNext());
s = conn.createScanner(sourceTable, Authorizations.EMPTY);
s.setRange(ReplicationSection.getRange());
s.fetchColumnFamily(ReplicationSection.COLF);
Assert.assertEquals(0, Iterables.size(s));
s = ReplicationTable.getScanner(conn);
StatusSection.limit(s);
iter = s.iterator();
Assert.assertTrue("Found no stat records in replication table", iter.hasNext());
Collections.sort(files);
expectedFiles = files.iterator();
while (expectedFiles.hasNext() && iter.hasNext()) {
String file = expectedFiles.next();
Entry<Key, Value> entry = iter.next();
Status status = Status.parseFrom(entry.getValue().get());
Assert.assertTrue(status.hasCreatedTime());
Assert.assertEquals((long) statuses.get(file), status.getCreatedTime());
}
Assert.assertFalse("Found more files unexpectedly", expectedFiles.hasNext());
Assert.assertFalse("Found more entries in replication table unexpectedly", iter.hasNext());
}
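The mocked FileStatus is the crux of this test: none of the Status messages carry a createdTime, and the assertions show the resulting records end up with the WAL's modification time instead. A hedged sketch of that fallback (hypothetical helper; the real code sits inside StatusMaker):
// Hedged sketch: derive a created time when the Status lacks one.
static long resolveCreatedTimeSketch(Status status, FileSystem fs, String file) throws IOException {
  if (status.hasCreatedTime()) {
    return status.getCreatedTime();
  }
  // No createdTime recorded: fall back to the WAL file's modification time
  return fs.getFileStatus(new Path(file)).getModificationTime();
}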
Use of org.apache.accumulo.core.data.Mutation in project accumulo by apache.
From the class StatusMakerIT, method closedMessagesCreateOrderRecords.
@Test
public void closedMessagesCreateOrderRecords() throws Exception {
String sourceTable = testName.getMethodName();
conn.tableOperations().create(sourceTable);
ReplicationTableUtil.configureMetadataTable(conn, sourceTable);
BatchWriter bw = conn.createBatchWriter(sourceTable, new BatchWriterConfig());
String walPrefix = "hdfs://localhost:8020/accumulo/wals/tserver+port/";
List<String> files = Arrays.asList(walPrefix + UUID.randomUUID(), walPrefix + UUID.randomUUID(), walPrefix + UUID.randomUUID(), walPrefix + UUID.randomUUID());
Map<String, Integer> fileToTableId = new HashMap<>();
Status.Builder statBuilder = Status.newBuilder().setBegin(0).setEnd(0).setInfiniteEnd(true).setClosed(true);
int index = 1;
long time = System.currentTimeMillis();
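// Give each file a strictly increasing createdTime so the resulting order records sort deterministically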
for (String file : files) {
statBuilder.setCreatedTime(time++);
Mutation m = new Mutation(ReplicationSection.getRowPrefix() + file);
m.put(ReplicationSection.COLF, new Text(Integer.toString(index)), ProtobufUtil.toValue(statBuilder.build()));
bw.addMutation(m);
fileToTableId.put(file, index);
index++;
}
bw.close();
StatusMaker statusMaker = new StatusMaker(conn, fs);
statusMaker.setSourceTableName(sourceTable);
statusMaker.run();
Iterator<Entry<Key, Value>> iter;
Iterator<String> expectedFiles;
try (Scanner s = conn.createScanner(sourceTable, Authorizations.EMPTY)) {
s.setRange(ReplicationSection.getRange());
s.fetchColumnFamily(ReplicationSection.COLF);
Assert.assertEquals(0, Iterables.size(s));
}
try (Scanner s = ReplicationTable.getScanner(conn)) {
OrderSection.limit(s);
iter = s.iterator();
Assert.assertTrue("Found no order records in replication table", iter.hasNext());
expectedFiles = files.iterator();
Text buff = new Text();
while (expectedFiles.hasNext() && iter.hasNext()) {
String file = expectedFiles.next();
Entry<Key, Value> entry = iter.next();
Assert.assertEquals(file, OrderSection.getFile(entry.getKey(), buff));
OrderSection.getTableId(entry.getKey(), buff);
Assert.assertEquals(fileToTableId.get(file).intValue(), Integer.parseInt(buff.toString()));
}
Assert.assertFalse("Found more files unexpectedly", expectedFiles.hasNext());
Assert.assertFalse("Found more entries in replication table unexpectedly", iter.hasNext());
}
}
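The same OrderSection accessors used above can be turned around to consume order records, for example to find the earliest file per source table. A hedged usage sketch (the map-building is illustrative, not Accumulo API):
// Hedged sketch: entries arrive in createdTime order, so the first file seen per table is the earliest.
Map<String, String> earliestFilePerTable = new HashMap<>();
try (Scanner scanner = ReplicationTable.getScanner(conn)) {
  OrderSection.limit(scanner);
  Text buff = new Text();
  for (Entry<Key, Value> entry : scanner) {
    String file = OrderSection.getFile(entry.getKey(), buff);
    OrderSection.getTableId(entry.getKey(), buff); // fills buff with the source table id
    earliestFilePerTable.putIfAbsent(buff.toString(), file);
  }
}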
Use of org.apache.accumulo.core.data.Mutation in project accumulo by apache.
From the class UnorderedWorkAssignerIT, method workNotReAdded.
@Test
public void workNotReAdded() throws Exception {
Set<String> queuedWork = new HashSet<>();
assigner.setQueuedWork(queuedWork);
ReplicationTarget target = new ReplicationTarget("cluster1", "table1", Table.ID.of("1"));
String serializedTarget = target.getPeerName() + DistributedWorkQueueWorkAssignerHelper.KEY_SEPARATOR + target.getRemoteIdentifier() + DistributedWorkQueueWorkAssignerHelper.KEY_SEPARATOR + target.getSourceTableId();
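// Pretend wal1 was already queued for this target on a previous pass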
queuedWork.add("wal1|" + serializedTarget);
// Create two mutations, both of which need replication work done
BatchWriter bw = ReplicationTable.getBatchWriter(conn);
String file1 = "/accumulo/wal/tserver+port/wal1";
Mutation m = new Mutation(file1);
WorkSection.add(m, target.toText(), StatusUtil.openWithUnknownLengthValue());
bw.addMutation(m);
bw.close();
DistributedWorkQueue workQueue = createMock(DistributedWorkQueue.class);
assigner.setWorkQueue(workQueue);
assigner.setMaxQueueSize(Integer.MAX_VALUE);
replay(workQueue);
assigner.createWork();
verify(workQueue);
}
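No expectation is recorded on the mock before replay, so verify passes only if createWork never calls addWork: the key for wal1 is already in queuedWork. A hedged sketch of that guard (hypothetical helper; the real check is inside UnorderedWorkAssigner):
// Hedged sketch: work already queued once is not handed to the distributed queue again.
static void queueIfAbsentSketch(Set<String> queuedWork, DistributedWorkQueue workQueue, String queueKey, String path) throws Exception {
  if (queuedWork.contains(queueKey)) {
    return; // already queued; do not re-add
  }
  workQueue.addWork(queueKey, path);
  queuedWork.add(queueKey);
}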