Use of org.apache.accumulo.core.data.TableId in project accumulo by apache.
The class ReplicationIT, method filesClosedAfterUnused.
@Test
public void filesClosedAfterUnused() throws Exception {
  try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
    String table = "table";
    Map<String,String> replicate_props = new HashMap<>();
    replicate_props.put(Property.TABLE_REPLICATION.getKey(), "true");
    replicate_props.put(Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
    client.tableOperations().create(table,
        new NewTableConfiguration().setProperties(replicate_props));
    TableId tableId = TableId.of(client.tableOperations().tableIdMap().get(table));
    assertNotNull(tableId);
    // Point the "cluster1" peer at a mock replica system that just sleeps (50s) instead of replicating
    client.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + "cluster1",
        ReplicaSystemFactory.getPeerConfigurationValue(MockReplicaSystem.class, "50000"));
    // Write a mutation to make a log file
    try (BatchWriter bw = client.createBatchWriter(table)) {
      Mutation m = new Mutation("one");
      m.put("", "", "");
      bw.addMutation(m);
    }
    // Write another to make sure the logger rolls itself?
    try (BatchWriter bw = client.createBatchWriter(table)) {
      Mutation m = new Mutation("three");
      m.put("", "", "");
      bw.addMutation(m);
    }
    try (Scanner s = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
      s.fetchColumnFamily(LogColumnFamily.NAME);
      s.setRange(TabletsSection.getRange(tableId));
      Set<String> wals = new HashSet<>();
      for (Entry<Key,Value> entry : s) {
        LogEntry logEntry = LogEntry.fromMetaWalEntry(entry);
        wals.add(new Path(logEntry.filename).toString());
      }
      log.warn("Found wals {}", wals);
      try (BatchWriter bw = client.createBatchWriter(table)) {
        Mutation m = new Mutation("three");
        byte[] bytes = new byte[1024 * 1024];
        m.put("1".getBytes(), new byte[0], bytes);
        m.put("2".getBytes(), new byte[0], bytes);
        m.put("3".getBytes(), new byte[0], bytes);
        m.put("4".getBytes(), new byte[0], bytes);
        m.put("5".getBytes(), new byte[0], bytes);
        bw.addMutation(m);
      }
      client.tableOperations().flush(table, null, null, true);
      while (!ReplicationTable.isOnline(client)) {
        sleepUninterruptibly(MILLIS_BETWEEN_REPLICATION_TABLE_ONLINE_CHECKS, TimeUnit.MILLISECONDS);
      }
      for (int i = 0; i < 10; i++) {
        try (Scanner s2 = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
          s2.fetchColumnFamily(LogColumnFamily.NAME);
          s2.setRange(TabletsSection.getRange(tableId));
          for (Entry<Key,Value> entry : s2) {
            log.info("{}={}", entry.getKey().toStringNoTruncate(), entry.getValue());
          }
        }
        try (Scanner s3 = ReplicationTable.getScanner(client)) {
          StatusSection.limit(s3);
          Text buff = new Text();
          boolean allReferencedLogsClosed = true;
          int recordsFound = 0;
          for (Entry<Key,Value> e : s3) {
            recordsFound++;
            // Do not reset allReferencedLogsClosed per record; one unclosed log must fail the whole pass
            StatusSection.getFile(e.getKey(), buff);
            String file = buff.toString();
            if (wals.contains(file)) {
              Status stat = Status.parseFrom(e.getValue().get());
              if (!stat.getClosed()) {
                log.info("{} wasn't closed", file);
                allReferencedLogsClosed = false;
              }
            }
          }
          if (recordsFound > 0 && allReferencedLogsClosed) {
            return;
          }
          Thread.sleep(2000);
        } catch (RuntimeException e) {
          Throwable cause = e.getCause();
          if (cause instanceof AccumuloSecurityException) {
            AccumuloSecurityException ase = (AccumuloSecurityException) cause;
            switch (ase.getSecurityErrorCode()) {
              case PERMISSION_DENIED:
                // We tried to read the replication table before the GRANT went through
                Thread.sleep(2000);
                break;
              default:
                throw e;
            }
          }
        }
      }
      fail("We had a file that was referenced but didn't get closed");
    }
  }
}
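The loop above retries up to ten times with a fixed sleep before failing. That poll-until-true pattern can be factored into a small helper; below is a minimal, self-contained sketch in plain Java (the Poll class and await method are hypothetical names, not part of Accumulo):

import java.util.function.BooleanSupplier;

// Hypothetical helper mirroring the poll-and-sleep loop in the test above.
public final class Poll {

  // Re-evaluates the condition, sleeping between attempts, until it returns
  // true or the attempts are exhausted; returns whether it ever became true.
  public static boolean await(BooleanSupplier condition, int attempts, long sleepMillis)
      throws InterruptedException {
    for (int i = 0; i < attempts; i++) {
      if (condition.getAsBoolean()) {
        return true;
      }
      Thread.sleep(sleepMillis);
    }
    return false;
  }
}

With such a helper, the body of the retry loop reduces to a single assertTrue(Poll.await(...)) call around the scan-and-check logic.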
Use of org.apache.accumulo.core.data.TableId in project accumulo by apache.
The class SequentialWorkAssignerIT, method createWorkForFilesInCorrectOrder.
@Test
public void createWorkForFilesInCorrectOrder() throws Exception {
  ReplicationTarget target = new ReplicationTarget("cluster1", "table1", TableId.of("1"));
  Text serializedTarget = target.toText();
  // Create two mutations, both of which need replication work done
  BatchWriter bw = ReplicationTable.getBatchWriter(client);
  // We want the name of file2 to sort before file1
  String filename1 = "z_file1", filename2 = "a_file1";
  String file1 = "/accumulo/wal/tserver+port/" + filename1,
      file2 = "/accumulo/wal/tserver+port/" + filename2;
  // File1 was closed before file2, however
  Status stat1 = Status.newBuilder().setBegin(0).setEnd(100).setClosed(true)
      .setInfiniteEnd(false).setCreatedTime(250).build();
  Status stat2 = Status.newBuilder().setBegin(0).setEnd(100).setClosed(true)
      .setInfiniteEnd(false).setCreatedTime(500).build();
  Mutation m = new Mutation(file1);
  WorkSection.add(m, serializedTarget, ProtobufUtil.toValue(stat1));
  bw.addMutation(m);
  m = new Mutation(file2);
  WorkSection.add(m, serializedTarget, ProtobufUtil.toValue(stat2));
  bw.addMutation(m);
  m = OrderSection.createMutation(file1, stat1.getCreatedTime());
  OrderSection.add(m, target.getSourceTableId(), ProtobufUtil.toValue(stat1));
  bw.addMutation(m);
  m = OrderSection.createMutation(file2, stat2.getCreatedTime());
  OrderSection.add(m, target.getSourceTableId(), ProtobufUtil.toValue(stat2));
  bw.addMutation(m);
  bw.close();
  DistributedWorkQueue workQueue = createMock(DistributedWorkQueue.class);
  Map<String,Map<TableId,String>> queuedWork = new HashMap<>();
  assigner.setQueuedWork(queuedWork);
  assigner.setWorkQueue(workQueue);
  assigner.setMaxQueueSize(Integer.MAX_VALUE);
  // Expect the invocations in the correct order (Accumulo scans return keys in sorted order)
  workQueue.addWork(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename1, target), file1);
  expectLastCall().once();
  // file2 is *not* queued yet because file1, which was created first, must be replicated first
  replay(workQueue);
  assigner.createWork();
  verify(workQueue);
  assertEquals(1, queuedWork.size());
  assertTrue(queuedWork.containsKey("cluster1"));
  Map<TableId,String> cluster1Work = queuedWork.get("cluster1");
  assertEquals(1, cluster1Work.size());
  assertTrue(cluster1Work.containsKey(target.getSourceTableId()));
  assertEquals(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename1, target),
      cluster1Work.get(target.getSourceTableId()));
}
Use of org.apache.accumulo.core.data.TableId in project accumulo by apache.
The class SequentialWorkAssignerIT, method workAcrossTablesHappensConcurrently.
@Test
public void workAcrossTablesHappensConcurrently() throws Exception {
  ReplicationTarget target1 = new ReplicationTarget("cluster1", "table1", TableId.of("1"));
  Text serializedTarget1 = target1.toText();
  ReplicationTarget target2 = new ReplicationTarget("cluster1", "table2", TableId.of("2"));
  Text serializedTarget2 = target2.toText();
  // Create two mutations, both of which need replication work done
  BatchWriter bw = ReplicationTable.getBatchWriter(client);
  // We want the name of file2 to sort before file1
  String filename1 = "z_file1", filename2 = "a_file1";
  String file1 = "/accumulo/wal/tserver+port/" + filename1,
      file2 = "/accumulo/wal/tserver+port/" + filename2;
  // File1 was closed before file2, however
  Status stat1 = Status.newBuilder().setBegin(0).setEnd(100).setClosed(true)
      .setInfiniteEnd(false).setCreatedTime(250).build();
  Status stat2 = Status.newBuilder().setBegin(0).setEnd(100).setClosed(true)
      .setInfiniteEnd(false).setCreatedTime(500).build();
  Mutation m = new Mutation(file1);
  WorkSection.add(m, serializedTarget1, ProtobufUtil.toValue(stat1));
  bw.addMutation(m);
  m = new Mutation(file2);
  WorkSection.add(m, serializedTarget2, ProtobufUtil.toValue(stat2));
  bw.addMutation(m);
  m = OrderSection.createMutation(file1, stat1.getCreatedTime());
  OrderSection.add(m, target1.getSourceTableId(), ProtobufUtil.toValue(stat1));
  bw.addMutation(m);
  m = OrderSection.createMutation(file2, stat2.getCreatedTime());
  OrderSection.add(m, target2.getSourceTableId(), ProtobufUtil.toValue(stat2));
  bw.addMutation(m);
  bw.close();
  DistributedWorkQueue workQueue = createMock(DistributedWorkQueue.class);
  Map<String,Map<TableId,String>> queuedWork = new HashMap<>();
  assigner.setQueuedWork(queuedWork);
  assigner.setWorkQueue(workQueue);
  assigner.setMaxQueueSize(Integer.MAX_VALUE);
  // Expect the invocations in the correct order (Accumulo scans return keys in sorted order)
  workQueue.addWork(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename1, target1), file1);
  expectLastCall().once();
  workQueue.addWork(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename2, target2), file2);
  expectLastCall().once();
  // Both files are queued: they belong to different source tables, so they replicate concurrently
  replay(workQueue);
  assigner.createWork();
  verify(workQueue);
  assertEquals(1, queuedWork.size());
  assertTrue(queuedWork.containsKey("cluster1"));
  Map<TableId,String> cluster1Work = queuedWork.get("cluster1");
  assertEquals(2, cluster1Work.size());
  assertTrue(cluster1Work.containsKey(target1.getSourceTableId()));
  assertEquals(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename1, target1),
      cluster1Work.get(target1.getSourceTableId()));
  assertTrue(cluster1Work.containsKey(target2.getSourceTableId()));
  assertEquals(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename2, target2),
      cluster1Work.get(target2.getSourceTableId()));
}
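The queuedWork map is keyed first by peer name and then by source TableId, which is why two files for different tables can be in flight at once while the previous test allowed only one. Below is a minimal sketch of that bookkeeping, using String in place of TableId to stay self-contained; the class and method names are hypothetical:

import java.util.HashMap;
import java.util.Map;

// Hypothetical sketch of per-peer, per-source-table queue bookkeeping.
final class QueuedWorkBookkeeping {

  // peer name -> (source table id -> queue key currently in flight)
  private final Map<String,Map<String,String>> queuedWork = new HashMap<>();

  // Returns true if the work was queued, false if that table already has a file in flight.
  boolean tryQueue(String peer, String sourceTableId, String queueKey) {
    Map<String,String> perTable = queuedWork.computeIfAbsent(peer, p -> new HashMap<>());
    // At most one file per (peer, source table) is replicated at a time; a second
    // file for the same table must wait until the first completes.
    return perTable.putIfAbsent(sourceTableId, queueKey) == null;
  }
}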
Use of org.apache.accumulo.core.data.TableId in project accumulo by apache.
The class CollectTabletStats, method findTablets.
private static List<KeyExtent> findTablets(ClientContext context, boolean selectLocalTablets,
    String tableName, SortedMap<KeyExtent,String> tabletLocations) throws Exception {
  TableId tableId = context.getTableId(tableName);
  MetadataServicer.forTableId(context, tableId).getTabletLocations(tabletLocations);
  InetAddress localaddress = InetAddress.getLocalHost();
  List<KeyExtent> candidates = new ArrayList<>();
  for (Entry<KeyExtent,String> entry : tabletLocations.entrySet()) {
    String loc = entry.getValue();
    if (loc != null) {
      boolean isLocal = HostAndPort.fromString(loc).getHost().equals(localaddress.getHostName());
      if (selectLocalTablets && isLocal) {
        candidates.add(entry.getKey());
      } else if (!selectLocalTablets && !isLocal) {
        candidates.add(entry.getKey());
      }
    }
  }
  return candidates;
}
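The locality test splits a host:port tablet location and compares the host against the local hostname. A minimal sketch of the same check using only the JDK instead of Guava's HostAndPort; the Locality class and isLocal method are hypothetical names:

import java.net.InetAddress;
import java.net.UnknownHostException;

// Hypothetical JDK-only stand-in for the HostAndPort-based locality check above.
final class Locality {

  static boolean isLocal(String location) throws UnknownHostException {
    // Locations look like "host:port"; strip the port before comparing hostnames.
    int colon = location.lastIndexOf(':');
    String host = colon >= 0 ? location.substring(0, colon) : location;
    return host.equals(InetAddress.getLocalHost().getHostName());
  }
}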
Use of org.apache.accumulo.core.data.TableId in project accumulo by apache.
The class ReplicationOperationsImplIT, method waitsUntilEntriesAreReplicated.
@Test
public void waitsUntilEntriesAreReplicated() throws Exception {
  client.tableOperations().create("foo");
  TableId tableId = TableId.of(client.tableOperations().tableIdMap().get("foo"));
  String file1 = "/accumulo/wals/tserver+port/" + UUID.randomUUID(),
      file2 = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
  Status stat = Status.newBuilder().setBegin(0).setEnd(10000).setInfiniteEnd(false)
      .setClosed(false).build();
  BatchWriter bw = ReplicationTable.getBatchWriter(client);
  Mutation m = new Mutation(file1);
  StatusSection.add(m, tableId, ProtobufUtil.toValue(stat));
  bw.addMutation(m);
  m = new Mutation(file2);
  StatusSection.add(m, tableId, ProtobufUtil.toValue(stat));
  bw.addMutation(m);
  bw.close();
  bw = client.createBatchWriter(MetadataTable.NAME);
  m = new Mutation(ReplicationSection.getRowPrefix() + file1);
  m.put(ReplicationSection.COLF, new Text(tableId.canonical()), ProtobufUtil.toValue(stat));
  bw.addMutation(m);
  m = new Mutation(ReplicationSection.getRowPrefix() + file2);
  m.put(ReplicationSection.COLF, new Text(tableId.canonical()), ProtobufUtil.toValue(stat));
  bw.addMutation(m); // without this, the file2 entry deleted later would never be written
  bw.close();
  final AtomicBoolean done = new AtomicBoolean(false);
  final AtomicBoolean exception = new AtomicBoolean(false);
  final ReplicationOperationsImpl roi = getReplicationOperations();
  Thread t = new Thread(() -> {
    try {
      roi.drain("foo");
    } catch (Exception e) {
      log.error("Got error", e);
      exception.set(true);
    }
    done.set(true);
  });
  t.start();
  // While the replication and metadata records still exist, drain() must not return
  assertFalse(done.get());
  bw = client.createBatchWriter(MetadataTable.NAME);
  m = new Mutation(ReplicationSection.getRowPrefix() + file1);
  m.putDelete(ReplicationSection.COLF, new Text(tableId.canonical()));
  bw.addMutation(m);
  bw.flush();
  assertFalse(done.get());
  m = new Mutation(ReplicationSection.getRowPrefix() + file2);
  m.putDelete(ReplicationSection.COLF, new Text(tableId.canonical()));
  bw.addMutation(m);
  bw.flush();
  bw.close();
  // Removing the metadata entries alone does not finish the drain
  assertFalse(done.get());
  // Remove the replication table entries too
  bw = ReplicationTable.getBatchWriter(client);
  m = new Mutation(file1);
  m.putDelete(StatusSection.NAME, new Text(tableId.canonical()));
  bw.addMutation(m);
  bw.flush();
  assertFalse(done.get());
  m = new Mutation(file2);
  m.putDelete(StatusSection.NAME, new Text(tableId.canonical()));
  bw.addMutation(m);
  bw.flush();
  try {
    t.join(5000);
  } catch (InterruptedException e) {
    fail("ReplicationOperations.drain did not complete");
  }
  // Once both the metadata and replication entries are gone, drain() should return
  assertTrue("Drain never finished", done.get());
  assertFalse("Saw unexpected exception", exception.get());
}
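The test runs the blocking drain() call on a background thread and observes AtomicBoolean flags from the main thread, using a bounded join so a hung drain fails the test instead of hanging it. A minimal, self-contained sketch of that pattern; the class name and the sleep standing in for drain() are hypothetical:

import java.util.concurrent.atomic.AtomicBoolean;

// Hypothetical sketch of the background-thread-plus-flags pattern used in the test above.
final class BackgroundWait {

  public static void main(String[] args) throws InterruptedException {
    AtomicBoolean done = new AtomicBoolean(false);
    AtomicBoolean failed = new AtomicBoolean(false);
    Thread t = new Thread(() -> {
      try {
        Thread.sleep(100); // stands in for the blocking drain() call
      } catch (Exception e) {
        failed.set(true);
      }
      done.set(true);
    });
    t.start();
    t.join(5000); // bounded wait: returns once the thread finishes or 5s elapse
    System.out.println("done=" + done.get() + " failed=" + failed.get());
  }
}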