Example usage of org.apache.accumulo.server.replication.proto.Replication.Status in the Apache Accumulo project: class StatusCombinerMacIT, method test().
@Test
public void test() throws Exception {
    // Verifies that writing a "fully replicated" status for a WAL replaces the
    // earlier "file created" status record in the replication table (the table's
    // combiner/iterator stack is expected to collapse the two writes).
    Connector client = getConnector();
    ClusterUser adminUser = getAdminUser();
    ReplicationTable.setOnline(client);
    client.securityOperations().grantTablePermission(adminUser.getPrincipal(), ReplicationTable.NAME,
        TablePermission.WRITE);

    // Same WAL row is written twice below; name it once.
    final String walRow = "file:/accumulo/wal/HW10447.local+56808/93cdc17e-7521-44fa-87b5-37f45bcb92d3";
    long createTime = System.currentTimeMillis();

    // Seed the replication table with a "file created" status record.
    BatchWriter writer = ReplicationTable.getBatchWriter(client);
    try {
        Mutation mutation = new Mutation(walRow);
        StatusSection.add(mutation, Table.ID.of("1"), StatusUtil.fileCreatedValue(createTime));
        writer.addMutation(mutation);
    } finally {
        writer.close();
    }

    Entry<Key,Value> entry;
    try (Scanner scanner = ReplicationTable.getScanner(client)) {
        // Exactly one record should exist, carrying the creation status.
        entry = Iterables.getOnlyElement(scanner);
        Assert.assertEquals(StatusUtil.fileCreatedValue(createTime), entry.getValue());

        // Now overwrite with a fully-replicated status for the same file/table.
        writer = ReplicationTable.getBatchWriter(client);
        try {
            Mutation mutation = new Mutation(walRow);
            StatusSection.add(mutation, Table.ID.of("1"),
                ProtobufUtil.toValue(StatusUtil.replicated(Long.MAX_VALUE)));
            writer.addMutation(mutation);
        } finally {
            writer.close();
        }
    }

    try (Scanner scanner = ReplicationTable.getScanner(client)) {
        // A single record should remain, with its begin offset advanced to
        // Long.MAX_VALUE (i.e., replication complete).
        entry = Iterables.getOnlyElement(scanner);
        Status combined = Status.parseFrom(entry.getValue().get());
        Assert.assertEquals(Long.MAX_VALUE, combined.getBegin());
    }
}
Example usage of org.apache.accumulo.server.replication.proto.Replication.Status in the Apache Accumulo project: class StatusMakerIT, method closedMessagesAreDeleted().
@Test
public void closedMessagesAreDeleted() throws Exception {
    // After StatusMaker processes closed replication records from the source
    // (metadata-style) table, those records must be deleted from that table.
    String sourceTable = testName.getMethodName();
    conn.tableOperations().create(sourceTable);
    ReplicationTableUtil.configureMetadataTable(conn, sourceTable);

    BatchWriter bw = conn.createBatchWriter(sourceTable, new BatchWriterConfig());
    String walPrefix = "hdfs://localhost:8020/accumulo/wals/tserver+port/";
    Set<String> files = Sets.newHashSet(walPrefix + UUID.randomUUID(), walPrefix + UUID.randomUUID(),
        walPrefix + UUID.randomUUID(), walPrefix + UUID.randomUUID());

    // A closed status record; closed records are the ones StatusMaker removes.
    Status stat = Status.newBuilder().setBegin(0).setEnd(0).setInfiniteEnd(true).setClosed(true)
        .setCreatedTime(System.currentTimeMillis()).build();

    // One replication record per WAL file; the column qualifier is a distinct
    // numeric string standing in for a source table id.
    // (The original also built a fileToTableId map here that was never read;
    // removed as dead code.)
    int index = 1;
    for (String file : files) {
        Mutation m = new Mutation(ReplicationSection.getRowPrefix() + file);
        m.put(ReplicationSection.COLF, new Text(Integer.toString(index)), ProtobufUtil.toValue(stat));
        bw.addMutation(m);
        index++;
    }
    bw.close();

    StatusMaker statusMaker = new StatusMaker(conn, fs);
    statusMaker.setSourceTableName(sourceTable);
    statusMaker.run();

    // Debug aid: dump any replication records that remain after the run.
    try (Scanner s = conn.createScanner(sourceTable, Authorizations.EMPTY)) {
        s.setRange(ReplicationSection.getRange());
        s.fetchColumnFamily(ReplicationSection.COLF);
        for (Entry<Key,Value> e : s) {
            System.out.println(e.getKey().toStringNoTruncate() + " " + e.getValue());
        }
    }

    // All closed records should have been deleted from the source table.
    try (Scanner s = conn.createScanner(sourceTable, Authorizations.EMPTY)) {
        s.setRange(ReplicationSection.getRange());
        s.fetchColumnFamily(ReplicationSection.COLF);
        Assert.assertEquals(0, Iterables.size(s));
    }
}
Example usage of org.apache.accumulo.server.replication.proto.Replication.Status in the Apache Accumulo project: class UnorderedWorkAssignerIT, method createWorkForFilesNeedingIt().
@Test
public void createWorkForFilesNeedingIt() throws Exception {
    // Two WAL files, each with an open (closed=false) status for a different
    // replication target, should each have work queued exactly once.
    ReplicationTarget target1 = new ReplicationTarget("cluster1", "table1", Table.ID.of("1")),
        target2 = new ReplicationTarget("cluster1", "table2", Table.ID.of("2"));
    Text serializedTarget1 = target1.toText(), serializedTarget2 = target2.toText();
    String keyTarget1 = target1.getPeerName() + DistributedWorkQueueWorkAssignerHelper.KEY_SEPARATOR
        + target1.getRemoteIdentifier() + DistributedWorkQueueWorkAssignerHelper.KEY_SEPARATOR
        + target1.getSourceTableId(),
        keyTarget2 = target2.getPeerName() + DistributedWorkQueueWorkAssignerHelper.KEY_SEPARATOR
            + target2.getRemoteIdentifier() + DistributedWorkQueueWorkAssignerHelper.KEY_SEPARATOR
            + target2.getSourceTableId();

    // Long literals use an uppercase 'L' suffix (the original's lowercase 'l'
    // is easily misread as the digit '1').
    Status.Builder builder = Status.newBuilder().setBegin(0).setEnd(0).setInfiniteEnd(true)
        .setClosed(false).setCreatedTime(5L);
    Status status1 = builder.build();
    builder.setCreatedTime(10L);
    Status status2 = builder.build();

    // Create two mutations, both of which need replication work done
    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
    String filename1 = UUID.randomUUID().toString(), filename2 = UUID.randomUUID().toString();
    String file1 = "/accumulo/wal/tserver+port/" + filename1,
        file2 = "/accumulo/wal/tserver+port/" + filename2;

    // Work + order entries for file1 (createdTime 5) ...
    Mutation m = new Mutation(file1);
    WorkSection.add(m, serializedTarget1, ProtobufUtil.toValue(status1));
    bw.addMutation(m);
    m = OrderSection.createMutation(file1, status1.getCreatedTime());
    OrderSection.add(m, target1.getSourceTableId(), ProtobufUtil.toValue(status1));
    bw.addMutation(m);

    // ... and for file2 (createdTime 10, so it orders after file1).
    m = new Mutation(file2);
    WorkSection.add(m, serializedTarget2, ProtobufUtil.toValue(status2));
    bw.addMutation(m);
    m = OrderSection.createMutation(file2, status2.getCreatedTime());
    OrderSection.add(m, target2.getSourceTableId(), ProtobufUtil.toValue(status2));
    bw.addMutation(m);
    bw.close();

    DistributedWorkQueue workQueue = createMock(DistributedWorkQueue.class);
    HashSet<String> queuedWork = new HashSet<>();
    assigner.setQueuedWork(queuedWork);
    assigner.setWorkQueue(workQueue);
    assigner.setMaxQueueSize(Integer.MAX_VALUE);

    // Make sure we expect the invocations in the order they were created
    String key = filename1 + "|" + keyTarget1;
    workQueue.addWork(key, file1);
    expectLastCall().once();
    key = filename2 + "|" + keyTarget2;
    workQueue.addWork(key, file2);
    expectLastCall().once();
    replay(workQueue);

    assigner.createWork();
    verify(workQueue);
}
Example usage of org.apache.accumulo.server.replication.proto.Replication.Status in the Apache Accumulo project: class UnusedWalDoesntCloseReplicationStatusIT, method test().
@Test
public void test() throws Exception {
    // A WAL that contains no mutations for the replicated table (only for a
    // fake, non-existent table id) must not cause the table's replication
    // status record to be marked closed after recovery.
    File accumuloDir = this.getCluster().getConfig().getAccumuloDir();
    final Connector conn = getConnector();
    final String tableName = getUniqueNames(1)[0];

    conn.securityOperations().grantTablePermission("root", MetadataTable.NAME, TablePermission.WRITE);
    conn.tableOperations().create(tableName);

    // Validate the table-id lookup BEFORE dereferencing it. In the original,
    // assertNotNull ran after Table.ID.of()/Integer.parseInt() had already
    // used the value, so a missing id would NPE before the assertion fired.
    final String tableIdString = conn.tableOperations().tableIdMap().get(tableName);
    Assert.assertNotNull("Did not find table ID", tableIdString);
    final Table.ID tableId = Table.ID.of(tableIdString);
    final int numericTableId = Integer.parseInt(tableId.canonicalID());
    // A table id guaranteed not to match the real table.
    final int fakeTableId = numericTableId + 1;

    conn.tableOperations().setProperty(tableName, Property.TABLE_REPLICATION.getKey(), "true");
    conn.tableOperations().setProperty(tableName, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
    // just sleep
    conn.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + "cluster1",
        ReplicaSystemFactory.getPeerConfigurationValue(MockReplicaSystem.class, "50000"));

    FileSystem fs = FileSystem.getLocal(new Configuration());
    File tserverWalDir = new File(accumuloDir, ServerConstants.WAL_DIR + Path.SEPARATOR + "faketserver+port");
    File tserverWal = new File(tserverWalDir, UUID.randomUUID().toString());
    fs.mkdirs(new Path(tserverWalDir.getAbsolutePath()));

    // Make a fake WAL with no data in it for our real table
    FSDataOutputStream out = fs.create(new Path(tserverWal.getAbsolutePath()));
    out.write(DfsLogger.LOG_FILE_HEADER_V3.getBytes(UTF_8));
    DataOutputStream dos = new DataOutputStream(out);
    dos.writeUTF("NullCryptoModule");

    // Fake a single update WAL that has a mutation for another table
    LogFileKey key = new LogFileKey();
    LogFileValue value = new LogFileValue();

    key.event = OPEN;
    key.tserverSession = tserverWal.getAbsolutePath();
    key.filename = tserverWal.getAbsolutePath();
    // NOTE(review): the OPEN record is written to the raw stream while later
    // records go through dos; dos is an unbuffered wrapper of out, so the
    // bytes interleave correctly either way.
    key.write(out);
    value.write(out);

    key.event = LogEvents.DEFINE_TABLET;
    key.tablet = new KeyExtent(Table.ID.of(Integer.toString(fakeTableId)), null, null);
    key.seq = 1L; // uppercase 'L' suffix; lowercase 'l' is easily misread as '1'
    key.tid = 1;
    key.write(dos);
    value.write(dos);

    key.tablet = null;
    key.event = LogEvents.MUTATION;
    key.filename = tserverWal.getAbsolutePath();
    value.mutations = Arrays.asList(new ServerMutation(new Text("row")));
    key.write(dos);
    value.write(dos);

    key.event = LogEvents.COMPACTION_START;
    key.filename = accumuloDir.getAbsolutePath() + "/tables/" + fakeTableId + "/t-000001/A000001.rf";
    value.mutations = Collections.emptyList();
    key.write(dos);
    value.write(dos);

    key.event = LogEvents.COMPACTION_FINISH;
    value.mutations = Collections.emptyList();
    key.write(dos);
    value.write(dos);
    dos.close();

    // Put a real record into the replicated table so it has live data.
    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m = new Mutation("m");
    m.put("m", "m", "M");
    bw.addMutation(m);
    bw.close();

    log.info("State of metadata table after inserting a record");
    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
        for (Entry<Key,Value> entry : s) {
            System.out.println(entry.getKey().toStringNoTruncate() + " " + entry.getValue());
        }
    }

    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        s.setRange(MetadataSchema.ReplicationSection.getRange());
        for (Entry<Key,Value> entry : s) {
            System.out.println(entry.getKey().toStringNoTruncate() + " "
                + ProtobufUtil.toString(Status.parseFrom(entry.getValue().get())));
        }

        log.info("Offline'ing table");
        conn.tableOperations().offline(tableName, true);

        // Add our fake WAL to the log column for this table
        String walUri = tserverWal.toURI().toString();
        KeyExtent extent = new KeyExtent(tableId, null, null);
        bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
        m = new Mutation(extent.getMetadataEntry());
        m.put(MetadataSchema.TabletsSection.LogColumnFamily.NAME, new Text("localhost:12345/" + walUri),
            new Value((walUri + "|1").getBytes(UTF_8)));
        bw.addMutation(m);

        // Add a replication entry for our fake WAL
        m = new Mutation(MetadataSchema.ReplicationSection.getRowPrefix() + new Path(walUri).toString());
        m.put(MetadataSchema.ReplicationSection.COLF, new Text(tableId.getUtf8()),
            new Value(StatusUtil.fileCreated(System.currentTimeMillis()).toByteArray()));
        bw.addMutation(m);
        bw.close();
        log.info("State of metadata after injecting WAL manually");
    }

    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
        for (Entry<Key,Value> entry : s) {
            log.info("{} {}", entry.getKey().toStringNoTruncate(), entry.getValue());
        }
    }

    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        s.setRange(MetadataSchema.ReplicationSection.getRange());
        for (Entry<Key,Value> entry : s) {
            log.info("{} {}", entry.getKey().toStringNoTruncate(),
                ProtobufUtil.toString(Status.parseFrom(entry.getValue().get())));
        }

        log.info("Bringing table online");
        // Bringing the table online triggers log recovery against the fake WAL.
        conn.tableOperations().online(tableName, true);
        Assert.assertEquals(1, Iterables.size(conn.createScanner(tableName, Authorizations.EMPTY)));
        log.info("Table has performed recovery, state of metadata:");
    }

    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
        for (Entry<Key,Value> entry : s) {
            log.info("{} {}", entry.getKey().toStringNoTruncate(), entry.getValue());
        }
    }

    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        s.setRange(MetadataSchema.ReplicationSection.getRange());
        for (Entry<Key,Value> entry : s) {
            Status status = Status.parseFrom(entry.getValue().get());
            log.info("{} {}", entry.getKey().toStringNoTruncate(), ProtobufUtil.toString(status));
            // The WAL held nothing for this table, so its status must stay open.
            Assert.assertFalse("Status record was closed and it should not be", status.getClosed());
        }
    }
}
Example usage of org.apache.accumulo.server.replication.proto.Replication.Status in the Apache Accumulo project: class FinishedWorkUpdaterIT, method chooseMinimumBeginOffsetInfiniteEnd().
@Test
public void chooseMinimumBeginOffsetInfiniteEnd() throws Exception {
    // Three closed, infinite-end work records for the same file (begin offsets
    // 100, 1, 500, one per peer): the updater must record the minimum begin
    // offset (1) in the file's resulting status entry.
    conn.securityOperations().grantTablePermission(conn.whoami(), ReplicationTable.NAME, TablePermission.READ);
    conn.securityOperations().grantTablePermission(conn.whoami(), ReplicationTable.NAME, TablePermission.WRITE);
    ReplicationTable.setOnline(conn);

    String file = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    // @formatter:off
    Status stat1 = Status.newBuilder().setBegin(100).setEnd(1000).setClosed(true).setInfiniteEnd(true).build(),
        stat2 = Status.newBuilder().setBegin(1).setEnd(1000).setClosed(true).setInfiniteEnd(true).build(),
        stat3 = Status.newBuilder().setBegin(500).setEnd(1000).setClosed(true).setInfiniteEnd(true).build();
    ReplicationTarget target1 = new ReplicationTarget("peer1", "table1", Table.ID.of("1")),
        target2 = new ReplicationTarget("peer2", "table2", Table.ID.of("1")),
        target3 = new ReplicationTarget("peer3", "table3", Table.ID.of("1"));
    // @formatter:on

    // Create a single work record for a file to some peer
    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
    Mutation m = new Mutation(file);
    WorkSection.add(m, target1.toText(), ProtobufUtil.toValue(stat1));
    WorkSection.add(m, target2.toText(), ProtobufUtil.toValue(stat2));
    WorkSection.add(m, target3.toText(), ProtobufUtil.toValue(stat3));
    bw.addMutation(m);
    bw.close();

    updater.run();

    try (Scanner s = ReplicationTable.getScanner(conn)) {
        s.setRange(Range.exact(file));
        StatusSection.limit(s);
        Entry<Key,Value> entry = Iterables.getOnlyElement(s);
        // JUnit convention: expected value first, actual second, so failure
        // messages read correctly (the original had the arguments swapped).
        Assert.assertEquals(StatusSection.NAME, entry.getKey().getColumnFamily());
        Assert.assertEquals(target1.getSourceTableId().canonicalID(),
            entry.getKey().getColumnQualifier().toString());
        // We should only rely on the correct begin attribute being returned
        Status actual = Status.parseFrom(entry.getValue().get());
        Assert.assertEquals(1, actual.getBegin());
    }
}
End of aggregated usage examples (Aggregations).