Example usage of org.apache.accumulo.server.replication.proto.Replication.Status in the Apache Accumulo project:
the MasterClientServiceHandler class, method allReferencesReplicated.
/**
 * Determines whether every replication record relevant to the given table refers only to files
 * that have been fully replicated.
 *
 * @param bs
 *          scanner positioned over replication and order entries in the metadata table
 * @param tableId
 *          id of the table whose replication progress is being checked
 * @param relevantLogs
 *          file names observed when the drain started; files not in this set are ignored
 * @return true if all relevant records are fully replicated, false as soon as one is not
 */
protected boolean allReferencesReplicated(BatchScanner bs, Text tableId, Set<String> relevantLogs) {
  // Holders are reused across iterations to avoid allocating a Text per entry.
  Text rowHolder = new Text(), colfHolder = new Text();
  for (Entry<Key, Value> entry : bs) {
    drainLog.trace("Got key {}", entry.getKey().toStringNoTruncate());
    // The column qualifier carries the table id for these records.
    entry.getKey().getColumnQualifier(rowHolder);
    if (tableId.equals(rowHolder)) {
      // rowHolder is intentionally reused: from here on it holds the row, not the qualifier.
      entry.getKey().getRow(rowHolder);
      entry.getKey().getColumnFamily(colfHolder);
      String file;
      if (colfHolder.equals(ReplicationSection.COLF)) {
        // Replication section: file name is the row with the section prefix stripped.
        file = rowHolder.toString();
        file = file.substring(ReplicationSection.getRowPrefix().length());
      } else if (colfHolder.equals(OrderSection.NAME)) {
        // Order section: file name and closed time are decoded from the key.
        file = OrderSection.getFile(entry.getKey(), rowHolder);
        long timeClosed = OrderSection.getTimeClosed(entry.getKey(), rowHolder);
        drainLog.trace("Order section: {} and {}", timeClosed, file);
      } else {
        // Unrecognized column family: fall back to the raw row as the file name.
        file = rowHolder.toString();
      }
      // Skip files that we didn't observe when we started (new files/data)
      if (!relevantLogs.contains(file)) {
        drainLog.trace("Found file that we didn't care about {}", file);
        continue;
      } else {
        drainLog.trace("Found file that we *do* care about {}", file);
      }
      try {
        Status stat = Status.parseFrom(entry.getValue().get());
        if (!StatusUtil.isFullyReplicated(stat)) {
          drainLog.trace("{} and {} is not replicated", file, ProtobufUtil.toString(stat));
          return false;
        }
        drainLog.trace("{} and {} is replicated", file, ProtobufUtil.toString(stat));
      } catch (InvalidProtocolBufferException e) {
        // NOTE(review): an unparseable Status is only traced and then skipped, which
        // effectively treats the record as replicated — confirm this best-effort
        // behavior is intended rather than returning false conservatively.
        drainLog.trace("Could not parse protobuf for {}", entry.getKey(), e);
      }
    }
  }
  return true;
}
Example usage of org.apache.accumulo.server.replication.proto.Replication.Status in the Apache Accumulo project:
the FinishedWorkUpdater class, method run.
@Override
public void run() {
  // Scans Work-section rows of the replication table, computes the minimum
  // replicated offset per source table across all replication targets, and
  // writes that progress back to the Status section.
  log.debug("Looking for finished replication work");
  if (!ReplicationTable.isOnline(conn)) {
    log.debug("Replication table is not yet online, will retry");
    return;
  }
  BatchScanner bs;
  BatchWriter replBw;
  try {
    bs = ReplicationTable.getBatchScanner(conn, 4);
    replBw = ReplicationTable.getBatchWriter(conn);
  } catch (ReplicationTableOfflineException e) {
    log.debug("Table is no longer online, will retry");
    return;
  }
  // Collapse each file's row into a single serialized entry so that all Work
  // columns for one file can be examined together.
  IteratorSetting cfg = new IteratorSetting(50, WholeRowIterator.class);
  bs.addScanIterator(cfg);
  WorkSection.limit(bs);
  bs.setRanges(Collections.singleton(new Range()));
  try {
    for (Entry<Key, Value> serializedRow : bs) {
      SortedMap<Key, Value> wholeRow;
      try {
        wholeRow = WholeRowIterator.decodeRow(serializedRow.getKey(), serializedRow.getValue());
      } catch (IOException e) {
        // Malformed row: log and move on to the next file.
        log.warn("Could not deserialize whole row with key {}", serializedRow.getKey().toStringNoTruncate(), e);
        continue;
      }
      log.debug("Processing work progress for {} with {} columns", serializedRow.getKey().getRow(), wholeRow.size());
      // Minimum replicated offset seen so far, keyed by source table id.
      Map<Table.ID, Long> tableIdToProgress = new HashMap<>();
      boolean error = false;
      Text buffer = new Text();
      // We want to determine what the minimum point that all Work entries have replicated to
      for (Entry<Key, Value> entry : wholeRow.entrySet()) {
        Status status;
        try {
          status = Status.parseFrom(entry.getValue().get());
        } catch (InvalidProtocolBufferException e) {
          // One bad column invalidates progress for the whole file; skip the row.
          log.warn("Could not deserialize protobuf for {}", entry.getKey(), e);
          error = true;
          break;
        }
        // Get the replication target for the work record
        entry.getKey().getColumnQualifier(buffer);
        ReplicationTarget target = ReplicationTarget.from(buffer);
        // Initialize the value in the map if we don't have one
        if (!tableIdToProgress.containsKey(target.getSourceTableId())) {
          tableIdToProgress.put(target.getSourceTableId(), Long.MAX_VALUE);
        }
        // Find the minimum value for begin (everyone has replicated up to this offset in the file)
        tableIdToProgress.put(target.getSourceTableId(), Math.min(tableIdToProgress.get(target.getSourceTableId()), status.getBegin()));
      }
      if (error) {
        continue;
      }
      // Update the replication table for each source table we found work records for
      for (Entry<Table.ID, Long> entry : tableIdToProgress.entrySet()) {
        // If the progress is 0, then no one has replicated anything, and we don't need to update anything
        if (0 == entry.getValue()) {
          continue;
        }
        // buffer is reused here to hold the file's row for the mutation.
        serializedRow.getKey().getRow(buffer);
        log.debug("For {}, source table ID {} has replicated through {}", serializedRow.getKey().getRow(), entry.getKey(), entry.getValue());
        Mutation replMutation = new Mutation(buffer);
        // Set that we replicated at least this much data, ignoring the other fields
        Status updatedStatus = StatusUtil.replicated(entry.getValue());
        Value serializedUpdatedStatus = ProtobufUtil.toValue(updatedStatus);
        // Pull the sourceTableId into a Text
        Table.ID srcTableId = entry.getKey();
        // Make the mutation
        StatusSection.add(replMutation, srcTableId, serializedUpdatedStatus);
        log.debug("Updating replication status entry for {} with {}", serializedRow.getKey().getRow(), ProtobufUtil.toString(updatedStatus));
        try {
          replBw.addMutation(replMutation);
        } catch (MutationsRejectedException e) {
          // Abort the pass; the finally block still closes both resources.
          log.error("Error writing mutations to update replication Status messages in StatusSection, will retry", e);
          return;
        }
      }
    }
  } finally {
    log.debug("Finished updating files with completed replication work");
    bs.close();
    try {
      // Closing flushes buffered mutations; rejection here is logged, not rethrown.
      replBw.close();
    } catch (MutationsRejectedException e) {
      log.error("Error writing mutations to update replication Status messages in StatusSection, will retry", e);
    }
  }
}
Example usage of org.apache.accumulo.server.replication.proto.Replication.Status in the Apache Accumulo project:
the GarbageCollectionTest class, method openReplicationRecordsPreventDeletion.
@Test
public void openReplicationRecordsPreventDeletion() throws Exception {
  final GarbageCollectionAlgorithm algorithm = new GarbageCollectionAlgorithm();
  final TestGCE env = new TestGCE();

  final String pendingFile = "hdfs://foo.com:6000/accumulo/tables/1/t-00001/A000001.rf";
  final String unreferencedFile = "hdfs://foo.com:6000/accumulo/tables/2/t-00002/A000002.rf";
  env.candidates.add(pendingFile);
  env.candidates.add(unreferencedFile);

  // An open replication record (closed=false, begin < end): replication of this
  // file is still in progress, so the collector must not delete it.
  env.filesToReplicate.put(pendingFile, Status.newBuilder().setClosed(false).setEnd(1000).setBegin(100).build());

  algorithm.collect(env);

  // Only the candidate without a pending replication record is deleted.
  Assert.assertEquals(1, env.deletes.size());
  Assert.assertEquals(unreferencedFile, env.deletes.get(0));
}
Example usage of org.apache.accumulo.server.replication.proto.Replication.Status in the Apache Accumulo project:
the GarbageCollectionTest class, method newReplicationRecordsPreventDeletion.
@Test
public void newReplicationRecordsPreventDeletion() throws Exception {
  final GarbageCollectionAlgorithm algorithm = new GarbageCollectionAlgorithm();
  final TestGCE env = new TestGCE();

  final String newFile = "hdfs://foo.com:6000/accumulo/tables/1/t-00001/A000001.rf";
  final String unreferencedFile = "hdfs://foo.com:6000/accumulo/tables/2/t-00002/A000002.rf";
  env.candidates.add(newFile);
  env.candidates.add(unreferencedFile);

  // A freshly-created replication record: nothing has been replicated yet,
  // so the collector must keep the file around.
  env.filesToReplicate.put(newFile, StatusUtil.fileCreated(System.currentTimeMillis()));

  algorithm.collect(env);

  // Only the candidate without a replication record is deleted.
  Assert.assertEquals(1, env.deletes.size());
  Assert.assertEquals(unreferencedFile, env.deletes.get(0));
}
Example usage of org.apache.accumulo.server.replication.proto.Replication.Status in the Apache Accumulo project:
the ReplicationTableUtilTest class, method replEntryMutation.
@Test
public void replEntryMutation() {
  // A WAL we stopped using must be marked for replication of its entire
  // contents (infinite end, begin 0).
  final Status stat = Status.newBuilder().setBegin(0).setEnd(0).setInfiniteEnd(true).setCreatedTime(System.currentTimeMillis()).build();
  final Path walPath = new Path("file:///accumulo/wal/127.0.0.1+9997" + UUID.randomUUID());
  final KeyExtent extent = new KeyExtent(Table.ID.of("1"), new Text("b"), new Text("a"));

  final Mutation m = ReplicationTableUtil.createUpdateMutation(walPath, ProtobufUtil.toValue(stat), extent);

  // Row is the replication-section prefix followed by the WAL path.
  final Text expectedRow = new Text(MetadataSchema.ReplicationSection.getRowPrefix() + new Text(walPath.toString()));
  Assert.assertEquals(expectedRow, new Text(m.getRow()));

  // Exactly one column update: (COLF, tableId) -> serialized Status, empty visibility.
  Assert.assertEquals(1, m.getUpdates().size());
  final ColumnUpdate col = m.getUpdates().get(0);
  Assert.assertEquals(MetadataSchema.ReplicationSection.COLF, new Text(col.getColumnFamily()));
  Assert.assertEquals(extent.getTableId().canonicalID(), new Text(col.getColumnQualifier()).toString());
  Assert.assertEquals(0, col.getColumnVisibility().length);
  Assert.assertArrayEquals(stat.toByteArray(), col.getValue());
}
Aggregations