Usage example of org.apache.accumulo.core.replication.ReplicationTarget in the Apache Accumulo project.
From the class RemoveCompleteReplicationRecords, method removeRowIfNecessary:
/**
 * Deletes the given row from the replication table if, and only if, every status column in the
 * row is safe for removal. Any matching "order" entries are deleted in the same batch.
 *
 * @param bw
 *          writer to the replication table used to submit the deletions
 * @param columns
 *          all columns present in {@code row}, keyed by their full {@link Key}
 * @param row
 *          the row under consideration for removal
 * @param colf
 *          scratch buffer that receives each entry's column family (mutated as a side effect)
 * @param colq
 *          scratch buffer that receives each entry's column qualifier (mutated as a side effect)
 * @return the number of columns deleted, or 0 if the row was kept or the deletion failed
 */
protected long removeRowIfNecessary(BatchWriter bw, SortedMap<Key, Value> columns, Text row, Text colf, Text colq) {
  long recordsRemoved = 0;
  if (columns.isEmpty()) {
    return recordsRemoved;
  }

  Mutation m = new Mutation(row);
  // Tracks, per source table, the createdTime seen on this row's statuses so the
  // corresponding OrderSection entry can also be deleted below.
  Map<Table.ID, Long> tableToTimeCreated = new HashMap<>();
  for (Entry<Key, Value> entry : columns.entrySet()) {
    Status status;
    try {
      status = Status.parseFrom(entry.getValue().get());
    } catch (InvalidProtocolBufferException e) {
      // Skip the column we cannot interpret; the rest of the row may still be removable
      log.error("Encountered unparsable protobuf for key: {}", entry.getKey().toStringNoTruncate());
      continue;
    }

    // If a column in the row isn't ready for removal, we keep the whole row
    if (!StatusUtil.isSafeForRemoval(status)) {
      return 0L;
    }

    Key k = entry.getKey();
    k.getColumnFamily(colf);
    k.getColumnQualifier(colq);

    log.debug("Removing {} {}:{} from replication table", row, colf, colq);

    m.putDelete(colf, colq);

    // The source table id is encoded differently depending on which section the column is in
    Table.ID tableId;
    if (StatusSection.NAME.equals(colf)) {
      tableId = Table.ID.of(colq.toString());
    } else if (WorkSection.NAME.equals(colf)) {
      ReplicationTarget target = ReplicationTarget.from(colq);
      tableId = target.getSourceTableId();
    } else {
      throw new RuntimeException("Got unexpected column family: " + colf);
    }

    if (status.hasCreatedTime()) {
      Long timeClosed = tableToTimeCreated.get(tableId);
      if (null == timeClosed) {
        tableToTimeCreated.put(tableId, status.getCreatedTime());
      } else if (timeClosed != status.getCreatedTime()) {
        // All statuses for one table in a row should share a createdTime; flag disagreement
        log.warn("Found multiple values for timeClosed for {}: {} and {}", row, timeClosed, status.getCreatedTime());
      }
    }

    recordsRemoved++;
  }

  List<Mutation> mutations = new ArrayList<>();
  mutations.add(m);
  for (Entry<Table.ID, Long> entry : tableToTimeCreated.entrySet()) {
    log.info("Removing order mutation for table {} at {} for {}", entry.getKey(), entry.getValue(), row.toString());
    Mutation orderMutation = OrderSection.createMutation(row.toString(), entry.getValue());
    orderMutation.putDelete(OrderSection.NAME, new Text(entry.getKey().getUtf8()));
    mutations.add(orderMutation);
  }

  // Submit the row deletion and its order deletions together, or not at all.
  try {
    bw.addMutations(mutations);
    bw.flush();
  } catch (MutationsRejectedException e) {
    log.error("Could not submit mutation to remove columns for {} in replication table", row, e);
    return 0L;
  }

  return recordsRemoved;
}
Usage example of org.apache.accumulo.core.replication.ReplicationTarget in the Apache Accumulo project.
From the class WorkMaker, method addWorkRecord:
/**
 * Writes one work record per configured target for the given file. Each target is serialized
 * into the qualifier of a WorkSection column on a single mutation for {@code file}.
 */
protected void addWorkRecord(Text file, Value v, Map<String, String> targets, Table.ID sourceTableId) {
  log.info("Adding work records for {} to targets {}", file, targets);
  try {
    Mutation workMutation = new Mutation(file);

    // Reused across iterations to avoid per-target allocations
    ReplicationTarget replicationTarget = new ReplicationTarget();
    DataOutputBuffer serialized = new DataOutputBuffer();
    Text qualifier = new Text();

    for (Entry<String, String> peerToRemoteTable : targets.entrySet()) {
      serialized.reset();

      // Populate the writable with this peer's coordinates
      replicationTarget.setPeerName(peerToRemoteTable.getKey());
      replicationTarget.setRemoteIdentifier(peerToRemoteTable.getValue());
      replicationTarget.setSourceTableId(sourceTableId);
      replicationTarget.write(serialized);

      // Copy the serialized bytes into a Text for use as the column qualifier
      qualifier.set(serialized.getData(), 0, serialized.getLength());

      // Record the work entry on the mutation
      WorkSection.add(workMutation, qualifier, v);
    }

    try {
      writer.addMutation(workMutation);
    } catch (MutationsRejectedException e) {
      log.warn("Failed to write work mutations for replication, will retry", e);
    }
  } catch (IOException e) {
    log.warn("Failed to serialize data to Text, will retry", e);
  } finally {
    try {
      writer.flush();
    } catch (MutationsRejectedException e) {
      log.warn("Failed to write work mutations for replication, will retry", e);
    }
  }
}
Usage example of org.apache.accumulo.core.replication.ReplicationTarget in the Apache Accumulo project.
From the class DistributedWorkQueueWorkAssignerHelperTest, method queueKeySerialization:
@Test
public void queueKeySerialization() {
  // A representative WAL path and replication target to round-trip through a queue key
  String walPath = new Path("/accumulo/wals/tserver+port/" + UUID.randomUUID().toString()).toString();
  ReplicationTarget expected = new ReplicationTarget("cluster1", "table1", Table.ID.of("1"));

  // Serialize to a queue key, then parse it back out
  String queueKey = DistributedWorkQueueWorkAssignerHelper.getQueueKey(walPath, expected);
  Entry<String, ReplicationTarget> roundTripped = DistributedWorkQueueWorkAssignerHelper.fromQueueKey(queueKey);

  // Both halves must survive the round trip unchanged
  Assert.assertEquals(walPath, roundTripped.getKey());
  Assert.assertEquals(expected, roundTripped.getValue());
}
Usage example of org.apache.accumulo.core.replication.ReplicationTarget in the Apache Accumulo project.
From the class Metrics2ReplicationMetrics, method getNumFilesPendingReplication:
/**
 * Sums, over every configured replication target, the number of files still awaiting
 * replication to that target. Targets with no pending files contribute zero.
 */
protected int getNumFilesPendingReplication() {
  // Pending file counts, keyed by target
  Map<ReplicationTarget, Long> pendingPerTarget = replicationUtil.getPendingReplications();

  int filesPending = 0;
  for (ReplicationTarget target : replicationUtil.getReplicationTargets()) {
    // Targets absent from the map simply have nothing pending
    filesPending += pendingPerTarget.getOrDefault(target, 0L);
  }
  return filesPending;
}
Usage example of org.apache.accumulo.core.replication.ReplicationTarget in the Apache Accumulo project.
From the class ReplicationMetrics, method getNumFilesPendingReplication:
@Override
public int getNumFilesPendingReplication() {
  // Nothing can be pending while the replication table itself is not online
  if (Tables.getTableState(master.getInstance(), ReplicationTable.ID) != TableState.ONLINE) {
    return 0;
  }

  // ... nor when no replication peers are configured at all
  if (replicationUtil.getPeers().isEmpty()) {
    return 0;
  }

  // Pending file counts, keyed by target
  Map<ReplicationTarget, Long> pendingPerTarget = replicationUtil.getPendingReplications();

  // Sum pending replication over all configured targets; absent targets contribute zero
  int filesPending = 0;
  for (ReplicationTarget target : replicationUtil.getReplicationTargets()) {
    filesPending += pendingPerTarget.getOrDefault(target, 0L);
  }
  return filesPending;
}
Aggregations