Use of org.apache.accumulo.core.client.MutationsRejectedException in project accumulo by apache.
The class MetaDataStateStore, method suspend:
@Override
public void suspend(Collection<TabletLocationState> tablets, Map<TServerInstance, List<Path>> logsForDeadServers, long suspensionTimestamp)
    throws DistributedStoreException {
  BatchWriter writer = createBatchWriter();
  try {
    for (TabletLocationState tls : tablets) {
      Mutation m = new Mutation(tls.extent.getMetadataEntry());
      if (tls.current != null) {
        // Remove the assigned location and carry over any write-ahead logs from the dead server.
        tls.current.clearLocation(m);
        if (logsForDeadServers != null) {
          List<Path> logs = logsForDeadServers.get(tls.current);
          if (logs != null) {
            for (Path log : logs) {
              LogEntry entry = new LogEntry(tls.extent, 0, tls.current.hostPort(), log.toString());
              m.put(entry.getColumnFamily(), entry.getColumnQualifier(), entry.getValue());
            }
          }
        }
        if (suspensionTimestamp >= 0) {
          // Record which server the tablet was suspended from and when.
          SuspendingTServer suspender = new SuspendingTServer(tls.current.getLocation(), suspensionTimestamp);
          suspender.setSuspension(m);
        }
      }
      if (tls.suspend != null && suspensionTimestamp < 0) {
        // A negative timestamp means any existing suspension should be cleared instead.
        SuspendingTServer.clearSuspension(m);
      }
      if (tls.future != null) {
        tls.future.clearFutureLocation(m);
      }
      writer.addMutation(m);
    }
  } catch (Exception ex) {
    throw new DistributedStoreException(ex);
  } finally {
    try {
      writer.close();
    } catch (MutationsRejectedException e) {
      // Closing the BatchWriter flushes buffered mutations; a rejection here is also a store failure.
      throw new DistributedStoreException(e);
    }
  }
}
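The state-store methods in this class do not inspect the rejected mutations themselves; they only wrap the exception. When diagnosing a failure, MutationsRejectedException carries the reasons the writes were refused. A minimal sketch of a more verbose close handler (hypothetical code, not part of MetaDataStateStore; accessor names assume an Accumulo 1.7+ client, where ConstraintViolationSummary lives in org.apache.accumulo.core.data):

try {
  writer.close();
} catch (MutationsRejectedException e) {
  // Constraint violations describe cells that a table constraint refused.
  for (ConstraintViolationSummary cvs : e.getConstraintViolationSummaries()) {
    log.error("Constraint violation while closing writer: {}", cvs);
  }
  // Security failures are reported per tablet.
  e.getSecurityErrorCodes().forEach((tablet, codes) -> log.error("Security errors for {}: {}", tablet, codes));
  throw new DistributedStoreException(e);
}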
Use of org.apache.accumulo.core.client.MutationsRejectedException in project accumulo by apache.
The class MetaDataStateStore, method unsuspend:
@Override
public void unsuspend(Collection<TabletLocationState> tablets) throws DistributedStoreException {
  BatchWriter writer = createBatchWriter();
  try {
    for (TabletLocationState tls : tablets) {
      if (tls.suspend != null) {
        continue;
      }
      Mutation m = new Mutation(tls.extent.getMetadataEntry());
      SuspendingTServer.clearSuspension(m);
      writer.addMutation(m);
    }
  } catch (Exception ex) {
    throw new DistributedStoreException(ex);
  } finally {
    try {
      writer.close();
    } catch (MutationsRejectedException e) {
      throw new DistributedStoreException(e);
    }
  }
}
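suspend and unsuspend above, and setFutureLocations below, all finish with the same close-and-wrap step. A minimal sketch of how that step could be shared (closeWriter is a hypothetical helper, not something the project defines):

private static void closeWriter(BatchWriter writer) throws DistributedStoreException {
  try {
    writer.close();
  } catch (MutationsRejectedException e) {
    // Closing flushes any buffered mutations, so a rejection here is still a store failure.
    throw new DistributedStoreException(e);
  }
}

Each finally block would then reduce to a single closeWriter(writer) call.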
Use of org.apache.accumulo.core.client.MutationsRejectedException in project accumulo by apache.
The class MetaDataStateStore, method setFutureLocations:
@Override
public void setFutureLocations(Collection<Assignment> assignments) throws DistributedStoreException {
  BatchWriter writer = createBatchWriter();
  try {
    for (Assignment assignment : assignments) {
      Mutation m = new Mutation(assignment.tablet.getMetadataEntry());
      // Assigning the tablet supersedes any suspension, so clear it before recording the future location.
      SuspendingTServer.clearSuspension(m);
      assignment.server.putFutureLocation(m);
      writer.addMutation(m);
    }
  } catch (Exception ex) {
    throw new DistributedStoreException(ex);
  } finally {
    try {
      writer.close();
    } catch (MutationsRejectedException e) {
      throw new DistributedStoreException(e);
    }
  }
}
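Because setFutureLocations folds MutationsRejectedException into DistributedStoreException, a caller sees a single checked exception. A sketch of a bounded retry around the call (hypothetical caller and constants; a real assignment loop would also back off between attempts):

for (int attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
  try {
    store.setFutureLocations(assignments);
    break;
  } catch (DistributedStoreException e) {
    log.warn("Failed to record future locations (attempt {} of {}), will retry", attempt, MAX_ATTEMPTS, e);
  }
}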
Use of org.apache.accumulo.core.client.MutationsRejectedException in project accumulo by apache.
The class FinishedWorkUpdater, method run:
@Override
public void run() {
  log.debug("Looking for finished replication work");
  if (!ReplicationTable.isOnline(conn)) {
    log.debug("Replication table is not yet online, will retry");
    return;
  }
  BatchScanner bs;
  BatchWriter replBw;
  try {
    bs = ReplicationTable.getBatchScanner(conn, 4);
    replBw = ReplicationTable.getBatchWriter(conn);
  } catch (ReplicationTableOfflineException e) {
    log.debug("Table is no longer online, will retry");
    return;
  }
  IteratorSetting cfg = new IteratorSetting(50, WholeRowIterator.class);
  bs.addScanIterator(cfg);
  WorkSection.limit(bs);
  bs.setRanges(Collections.singleton(new Range()));
  try {
    for (Entry<Key, Value> serializedRow : bs) {
      SortedMap<Key, Value> wholeRow;
      try {
        wholeRow = WholeRowIterator.decodeRow(serializedRow.getKey(), serializedRow.getValue());
      } catch (IOException e) {
        log.warn("Could not deserialize whole row with key {}", serializedRow.getKey().toStringNoTruncate(), e);
        continue;
      }
      log.debug("Processing work progress for {} with {} columns", serializedRow.getKey().getRow(), wholeRow.size());
      Map<Table.ID, Long> tableIdToProgress = new HashMap<>();
      boolean error = false;
      Text buffer = new Text();
      // Determine the minimum point to which all Work entries for this file have replicated.
      for (Entry<Key, Value> entry : wholeRow.entrySet()) {
        Status status;
        try {
          status = Status.parseFrom(entry.getValue().get());
        } catch (InvalidProtocolBufferException e) {
          log.warn("Could not deserialize protobuf for {}", entry.getKey(), e);
          error = true;
          break;
        }
        // Get the replication target for the work record
        entry.getKey().getColumnQualifier(buffer);
        ReplicationTarget target = ReplicationTarget.from(buffer);
        // Initialize the value in the map if we don't have one
        if (!tableIdToProgress.containsKey(target.getSourceTableId())) {
          tableIdToProgress.put(target.getSourceTableId(), Long.MAX_VALUE);
        }
        // Find the minimum value for begin (everyone has replicated up to this offset in the file)
        tableIdToProgress.put(target.getSourceTableId(), Math.min(tableIdToProgress.get(target.getSourceTableId()), status.getBegin()));
      }
      if (error) {
        continue;
      }
      // Update the replication table for each source table we found work records for
      for (Entry<Table.ID, Long> entry : tableIdToProgress.entrySet()) {
        // If the progress is 0, then no one has replicated anything, and we don't need to update anything
        if (0 == entry.getValue()) {
          continue;
        }
        serializedRow.getKey().getRow(buffer);
        log.debug("For {}, source table ID {} has replicated through {}", serializedRow.getKey().getRow(), entry.getKey(), entry.getValue());
        Mutation replMutation = new Mutation(buffer);
        // Set that we replicated at least this much data, ignoring the other fields
        Status updatedStatus = StatusUtil.replicated(entry.getValue());
        Value serializedUpdatedStatus = ProtobufUtil.toValue(updatedStatus);
        // Pull the source table ID out of the entry
        Table.ID srcTableId = entry.getKey();
        // Make the mutation
        StatusSection.add(replMutation, srcTableId, serializedUpdatedStatus);
        log.debug("Updating replication status entry for {} with {}", serializedRow.getKey().getRow(), ProtobufUtil.toString(updatedStatus));
        try {
          replBw.addMutation(replMutation);
        } catch (MutationsRejectedException e) {
          log.error("Error writing mutations to update replication Status messages in StatusSection, will retry", e);
          return;
        }
      }
    }
  } finally {
    log.debug("Finished updating files with completed replication work");
    bs.close();
    try {
      replBw.close();
    } catch (MutationsRejectedException e) {
      log.error("Error writing mutations to update replication Status messages in StatusSection, will retry", e);
    }
  }
}
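The heart of the loop above is computing, for each source table, the minimum begin offset across every replication target in the row, i.e. the offset that all peers have replicated past. That bookkeeping can also be written with Map.merge; a minimal sketch of just that step, using the same types as the method above, with the protobuf error handling omitted:

Map<Table.ID, Long> tableIdToProgress = new HashMap<>();
for (Entry<Key, Value> entry : wholeRow.entrySet()) {
  Status status = Status.parseFrom(entry.getValue().get());
  ReplicationTarget target = ReplicationTarget.from(entry.getKey().getColumnQualifier());
  // Keep the smallest begin offset seen so far for this source table.
  tableIdToProgress.merge(target.getSourceTableId(), status.getBegin(), Math::min);
}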
Use of org.apache.accumulo.core.client.MutationsRejectedException in project accumulo by apache.
The class StatusMaker, method addStatusRecord:
/**
 * Create a status record in the replication table
 */
protected boolean addStatusRecord(Text file, Table.ID tableId, Value v) {
  try {
    Mutation m = new Mutation(file);
    m.put(StatusSection.NAME, new Text(tableId.getUtf8()), v);
    try {
      replicationWriter.addMutation(m);
    } catch (MutationsRejectedException e) {
      log.warn("Failed to write work mutations for replication, will retry", e);
      return false;
    }
  } finally {
    try {
      replicationWriter.flush();
    } catch (MutationsRejectedException e) {
      log.warn("Failed to write work mutations for replication, will retry", e);
      return false;
    }
  }
  return true;
}
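addStatusRecord reports failure through its boolean return rather than letting MutationsRejectedException escape. A sketch of how a caller might use that signal (hypothetical caller; the real StatusMaker walks metadata entries and invokes this per file):

// Stop processing the current batch if the status write fails;
// the record will be picked up again on the next pass.
if (!addStatusRecord(file, tableId, statusValue)) {
  log.debug("Could not write status record for {}, will retry on next pass", file);
  return;
}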