Use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto.DeleteBlockTransactionResult in the Apache Ozone project.
From the class DeletedBlockLogImpl, the method commitTransactions:
/**
 * {@inheritDoc}
 *
 * Records that the given datanode has acknowledged the supplied delete-block
 * transactions. A transaction is purged from the deletion log only once every
 * datanode currently holding a replica of its container has acknowledged it.
 * IOExceptions raised while processing an individual transaction, or while
 * purging the batch from the DB, are logged and swallowed — this method never
 * throws.
 *
 * @param transactionResults - transaction IDs.
 * @param dnID - Id of Datanode which has acknowledged
 * a delete block command.
 */
@Override
public void commitTransactions(List<DeleteBlockTransactionResult> transactionResults, UUID dnID) {
  lock.lock();
  try {
    ArrayList<Long> txIDsToBeDeleted = new ArrayList<>();
    Set<UUID> dnsWithCommittedTxn;
    for (DeleteBlockTransactionResult transactionResult : transactionResults) {
      if (isTransactionFailed(transactionResult)) {
        metrics.incrBlockDeletionTransactionFailure();
        continue;
      }
      try {
        metrics.incrBlockDeletionTransactionSuccess();
        long txID = transactionResult.getTxID();
        // set of dns which have successfully committed transaction txId.
        dnsWithCommittedTxn = transactionToDNsCommitMap.get(txID);
        final ContainerID containerId = ContainerID.valueOf(transactionResult.getContainerID());
        if (dnsWithCommittedTxn == null) {
          // Most likely it's a retried delete command response whose entry
          // has already been purged by an earlier acknowledgement.
          if (LOG.isDebugEnabled()) {
            LOG.debug("Transaction txId={} commit by dnId={} for containerID={}" + " failed. Corresponding entry not found.", txID, dnID, containerId);
          }
          continue;
        }
        dnsWithCommittedTxn.add(dnID);
        final ContainerInfo container = containerManager.getContainer(containerId);
        final Set<ContainerReplica> replicas = containerManager.getContainerReplicas(containerId);
        // Purge only when enough acknowledgements have arrived to possibly
        // cover the replication factor, and every current replica-holding
        // datanode is among the acknowledgers.
        if (min(replicas.size(), dnsWithCommittedTxn.size()) >= container.getReplicationConfig().getRequiredNodes()) {
          List<UUID> containerDns = replicas.stream().map(ContainerReplica::getDatanodeDetails).map(DatanodeDetails::getUuid).collect(Collectors.toList());
          if (dnsWithCommittedTxn.containsAll(containerDns)) {
            transactionToDNsCommitMap.remove(txID);
            transactionToRetryCountMap.remove(txID);
            if (LOG.isDebugEnabled()) {
              LOG.debug("Purging txId={} from block deletion log", txID);
            }
            txIDsToBeDeleted.add(txID);
          }
        }
        if (LOG.isDebugEnabled()) {
          LOG.debug("Datanode txId={} containerId={} committed by dnId={}", txID, containerId, dnID);
        }
      } catch (IOException e) {
        // Parameterized logging (SLF4J): placeholder for the value, throwable
        // as the trailing argument so the stack trace is preserved.
        LOG.warn("Could not commit delete block transaction: {}", transactionResult.getTxID(), e);
      }
    }
    try {
      deletedBlockLogStateManager.removeTransactionsFromDB(txIDsToBeDeleted);
      metrics.incrBlockDeletionTransactionCompleted(txIDsToBeDeleted.size());
    } catch (IOException e) {
      LOG.warn("Could not commit delete block transactions: {}", txIDsToBeDeleted, e);
    }
  } finally {
    lock.unlock();
  }
}
Use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto.DeleteBlockTransactionResult in the Apache Ozone project.
From the class DeleteBlocksCommandHandler, the method processCmd:
/**
 * Processes a block-deletion command: marks the referenced blocks as
 * "deleting" (the actual deletion happens later in a recycling thread),
 * then builds and attaches an acknowledgement proto to the command status.
 * Each transaction is handled by a ProcessTransactionTask submitted to the
 * executor; this method blocks until all tasks complete.
 *
 * @param cmd the delete command wrapper carrying the command and its context.
 */
private void processCmd(DeleteCmdInfo cmd) {
  LOG.debug("Processing block deletion command.");
  ContainerBlocksDeletionACKProto blockDeletionACK = null;
  long startTime = Time.monotonicNow();
  boolean cmdExecuted = false;
  try {
    // move blocks to deleting state.
    // this is a metadata update, the actual deletion happens in another
    // recycling thread.
    List<DeletedBlocksTransaction> containerBlocks = cmd.getCmd().blocksTobeDeleted();
    DeletedContainerBlocksSummary summary = DeletedContainerBlocksSummary.getFrom(containerBlocks);
    LOG.info("Start to delete container blocks, TXIDs={}, " + "numOfContainers={}, numOfBlocks={}", summary.getTxIDSummary(), summary.getNumOfContainers(), summary.getNumOfBlocks());
    ContainerBlocksDeletionACKProto.Builder resultBuilder = ContainerBlocksDeletionACKProto.newBuilder();
    // Parameterized Future<?> (no raw types); presized to the work-item count.
    List<Future<?>> futures = new ArrayList<>(containerBlocks.size());
    for (DeletedBlocksTransaction tx : containerBlocks) {
      futures.add(executor.submit(new ProcessTransactionTask(tx, resultBuilder)));
    }
    // Wait for tasks to finish. Restore the interrupt status only on a
    // genuine interruption; an ExecutionException is a task failure and
    // must not set this thread's interrupt flag.
    for (Future<?> f : futures) {
      try {
        f.get();
      } catch (InterruptedException e) {
        LOG.error("task failed.", e);
        Thread.currentThread().interrupt();
      } catch (ExecutionException e) {
        LOG.error("task failed.", e);
      }
    }
    resultBuilder.setDnId(cmd.getContext().getParent().getDatanodeDetails().getUuid().toString());
    blockDeletionACK = resultBuilder.build();
    // TODO Or we should wait until the blocks are actually deleted?
    if (!containerBlocks.isEmpty()) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Sending following block deletion ACK to SCM");
        for (DeleteBlockTransactionResult result : blockDeletionACK.getResultsList()) {
          LOG.debug("{} : {}", result.getTxID(), result.getSuccess());
        }
      }
    }
    cmdExecuted = true;
  } finally {
    // Always report a status (even on failure), and account the elapsed time.
    final ContainerBlocksDeletionACKProto deleteAck = blockDeletionACK;
    final boolean status = cmdExecuted;
    Consumer<CommandStatus> statusUpdater = (cmdStatus) -> {
      cmdStatus.setStatus(status);
      ((DeleteBlockCommandStatus) cmdStatus).setBlocksDeletionAck(deleteAck);
    };
    updateCommandStatus(cmd.getContext(), cmd.getCmd(), statusUpdater, LOG);
    long endTime = Time.monotonicNow();
    totalTime += endTime - startTime;
    invocationCount++;
  }
}
Aggregations