Example 1 with DeleteBlockTransactionResult

use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto.DeleteBlockTransactionResult in project ozone by apache.

In the class DeletedBlockLogImpl, the method commitTransactions:

/**
 * {@inheritDoc}
 *
 * @param transactionResults - transaction IDs.
 * @param dnID               - Id of Datanode which has acknowledged
 *                           a delete block command.
 * @throws IOException
 */
@Override
public void commitTransactions(List<DeleteBlockTransactionResult> transactionResults, UUID dnID) {
    lock.lock();
    try {
        ArrayList<Long> txIDsToBeDeleted = new ArrayList<>();
        Set<UUID> dnsWithCommittedTxn;
        for (DeleteBlockTransactionResult transactionResult : transactionResults) {
            if (isTransactionFailed(transactionResult)) {
                metrics.incrBlockDeletionTransactionFailure();
                continue;
            }
            try {
                metrics.incrBlockDeletionTransactionSuccess();
                long txID = transactionResult.getTxID();
                // set of dns which have successfully committed transaction txId.
                dnsWithCommittedTxn = transactionToDNsCommitMap.get(txID);
                final ContainerID containerId = ContainerID.valueOf(transactionResult.getContainerID());
                if (dnsWithCommittedTxn == null) {
                    // Most likely it's a retried delete command response.
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Transaction txId={} commit by dnId={} for containerID={}" + " failed. Corresponding entry not found.", txID, dnID, containerId);
                    }
                    continue;
                }
                dnsWithCommittedTxn.add(dnID);
                final ContainerInfo container = containerManager.getContainer(containerId);
                final Set<ContainerReplica> replicas = containerManager.getContainerReplicas(containerId);
                // Consider purging only when both the replica count and the number of
                // datanodes that committed the transaction reach the required replication factor.
                if (min(replicas.size(), dnsWithCommittedTxn.size()) >= container.getReplicationConfig().getRequiredNodes()) {
                    List<UUID> containerDns = replicas.stream().map(ContainerReplica::getDatanodeDetails).map(DatanodeDetails::getUuid).collect(Collectors.toList());
                    if (dnsWithCommittedTxn.containsAll(containerDns)) {
                        transactionToDNsCommitMap.remove(txID);
                        transactionToRetryCountMap.remove(txID);
                        if (LOG.isDebugEnabled()) {
                            LOG.debug("Purging txId={} from block deletion log", txID);
                        }
                        txIDsToBeDeleted.add(txID);
                    }
                }
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Datanode txId={} containerId={} committed by dnId={}", txID, containerId, dnID);
                }
            } catch (IOException e) {
                LOG.warn("Could not commit delete block transaction: " + transactionResult.getTxID(), e);
            }
        }
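        // Remove all fully acknowledged transactions from the deleted block log store in a single batch.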
        try {
            deletedBlockLogStateManager.removeTransactionsFromDB(txIDsToBeDeleted);
            metrics.incrBlockDeletionTransactionCompleted(txIDsToBeDeleted.size());
        } catch (IOException e) {
            LOG.warn("Could not commit delete block transactions: " + txIDsToBeDeleted, e);
        }
    } finally {
        lock.unlock();
    }
}
Also used : ArrayList(java.util.ArrayList) IOException(java.io.IOException) DeleteBlockTransactionResult(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto.DeleteBlockTransactionResult) ContainerID(org.apache.hadoop.hdds.scm.container.ContainerID) ContainerReplica(org.apache.hadoop.hdds.scm.container.ContainerReplica) ContainerInfo(org.apache.hadoop.hdds.scm.container.ContainerInfo) UUID(java.util.UUID)
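
For context, here is a minimal sketch of how a received deletion ACK could be fed into commitTransactions. The handler class below is illustrative only and not part of Ozone; only getResultsList() and getDnId() on ContainerBlocksDeletionACKProto, and the commitTransactions signature, mirror the example above. DeletedBlockLog (assumed to live in org.apache.hadoop.hdds.scm.block and to declare commitTransactions, as the @Override in Example 1 implies) stands in for DeletedBlockLogImpl.

import java.util.List;
import java.util.UUID;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto.DeleteBlockTransactionResult;
import org.apache.hadoop.hdds.scm.block.DeletedBlockLog;

/** Illustrative sketch: turns a datanode's deletion ACK into a commitTransactions call. */
public class DeletionAckHandlerSketch {

    private final DeletedBlockLog deletedBlockLog;

    public DeletionAckHandlerSketch(DeletedBlockLog deletedBlockLog) {
        this.deletedBlockLog = deletedBlockLog;
    }

    public void onDeletionAck(ContainerBlocksDeletionACKProto ack) {
        // Per-transaction results reported by the datanode.
        List<DeleteBlockTransactionResult> results = ack.getResultsList();
        // UUID of the acknowledging datanode, set by the datanode in processCmd (Example 2).
        UUID dnId = UUID.fromString(ack.getDnId());
        // commitTransactions takes its own lock and handles per-transaction IOExceptions
        // internally, so no try/catch is needed here.
        deletedBlockLog.commitTransactions(results, dnId);
    }
}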

Example 2 with DeleteBlockTransactionResult

use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto.DeleteBlockTransactionResult in project ozone by apache.

In the class DeleteBlocksCommandHandler, the method processCmd:

private void processCmd(DeleteCmdInfo cmd) {
    LOG.debug("Processing block deletion command.");
    ContainerBlocksDeletionACKProto blockDeletionACK = null;
    long startTime = Time.monotonicNow();
    boolean cmdExecuted = false;
    try {
        // move blocks to deleting state.
        // this is a metadata update, the actual deletion happens in another
        // recycling thread.
        List<DeletedBlocksTransaction> containerBlocks = cmd.getCmd().blocksTobeDeleted();
        DeletedContainerBlocksSummary summary = DeletedContainerBlocksSummary.getFrom(containerBlocks);
        LOG.info("Start to delete container blocks, TXIDs={}, " + "numOfContainers={}, numOfBlocks={}", summary.getTxIDSummary(), summary.getNumOfContainers(), summary.getNumOfBlocks());
        ContainerBlocksDeletionACKProto.Builder resultBuilder = ContainerBlocksDeletionACKProto.newBuilder();
        List<Future> futures = new ArrayList<>();
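        // Submit one task per delete transaction; each task records its outcome in the shared resultBuilder.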
        for (int i = 0; i < containerBlocks.size(); i++) {
            DeletedBlocksTransaction tx = containerBlocks.get(i);
            Future future = executor.submit(new ProcessTransactionTask(tx, resultBuilder));
            futures.add(future);
        }
        // Wait for tasks to finish
        futures.forEach(f -> {
            try {
                f.get();
            } catch (InterruptedException | ExecutionException e) {
                LOG.error("task failed.", e);
                Thread.currentThread().interrupt();
            }
        });
        resultBuilder.setDnId(cmd.getContext().getParent().getDatanodeDetails().getUuid().toString());
        blockDeletionACK = resultBuilder.build();
        // TODO Or we should wait until the blocks are actually deleted?
        if (!containerBlocks.isEmpty()) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Sending following block deletion ACK to SCM");
                for (DeleteBlockTransactionResult result : blockDeletionACK.getResultsList()) {
                    LOG.debug("{} : {}", result.getTxID(), result.getSuccess());
                }
            }
        }
        cmdExecuted = true;
    } finally {
        final ContainerBlocksDeletionACKProto deleteAck = blockDeletionACK;
        final boolean status = cmdExecuted;
        Consumer<CommandStatus> statusUpdater = (cmdStatus) -> {
            cmdStatus.setStatus(status);
            ((DeleteBlockCommandStatus) cmdStatus).setBlocksDeletionAck(deleteAck);
        };
        updateCommandStatus(cmd.getContext(), cmd.getCmd(), statusUpdater, LOG);
        long endTime = Time.monotonicNow();
        totalTime += endTime - startTime;
        invocationCount++;
    }
}
Also used : ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) DeletedContainerBlocksSummary(org.apache.hadoop.ozone.container.common.helpers.DeletedContainerBlocksSummary) ContainerBlocksDeletionACKProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto) BlockUtils(org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) StateContext(org.apache.hadoop.ozone.container.common.statemachine.StateContext) LoggerFactory(org.slf4j.LoggerFactory) DeleteBlockCommandStatus(org.apache.hadoop.ozone.protocol.commands.DeleteBlockCommandStatus) ContainerSet(org.apache.hadoop.ozone.container.common.impl.ContainerSet) ArrayList(java.util.ArrayList) BlockData(org.apache.hadoop.ozone.container.common.helpers.BlockData) SCMCommandProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto) KeyValueContainerData(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData) DatanodeStore(org.apache.hadoop.ozone.container.metadata.DatanodeStore) Future(java.util.concurrent.Future) ConfigurationSource(org.apache.hadoop.hdds.conf.ConfigurationSource) DeleteBlocksCommand(org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand) ChunkInfoList(org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList) OzoneContainer(org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer) DeleteBlockTransactionResult(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto.DeleteBlockTransactionResult) DatanodeStoreSchemaTwoImpl(org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaTwoImpl) ReferenceCountedDB(org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB) ExecutorService(java.util.concurrent.ExecutorService) CONTAINER_NOT_FOUND(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_NOT_FOUND) BatchOperation(org.apache.hadoop.hdds.utils.db.BatchOperation) Logger(org.slf4j.Logger) IOException(java.io.IOException) Container(org.apache.hadoop.ozone.container.common.interfaces.Container) DeletedBlocksTransaction(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction) OzoneConsts(org.apache.hadoop.ozone.OzoneConsts) Daemon(org.apache.hadoop.util.Daemon) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) ContainerType(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType) ExecutionException(java.util.concurrent.ExecutionException) TimeUnit(java.util.concurrent.TimeUnit) Consumer(java.util.function.Consumer) List(java.util.List) StorageContainerException(org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException) Table(org.apache.hadoop.hdds.utils.db.Table) CommandStatus(org.apache.hadoop.ozone.protocol.commands.CommandStatus) Time(org.apache.hadoop.util.Time) SCMConnectionManager(org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager) SCMCommand(org.apache.hadoop.ozone.protocol.commands.SCMCommand) SCHEMA_V2(org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2) SCHEMA_V1(org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V1)
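
To make the shape of the ACK entries concrete, here is a small sketch of how a worker, in the spirit of ProcessTransactionTask, might record the outcome of one transaction into the shared builder. The class name, the synchronization, and the success flag are assumptions, as is the availability of getTxID() and getContainerID() on DeletedBlocksTransaction; the setTxID/setContainerID/setSuccess and addResults calls follow from the proto fields read elsewhere in these examples (getTxID, getContainerID, getSuccess, getResultsList).

import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto.DeleteBlockTransactionResult;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;

/** Illustrative sketch: reports one transaction's outcome into the shared ACK builder. */
final class TransactionResultReporterSketch {

    private final ContainerBlocksDeletionACKProto.Builder resultBuilder;

    TransactionResultReporterSketch(ContainerBlocksDeletionACKProto.Builder resultBuilder) {
        this.resultBuilder = resultBuilder;
    }

    /** Record whether the blocks of one transaction were moved to the deleting state. */
    void report(DeletedBlocksTransaction tx, boolean success) {
        DeleteBlockTransactionResult result = DeleteBlockTransactionResult.newBuilder()
            .setTxID(tx.getTxID())
            .setContainerID(tx.getContainerID())
            .setSuccess(success)
            .build();
        // The builder is shared by all submitted tasks, so guard additions with a lock.
        synchronized (resultBuilder) {
            resultBuilder.addResults(result);
        }
    }
}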

Aggregations

IOException (java.io.IOException) 2
ArrayList (java.util.ArrayList) 2
DeleteBlockTransactionResult (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto.DeleteBlockTransactionResult) 2
ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder) 1
List (java.util.List) 1
UUID (java.util.UUID) 1
ExecutionException (java.util.concurrent.ExecutionException) 1
ExecutorService (java.util.concurrent.ExecutorService) 1
Future (java.util.concurrent.Future) 1
LinkedBlockingQueue (java.util.concurrent.LinkedBlockingQueue) 1
ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor) 1
TimeUnit (java.util.concurrent.TimeUnit) 1
Consumer (java.util.function.Consumer) 1
ConfigurationSource (org.apache.hadoop.hdds.conf.ConfigurationSource) 1
ContainerType (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType) 1
CONTAINER_NOT_FOUND (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_NOT_FOUND) 1
ContainerBlocksDeletionACKProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto) 1
DeletedBlocksTransaction (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction) 1
SCMCommandProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto) 1
ContainerID (org.apache.hadoop.hdds.scm.container.ContainerID) 1