
Example 1 with CheckedBiFunction

Use of org.apache.hadoop.hdds.scm.storage.CheckedBiFunction in the Apache Ozone project.

In class XceiverClientSpi, method sendCommand:

/**
 * Sends a given command to the server and returns the reply along with
 * the associated server info.
 * @param request Request
 * @param validators functions to validate the response
 * @return Response to the command
 * @throws IOException if sending fails or a validator rejects the response
 */
public ContainerCommandResponseProto sendCommand(ContainerCommandRequestProto request, List<CheckedBiFunction> validators) throws IOException {
    try {
        XceiverClientReply reply = sendCommandAsync(request);
        ContainerCommandResponseProto responseProto = reply.getResponse().get();
        for (CheckedBiFunction function : validators) {
            function.apply(request, responseProto);
        }
        return responseProto;
    } catch (InterruptedException e) {
        // Re-interrupt the thread while catching InterruptedException
        Thread.currentThread().interrupt();
        throw getIOExceptionForSendCommand(request, e);
    } catch (ExecutionException e) {
        throw getIOExceptionForSendCommand(request, e);
    }
}
Also used: CheckedBiFunction (org.apache.hadoop.hdds.scm.storage.CheckedBiFunction), ExecutionException (java.util.concurrent.ExecutionException), ContainerCommandResponseProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto)
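
In this excerpt a validator is simply a CheckedBiFunction that inspects a (request, response) pair and throws IOException to reject it; note that the signature takes the raw List<CheckedBiFunction> type. A minimal, hypothetical sketch of building a validator and calling sendCommand follows; the sendValidated helper, the SUCCESS check, and the XceiverClientSpi import path are illustrative assumptions, not part of the Ozone source.

import java.io.IOException;
import java.util.Collections;

import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.storage.CheckedBiFunction;

/** Sends a request and rejects any response whose result is not SUCCESS. */
static ContainerCommandResponseProto sendValidated(XceiverClientSpi client,
        ContainerCommandRequestProto request) throws IOException {
    // A validator is a CheckedBiFunction over the (request, response) pair
    // that throws IOException to signal a bad response.
    CheckedBiFunction<ContainerCommandRequestProto,
        ContainerCommandResponseProto, IOException> validator =
            (req, resp) -> {
                if (resp.getResult() != ContainerProtos.Result.SUCCESS) {
                    throw new IOException("Command " + req.getCmdType()
                        + " failed with result " + resp.getResult());
                }
            };
    // sendCommand runs every validator against the response before returning.
    return client.sendCommand(request, Collections.singletonList(validator));
}

Because the validators run before sendCommand returns, a failed check surfaces to the caller as an ordinary IOException.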

Example 2 with CheckedBiFunction

Use of org.apache.hadoop.hdds.scm.storage.CheckedBiFunction in the Apache Ozone project.

In class XceiverClientGrpc, method sendCommandWithRetry:

private XceiverClientReply sendCommandWithRetry(ContainerCommandRequestProto request, List<CheckedBiFunction> validators) throws IOException {
    ContainerCommandResponseProto responseProto = null;
    IOException ioException = null;
    // In case of an exception or an error, we will try to read from the
    // datanodes in the pipeline in a round robin fashion.
    // TODO: cache the correct leader info in here, so that any subsequent calls
    // should first go to leader
    XceiverClientReply reply = new XceiverClientReply(null);
    List<DatanodeDetails> datanodeList = null;
    DatanodeBlockID blockID = null;
    if (request.getCmdType() == ContainerProtos.Type.GetBlock) {
        blockID = request.getGetBlock().getBlockID();
    } else if (request.getCmdType() == ContainerProtos.Type.ReadChunk) {
        blockID = request.getReadChunk().getBlockID();
    } else if (request.getCmdType() == ContainerProtos.Type.GetSmallFile) {
        blockID = request.getGetSmallFile().getBlock().getBlockID();
    }
    if (blockID != null) {
        // Check if the DN to which the GetBlock command was sent has been cached.
        DatanodeDetails cachedDN = getBlockDNcache.get(blockID);
        if (cachedDN != null) {
            datanodeList = pipeline.getNodes();
            int getBlockDNCacheIndex = datanodeList.indexOf(cachedDN);
            if (getBlockDNCacheIndex > 0) {
                // Pull the Cached DN to the top of the DN list
                Collections.swap(datanodeList, 0, getBlockDNCacheIndex);
            }
        }
    }
    if (datanodeList == null) {
        if (topologyAwareRead) {
            datanodeList = pipeline.getNodesInOrder();
        } else {
            datanodeList = pipeline.getNodes();
            // Shuffle datanode list so that clients do not read in the same order
            // every time.
            Collections.shuffle(datanodeList);
        }
    }
    for (DatanodeDetails dn : datanodeList) {
        try {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Executing command {} on datanode {}", processForDebug(request), dn);
            }
            // In case the command gets retried on a 2nd datanode,
            // sendCommandAsyncCall will create a new channel and async stub
            // in case these don't exist for the specific datanode.
            reply.addDatanode(dn);
            responseProto = sendCommandAsync(request, dn).getResponse().get();
            if (validators != null && !validators.isEmpty()) {
                for (CheckedBiFunction validator : validators) {
                    validator.apply(request, responseProto);
                }
            }
            if (request.getCmdType() == ContainerProtos.Type.GetBlock) {
                DatanodeBlockID getBlockID = request.getGetBlock().getBlockID();
                getBlockDNcache.put(getBlockID, dn);
            }
            break;
        } catch (IOException e) {
            ioException = e;
            responseProto = null;
            if (LOG.isDebugEnabled()) {
                LOG.debug("Failed to execute command {} on datanode {}", processForDebug(request), dn, e);
            }
        } catch (ExecutionException e) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Failed to execute command {} on datanode {}", processForDebug(request), dn, e);
            }
            if (Status.fromThrowable(e.getCause()).getCode() == Status.UNAUTHENTICATED.getCode()) {
                throw new SCMSecurityException("Failed to authenticate with GRPC XceiverServer with Ozone block token.");
            }
            ioException = new IOException(e);
            responseProto = null;
        } catch (InterruptedException e) {
            LOG.error("Command execution was interrupted ", e);
            Thread.currentThread().interrupt();
            responseProto = null;
        }
    }
    if (responseProto != null) {
        reply.setResponse(CompletableFuture.completedFuture(responseProto));
        return reply;
    } else {
        Preconditions.checkNotNull(ioException);
        LOG.error("Failed to execute command {} on the pipeline {}.", processForDebug(request), pipeline);
        throw ioException;
    }
}
Also used: CheckedBiFunction (org.apache.hadoop.hdds.scm.storage.CheckedBiFunction), SCMSecurityException (org.apache.hadoop.hdds.security.exception.SCMSecurityException), DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), SupplierWithIOException (org.apache.hadoop.hdds.function.SupplierWithIOException), DatanodeBlockID (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID), ExecutionException (java.util.concurrent.ExecutionException), ContainerCommandResponseProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto)
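
Both excerpts call validator.apply(request, responseProto) and let IOException propagate, which pins down the shape of CheckedBiFunction: a two-argument function that returns nothing and may throw a checked exception. A sketch of the interface consistent with that usage (the actual declaration in hadoop-hdds may differ in details such as type-parameter names):

package org.apache.hadoop.hdds.scm.storage;

/**
 * A two-argument function that returns no value and may throw a checked
 * exception. The clients above use it to validate each
 * (ContainerCommandRequestProto, ContainerCommandResponseProto) pair.
 */
@FunctionalInterface
public interface CheckedBiFunction<LEFT, RIGHT,
        THROWABLE extends Throwable> {
    void apply(LEFT left, RIGHT right) throws THROWABLE;
}

Unlike java.util.function.BiFunction, this variant returns void and declares a throws clause, which is what lets a validator lambda throw IOException directly.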

Aggregations

ExecutionException (java.util.concurrent.ExecutionException): 2 uses
ContainerCommandResponseProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto): 2 uses
CheckedBiFunction (org.apache.hadoop.hdds.scm.storage.CheckedBiFunction): 2 uses
IOException (java.io.IOException): 1 use
InterruptedIOException (java.io.InterruptedIOException): 1 use
SupplierWithIOException (org.apache.hadoop.hdds.function.SupplierWithIOException): 1 use
DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails): 1 use
DatanodeBlockID (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID): 1 use
SCMSecurityException (org.apache.hadoop.hdds.security.exception.SCMSecurityException): 1 use