Search in sources :

Example 1 with ClientReadStatusProto

Use of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto in the Apache Hadoop project.

From the class DataXceiver, method readBlock:

/**
 * Serves a READ_BLOCK request: streams the requested byte range of {@code block}
 * to the connected client via a {@link BlockSender}, then (when the entire
 * requested range was delivered) waits for the client's
 * {@code ClientReadStatusProto} acknowledgement before allowing the connection
 * to be reused.
 *
 * @param block        the block to read from
 * @param blockToken   security token authorizing READ access to the block
 * @param clientName   client identifier; empty for non-client (e.g. datanode) callers
 * @param blockOffset  starting byte offset within the block
 * @param length       number of bytes requested
 * @param sendChecksum whether to send checksum data along with the block data
 * @param cachingStrategy caching hints forwarded to the BlockSender
 * @throws IOException if access is denied, the BlockSender cannot be created,
 *         or a non-socket I/O error occurs while sending
 */
@Override
public void readBlock(final ExtendedBlock block, final Token<BlockTokenIdentifier> blockToken, final String clientName, final long blockOffset, final long length, final boolean sendChecksum, final CachingStrategy cachingStrategy) throws IOException {
    previousOpClientName = clientName;
    long read = 0;
    updateCurrentThreadName("Sending block " + block);
    // baseStream is the raw (unbuffered) socket stream; out wraps it with
    // buffering. BlockSender.sendBlock takes both so it can choose transfer mode.
    OutputStream baseStream = getOutputStream();
    DataOutputStream out = getBufferedOutputStream();
    // Verifies the block token; on failure this writes an error response to
    // `out` and throws, so nothing below runs for unauthorized requests.
    checkAccess(out, true, block, blockToken, Op.READ_BLOCK, BlockTokenIdentifier.AccessMode.READ);
    // send the block
    BlockSender blockSender = null;
    DatanodeRegistration dnR = datanode.getDNRegistrationForBP(block.getBlockPoolId());
    // Client-trace format uses "%d" placeholders filled in later by BlockSender
    // (bytes sent, duration); only built when a real client name is present and
    // trace logging is enabled, otherwise a plain summary string is used.
    final String clientTraceFmt = clientName.length() > 0 && ClientTraceLog.isInfoEnabled() ? String.format(DN_CLIENTTRACE_FORMAT, localAddress, remoteAddress, "%d", "HDFS_READ", clientName, "%d", dnR.getDatanodeUuid(), block, "%d") : dnR + " Served block " + block + " to " + remoteAddress;
    try {
        try {
            blockSender = new BlockSender(block, blockOffset, length, true, false, sendChecksum, datanode, clientTraceFmt, cachingStrategy);
        } catch (IOException e) {
            // Tell the client the read failed before propagating; the message
            // deliberately embeds the exception so the client sees the cause.
            String msg = "opReadBlock " + block + " received exception " + e;
            LOG.info(msg);
            sendResponse(ERROR, msg);
            throw e;
        }
        // send op status
        // Note: the SUCCESS + checksum-info header is written through a fresh
        // DataOutputStream over the base stream, ahead of the block data.
        writeSuccessWithChecksumInfo(blockSender, new DataOutputStream(getOutputStream()));
        long beginRead = Time.monotonicNow();
        // send data
        read = blockSender.sendBlock(out, baseStream, null);
        long duration = Time.monotonicNow() - beginRead;
        if (blockSender.didSendEntireByteRange()) {
            // If client verification fails or the connection is dropped mid-read,
            // the client won't respond — so only expect the trailing status when
            // the full requested range was delivered. The client is expected
            // to respond with a Status enum.
            try {
                ClientReadStatusProto stat = ClientReadStatusProto.parseFrom(PBHelperClient.vintPrefixed(in));
                if (!stat.hasStatus()) {
                    LOG.warn("Client " + peer.getRemoteAddressString() + " did not send a valid status code after reading. " + "Will close connection.");
                    // Closing `out` prevents this connection from being reused
                    // for subsequent ops in an unknown protocol state.
                    IOUtils.closeStream(out);
                }
            } catch (IOException ioe) {
                LOG.debug("Error reading client status response. Will close connection.", ioe);
                IOUtils.closeStream(out);
                incrDatanodeNetworkErrors();
            }
        } else {
            // Partial range sent: no client ack is expected; close so the
            // connection is not reused mid-stream.
            IOUtils.closeStream(out);
        }
        // NOTE(review): the (int) cast truncates `read` for ranges >= 2 GiB;
        // appears constrained by the metrics API signature — confirm upstream.
        datanode.metrics.incrBytesRead((int) read);
        datanode.metrics.incrBlocksRead();
        datanode.metrics.incrTotalReadTime(duration);
    } catch (SocketException ignored) {
        if (LOG.isTraceEnabled()) {
            LOG.trace(dnR + ":Ignoring exception while serving " + block + " to " + remoteAddress, ignored);
        }
        // Its ok for remote side to close the connection anytime.
        // Still counted as a block read for metrics purposes.
        datanode.metrics.incrBlocksRead();
        IOUtils.closeStream(out);
    } catch (IOException ioe) {
        /* What exactly should we do here?
       * Earlier version shutdown() datanode if there is disk error.
       */
        // Socket timeouts are routine (slow clients) and not logged as network
        // errors; everything else is warned and counted before rethrowing.
        if (!(ioe instanceof SocketTimeoutException)) {
            LOG.warn(dnR + ":Got exception while serving " + block + " to " + remoteAddress, ioe);
            incrDatanodeNetworkErrors();
        }
        throw ioe;
    } finally {
        // Always release the BlockSender's underlying volume/file resources,
        // regardless of how the send ended.
        IOUtils.closeStream(blockSender);
    }
    //update metrics
    datanode.metrics.addReadBlockOp(elapsed());
    datanode.metrics.incrReadsFromClient(peer.isLocal(), read);
}
Also used : SocketException(java.net.SocketException) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) SocketTimeoutException(java.net.SocketTimeoutException) DataOutputStream(java.io.DataOutputStream) BufferedOutputStream(java.io.BufferedOutputStream) OutputStream(java.io.OutputStream) ByteString(com.google.protobuf.ByteString) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) ClientReadStatusProto(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto)

Aggregations

ByteString (com.google.protobuf.ByteString)1 BufferedOutputStream (java.io.BufferedOutputStream)1 DataOutputStream (java.io.DataOutputStream)1 IOException (java.io.IOException)1 InterruptedIOException (java.io.InterruptedIOException)1 OutputStream (java.io.OutputStream)1 SocketException (java.net.SocketException)1 SocketTimeoutException (java.net.SocketTimeoutException)1 ClientReadStatusProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto)1 DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration)1