
Example 1 with ReadOpChecksumInfoProto

Use of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto in project hadoop by apache.

From class BlockReaderRemote, method newBlockReader:

/**
   * Create a new BlockReader specifically to satisfy a read.
   * This method also sends the OP_READ_BLOCK request.
   *
   * @param file  File location
   * @param block  The block object
   * @param blockToken  The block token for security
   * @param startOffset  The read offset, relative to block head
   * @param len  The number of bytes to read
   * @param verifyChecksum  Whether to verify checksum
   * @param clientName  Client name
   * @param peer  The Peer to use
   * @param datanodeID  The DatanodeID this peer is connected to
   * @param peerCache  Cache to which the peer is returned on close
   * @param cachingStrategy  Caching strategy to request for this read
   * @param tracer  Tracer used to trace the read
   * @param networkDistance  Network distance between client and datanode
   * @return New BlockReader instance.
   * @throws IOException if setting up the read fails
   */
public static BlockReader newBlockReader(String file, ExtendedBlock block,
    Token<BlockTokenIdentifier> blockToken, long startOffset, long len,
    boolean verifyChecksum, String clientName, Peer peer,
    DatanodeID datanodeID, PeerCache peerCache,
    CachingStrategy cachingStrategy, Tracer tracer,
    int networkDistance) throws IOException {
    // in and out will be closed when sock is closed (by the caller)
    final DataOutputStream out = new DataOutputStream(
        new BufferedOutputStream(peer.getOutputStream()));
    new Sender(out).readBlock(block, blockToken, clientName, startOffset,
        len, verifyChecksum, cachingStrategy);
    //
    // Get bytes in block
    //
    DataInputStream in = new DataInputStream(peer.getInputStream());
    BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
        PBHelperClient.vintPrefixed(in));
    checkSuccess(status, peer, block, file);
    ReadOpChecksumInfoProto checksumInfo = status.getReadOpChecksumInfo();
    DataChecksum checksum = DataTransferProtoUtil.fromProto(
        checksumInfo.getChecksum());
    // Warning when we get CHECKSUM_NULL?
    // Read the first chunk offset.
    long firstChunkOffset = checksumInfo.getChunkOffset();
    if (firstChunkOffset < 0 || firstChunkOffset > startOffset ||
        firstChunkOffset <= (startOffset - checksum.getBytesPerChecksum())) {
        throw new IOException("BlockReader: error in first chunk offset (" +
            firstChunkOffset + ") startOffset is " + startOffset +
            " for file " + file);
    }
    return new BlockReaderRemote(file, block.getBlockId(), checksum,
        verifyChecksum, startOffset, firstChunkOffset, len, peer,
        datanodeID, peerCache, tracer, networkDistance);
}
Also used: Sender (org.apache.hadoop.hdfs.protocol.datatransfer.Sender), DataOutputStream (java.io.DataOutputStream), BlockOpResponseProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto), ReadOpChecksumInfoProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto), IOException (java.io.IOException), DataInputStream (java.io.DataInputStream), BufferedOutputStream (java.io.BufferedOutputStream), DataChecksum (org.apache.hadoop.util.DataChecksum)
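
The offset check near the end of newBlockReader is worth unpacking: reads align backwards to a checksum chunk boundary, so the firstChunkOffset reported by the datanode must be the chunk-aligned offset at or just below startOffset. Below is a minimal, self-contained sketch of that invariant; the class and method names are hypothetical, for illustration only:

// Hypothetical stand-alone demo of the firstChunkOffset validation above.
public class ChunkOffsetCheck {
    // Mirrors the condition in newBlockReader: a valid offset is non-negative,
    // at or before startOffset, and within one checksum chunk of it.
    static boolean isValidFirstChunkOffset(long firstChunkOffset,
            long startOffset, int bytesPerChecksum) {
        return firstChunkOffset >= 0
            && firstChunkOffset <= startOffset
            && firstChunkOffset > startOffset - bytesPerChecksum;
    }

    public static void main(String[] args) {
        int bpc = 512;  // a common dfs.bytes-per-checksum value
        long startOffset = 1300;
        // The datanode rounds the read start down to a chunk boundary.
        long aligned = (startOffset / bpc) * bpc;  // 1024
        System.out.println(isValidFirstChunkOffset(aligned, startOffset, bpc));        // true
        System.out.println(isValidFirstChunkOffset(aligned - bpc, startOffset, bpc));  // false
    }
}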

Example 2 with ReadOpChecksumInfoProto

Use of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto in project hadoop by apache.

From class DataXceiver, method writeSuccessWithChecksumInfo:

private void writeSuccessWithChecksumInfo(BlockSender blockSender,
    DataOutputStream out) throws IOException {
    ReadOpChecksumInfoProto ckInfo = ReadOpChecksumInfoProto.newBuilder()
        .setChecksum(DataTransferProtoUtil.toProto(blockSender.getChecksum()))
        .setChunkOffset(blockSender.getOffset())
        .build();
    BlockOpResponseProto response = BlockOpResponseProto.newBuilder()
        .setStatus(SUCCESS)
        .setReadOpChecksumInfo(ckInfo)
        .build();
    response.writeDelimitedTo(out);
    out.flush();
}
Also used: BlockOpResponseProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto), ReadOpChecksumInfoProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto)
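
Note how the two sides frame the message: writeDelimitedTo() here prefixes the serialized BlockOpResponseProto with a varint length, which the client in Example 1 strips via PBHelperClient.vintPrefixed() before parseFrom(). A minimal round-trip sketch using protobuf's built-in parseDelimitedFrom, which consumes the same framing (assumes hadoop-hdfs-client on the classpath; the class name is hypothetical):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;

public class DelimitedRoundTrip {
    public static void main(String[] args) throws Exception {
        // Writer side (what writeSuccessWithChecksumInfo does over the socket).
        BlockOpResponseProto sent = BlockOpResponseProto.newBuilder()
            .setStatus(Status.SUCCESS)
            .build();
        ByteArrayOutputStream wire = new ByteArrayOutputStream();
        sent.writeDelimitedTo(wire);  // varint length prefix + message bytes

        // Reader side: parseDelimitedFrom strips the varint prefix, equivalent
        // to parseFrom(PBHelperClient.vintPrefixed(in)) in Example 1.
        BlockOpResponseProto received = BlockOpResponseProto.parseDelimitedFrom(
            new ByteArrayInputStream(wire.toByteArray()));
        System.out.println(received.getStatus());  // SUCCESS
    }
}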

Example 3 with ReadOpChecksumInfoProto

Use of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto in project hadoop by apache.

From class DataXceiver, method replaceBlock:

@Override
public void replaceBlock(final ExtendedBlock block,
        final StorageType storageType,
        final Token<BlockTokenIdentifier> blockToken, final String delHint,
        final DatanodeInfo proxySource) throws IOException {
    updateCurrentThreadName("Replacing block " + block + " from " + delHint);
    DataOutputStream replyOut = new DataOutputStream(getOutputStream());
    checkAccess(replyOut, true, block, blockToken, Op.REPLACE_BLOCK, BlockTokenIdentifier.AccessMode.REPLACE);
    if (!dataXceiverServer.balanceThrottler.acquire()) {
        // not able to start
        String msg = "Not able to receive block " + block.getBlockId() + " from " + peer.getRemoteAddressString() + " because threads " + "quota is exceeded.";
        LOG.warn(msg);
        sendResponse(ERROR, msg);
        return;
    }
    Socket proxySock = null;
    DataOutputStream proxyOut = null;
    Status opStatus = SUCCESS;
    String errMsg = null;
    DataInputStream proxyReply = null;
    boolean IoeDuringCopyBlockOperation = false;
    try {
        // Move the block to different storage in the same datanode
        if (proxySource.equals(datanode.getDatanodeId())) {
            ReplicaInfo oldReplica = datanode.data.moveBlockAcrossStorage(block, storageType);
            if (oldReplica != null) {
                LOG.info("Moved " + block + " from StorageType " + oldReplica.getVolume().getStorageType() + " to " + storageType);
            }
        } else {
            block.setNumBytes(dataXceiverServer.estimateBlockSize);
            // get the output stream to the proxy
            final String dnAddr = proxySource.getXferAddr(connectToDnViaHostname);
            if (LOG.isDebugEnabled()) {
                LOG.debug("Connecting to datanode " + dnAddr);
            }
            InetSocketAddress proxyAddr = NetUtils.createSocketAddr(dnAddr);
            proxySock = datanode.newSocket();
            NetUtils.connect(proxySock, proxyAddr, dnConf.socketTimeout);
            proxySock.setTcpNoDelay(dnConf.getDataTransferServerTcpNoDelay());
            proxySock.setSoTimeout(dnConf.socketTimeout);
            proxySock.setKeepAlive(true);
            OutputStream unbufProxyOut = NetUtils.getOutputStream(proxySock,
                dnConf.socketWriteTimeout);
            InputStream unbufProxyIn = NetUtils.getInputStream(proxySock);
            DataEncryptionKeyFactory keyFactory =
                datanode.getDataEncryptionKeyFactoryForBlock(block);
            IOStreamPair saslStreams = datanode.saslClient.socketSend(
                proxySock, unbufProxyOut, unbufProxyIn, keyFactory,
                blockToken, proxySource);
            unbufProxyOut = saslStreams.out;
            unbufProxyIn = saslStreams.in;
            proxyOut = new DataOutputStream(
                new BufferedOutputStream(unbufProxyOut, smallBufferSize));
            proxyReply = new DataInputStream(
                new BufferedInputStream(unbufProxyIn, ioFileBufferSize));
            /* send request to the proxy */
            IoeDuringCopyBlockOperation = true;
            new Sender(proxyOut).copyBlock(block, blockToken);
            IoeDuringCopyBlockOperation = false;
            // receive the response from the proxy
            BlockOpResponseProto copyResponse = BlockOpResponseProto.parseFrom(
                PBHelperClient.vintPrefixed(proxyReply));
            String logInfo = "copy block " + block + " from " + proxySock.getRemoteSocketAddress();
            DataTransferProtoUtil.checkBlockOpStatus(copyResponse, logInfo, true);
            // get checksum info about the block we're copying
            ReadOpChecksumInfoProto checksumInfo = copyResponse.getReadOpChecksumInfo();
            DataChecksum remoteChecksum = DataTransferProtoUtil.fromProto(checksumInfo.getChecksum());
            // open a block receiver and check if the block does not exist
            setCurrentBlockReceiver(getBlockReceiver(block, storageType,
                proxyReply, proxySock.getRemoteSocketAddress().toString(),
                proxySock.getLocalSocketAddress().toString(), null, 0, 0, 0,
                "", null, datanode, remoteChecksum,
                CachingStrategy.newDropBehind(), false, false));
            // receive a block
            blockReceiver.receiveBlock(null, null, replyOut, null,
                dataXceiverServer.balanceThrottler, null, true);
            // notify name node
            final Replica r = blockReceiver.getReplica();
            datanode.notifyNamenodeReceivedBlock(block, delHint, r.getStorageUuid(), r.isOnTransientStorage());
            LOG.info("Moved " + block + " from " + peer.getRemoteAddressString() + ", delHint=" + delHint);
        }
    } catch (IOException ioe) {
        opStatus = ERROR;
        if (ioe instanceof BlockPinningException) {
            opStatus = Status.ERROR_BLOCK_PINNED;
        }
        errMsg = "opReplaceBlock " + block + " received exception " + ioe;
        LOG.info(errMsg);
        if (!IoeDuringCopyBlockOperation) {
            // Don't double count IO errors
            incrDatanodeNetworkErrors();
        }
        throw ioe;
    } finally {
        // receive the last byte that indicates the proxy released its thread resource
        if (opStatus == SUCCESS && proxyReply != null) {
            try {
                proxyReply.readChar();
            } catch (IOException ignored) {
            }
        }
        // now release the thread resource
        dataXceiverServer.balanceThrottler.release();
        // send response back
        try {
            sendResponse(opStatus, errMsg);
        } catch (IOException ioe) {
            LOG.warn("Error writing reply back to " + peer.getRemoteAddressString());
            incrDatanodeNetworkErrors();
        }
        IOUtils.closeStream(proxyOut);
        IOUtils.closeStream(blockReceiver);
        IOUtils.closeStream(proxyReply);
        IOUtils.closeStream(replyOut);
    }
    // update metrics
    datanode.metrics.addReplaceBlockOp(elapsed());
}
Also used: Status (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status), DataOutputStream (java.io.DataOutputStream), InetSocketAddress (java.net.InetSocketAddress), BufferedInputStream (java.io.BufferedInputStream), DataInputStream (java.io.DataInputStream), FileInputStream (java.io.FileInputStream), InputStream (java.io.InputStream), BufferedOutputStream (java.io.BufferedOutputStream), OutputStream (java.io.OutputStream), BlockOpResponseProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto), ReadOpChecksumInfoProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto), ByteString (com.google.protobuf.ByteString), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), DataEncryptionKeyFactory (org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory), BlockPinningException (org.apache.hadoop.hdfs.protocol.datatransfer.BlockPinningException), DataChecksum (org.apache.hadoop.util.DataChecksum), Sender (org.apache.hadoop.hdfs.protocol.datatransfer.Sender), IOStreamPair (org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair), DomainSocket (org.apache.hadoop.net.unix.DomainSocket), Socket (java.net.Socket)
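
Across all three examples, the ChecksumProto embedded in ReadOpChecksumInfoProto is converted to and from DataChecksum via DataTransferProtoUtil.toProto/fromProto; in replaceBlock this is how the proxy datanode's checksum configuration is handed to the local BlockReceiver. A minimal sketch of that round trip (illustrative values; the class name is hypothetical, and ChecksumProto is assumed to live in HdfsProtos as in current Hadoop):

import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumProto;
import org.apache.hadoop.util.DataChecksum;

public class ChecksumProtoRoundTrip {
    public static void main(String[] args) {
        // Build a local checksum config: CRC32C, one checksum per 512 bytes.
        DataChecksum local = DataChecksum.newDataChecksum(
            DataChecksum.Type.CRC32C, 512);
        // Serialize to the wire form carried inside ReadOpChecksumInfoProto...
        ChecksumProto wire = DataTransferProtoUtil.toProto(local);
        // ...and reconstruct it, as the receiving side does.
        DataChecksum remote = DataTransferProtoUtil.fromProto(wire);
        System.out.println(remote.getChecksumType());      // CRC32C
        System.out.println(remote.getBytesPerChecksum());  // 512
    }
}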

Aggregations

BlockOpResponseProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto): 3 uses
ReadOpChecksumInfoProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto): 3 uses
BufferedOutputStream (java.io.BufferedOutputStream): 2 uses
DataInputStream (java.io.DataInputStream): 2 uses
DataOutputStream (java.io.DataOutputStream): 2 uses
IOException (java.io.IOException): 2 uses
Sender (org.apache.hadoop.hdfs.protocol.datatransfer.Sender): 2 uses
DataChecksum (org.apache.hadoop.util.DataChecksum): 2 uses
ByteString (com.google.protobuf.ByteString): 1 use
BufferedInputStream (java.io.BufferedInputStream): 1 use
FileInputStream (java.io.FileInputStream): 1 use
InputStream (java.io.InputStream): 1 use
InterruptedIOException (java.io.InterruptedIOException): 1 use
OutputStream (java.io.OutputStream): 1 use
InetSocketAddress (java.net.InetSocketAddress): 1 use
Socket (java.net.Socket): 1 use
BlockPinningException (org.apache.hadoop.hdfs.protocol.datatransfer.BlockPinningException): 1 use
IOStreamPair (org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair): 1 use
DataEncryptionKeyFactory (org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory): 1 use
Status (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status): 1 use