Search in sources:

Example 1 with BlockOpResponseProto

Use of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto in project hadoop by apache.

From class TestBlockReplacement, method replaceBlock: a test helper that sends an OP_REPLACE_BLOCK request and polls the delimited response stream until a terminal status arrives.

/*
 * Send an OP_REPLACE_BLOCK request to the destination datanode and
 * return whether the final response status matches the expected one.
 */
private boolean replaceBlock(ExtendedBlock block, DatanodeInfo source,
        DatanodeInfo sourceProxy, DatanodeInfo destination,
        StorageType targetStorageType, Status opStatus)
        throws IOException, SocketException {
    Socket sock = new Socket();
    try {
        sock.connect(NetUtils.createSocketAddr(destination.getXferAddr()), HdfsConstants.READ_TIMEOUT);
        sock.setKeepAlive(true);
        // sendRequest
        DataOutputStream out = new DataOutputStream(sock.getOutputStream());
        new Sender(out).replaceBlock(block, targetStorageType, BlockTokenSecretManager.DUMMY_TOKEN, source.getDatanodeUuid(), sourceProxy);
        out.flush();
        // receiveResponse
        DataInputStream reply = new DataInputStream(sock.getInputStream());
        BlockOpResponseProto proto = BlockOpResponseProto.parseDelimitedFrom(reply);
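        // The datanode sends IN_PROGRESS keep-alive responses while the copy
        // is still running; skip them and act only on the terminal status.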
        while (proto.getStatus() == Status.IN_PROGRESS) {
            proto = BlockOpResponseProto.parseDelimitedFrom(reply);
        }
        return proto.getStatus() == opStatus;
    } finally {
        sock.close();
    }
}
Also used: Sender (org.apache.hadoop.hdfs.protocol.datatransfer.Sender), DataOutputStream (java.io.DataOutputStream), BlockOpResponseProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto), DataInputStream (java.io.DataInputStream), Socket (java.net.Socket)
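
A note on robustness: parseDelimitedFrom returns null once the peer closes the stream, so the polling loop above would raise a NullPointerException if the datanode disconnected before sending a terminal status. Below is a minimal null-safe sketch of the same polling logic, pulled into a hypothetical helper (the class and method names are inventions for illustration, not Hadoop API):

import java.io.DataInputStream;
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;

// Hypothetical helper, not part of Hadoop: drain IN_PROGRESS keep-alives
// and return the terminal status of a replace-block operation.
final class ReplaceBlockResponses {
    private ReplaceBlockResponses() {
    }

    static Status awaitFinalStatus(DataInputStream reply) throws IOException {
        // Each response is written with writeDelimitedTo, so it arrives with a
        // varint length prefix that parseDelimitedFrom consumes; the method
        // returns null once the datanode closes the stream.
        BlockOpResponseProto proto = BlockOpResponseProto.parseDelimitedFrom(reply);
        while (proto != null && proto.getStatus() == Status.IN_PROGRESS) {
            proto = BlockOpResponseProto.parseDelimitedFrom(reply);
        }
        if (proto == null) {
            throw new IOException("stream closed before a final status arrived");
        }
        return proto.getStatus();
    }
}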

Example 2 with BlockOpResponseProto

Use of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto in project hadoop by apache.

From class BlockReaderRemote, method newBlockReader: sends OP_READ_BLOCK and builds a remote block reader after validating the checksum info in the response.

/**
   * Create a new BlockReader specifically to satisfy a read.
   * This method also sends the OP_READ_BLOCK request.
   *
   * @param file  File location
   * @param block  The block object
   * @param blockToken  The block token for security
   * @param startOffset  The read offset, relative to block head
   * @param len  The number of bytes to read
   * @param verifyChecksum  Whether to verify checksum
   * @param clientName  Client name
   * @param peer  The Peer to use
   * @param datanodeID  The DatanodeID this peer is connected to
   * @param peerCache  Cache for reusing connected peers
   * @param cachingStrategy  Caching strategy to request from the datanode
   * @param tracer  Tracer for recording trace spans
   * @param networkDistance  Network distance between client and datanode
   * @return New BlockReader instance
   * @throws IOException if the request fails or the response is malformed
   */
public static BlockReader newBlockReader(String file, ExtendedBlock block,
        Token<BlockTokenIdentifier> blockToken, long startOffset, long len,
        boolean verifyChecksum, String clientName, Peer peer,
        DatanodeID datanodeID, PeerCache peerCache,
        CachingStrategy cachingStrategy, Tracer tracer,
        int networkDistance) throws IOException {
    // in and out will be closed when sock is closed (by the caller)
    final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(peer.getOutputStream()));
    new Sender(out).readBlock(block, blockToken, clientName, startOffset, len, verifyChecksum, cachingStrategy);
    // Read the response header, which carries the status and checksum info.
    DataInputStream in = new DataInputStream(peer.getInputStream());
    BlockOpResponseProto status = BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(in));
    checkSuccess(status, peer, block, file);
    ReadOpChecksumInfoProto checksumInfo = status.getReadOpChecksumInfo();
    DataChecksum checksum = DataTransferProtoUtil.fromProto(checksumInfo.getChecksum());
    // TODO: should this warn when the datanode reports CHECKSUM_NULL?
    // Read the first chunk offset.
    long firstChunkOffset = checksumInfo.getChunkOffset();
    if (firstChunkOffset < 0 || firstChunkOffset > startOffset || firstChunkOffset <= (startOffset - checksum.getBytesPerChecksum())) {
        throw new IOException("BlockReader: error in first chunk offset (" + firstChunkOffset + ") startOffset is " + startOffset + " for file " + file);
    }
    return new BlockReaderRemote(file, block.getBlockId(), checksum, verifyChecksum, startOffset, firstChunkOffset, len, peer, datanodeID, peerCache, tracer, networkDistance);
}
Also used: Sender (org.apache.hadoop.hdfs.protocol.datatransfer.Sender), DataOutputStream (java.io.DataOutputStream), BlockOpResponseProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto), ReadOpChecksumInfoProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto), IOException (java.io.IOException), DataInputStream (java.io.DataInputStream), BufferedOutputStream (java.io.BufferedOutputStream), DataChecksum (org.apache.hadoop.util.DataChecksum)
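
The offset check in this method encodes an invariant that is easy to misread: the datanode may round the requested start offset down to a checksum-chunk boundary, so the reported first chunk offset must be non-negative, at most startOffset, and strictly within one chunk below it. A standalone restatement, with an invented class and method name for illustration:

import java.io.IOException;

// Hypothetical restatement of the first-chunk-offset invariant; not Hadoop
// API. A valid offset satisfies
//   startOffset - bytesPerChecksum < firstChunkOffset <= startOffset
// and is non-negative.
final class FirstChunkOffsetCheck {
    private FirstChunkOffsetCheck() {
    }

    static void check(long firstChunkOffset, long startOffset,
            int bytesPerChecksum, String file) throws IOException {
        boolean valid = firstChunkOffset >= 0
                && firstChunkOffset <= startOffset
                && firstChunkOffset > startOffset - bytesPerChecksum;
        if (!valid) {
            throw new IOException("BlockReader: error in first chunk offset ("
                    + firstChunkOffset + ") startOffset is " + startOffset
                    + " for file " + file);
        }
    }
}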

Example 3 with BlockOpResponseProto

Use of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto in project hadoop by apache.

From class DFSClient, method inferChecksumTypeByReading: reads the first byte of a replica to recover the checksum type from the read response.

/**
   * Infer the checksum type for a replica by sending an OP_READ_BLOCK
   * for the first byte of that replica. This is used for compatibility
   * with older HDFS versions which did not include the checksum type in
   * OpBlockChecksumResponseProto.
   *
   * @param lb the located block
   * @param dn the connected datanode
   * @return the inferred checksum type
   * @throws IOException if an error occurs
   */
protected Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn) throws IOException {
    IOStreamPair pair = connectToDN(dn, dfsClientConf.getSocketTimeout(), lb.getBlockToken());
    try {
        new Sender((DataOutputStream) pair.out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName, 0, 1, true, CachingStrategy.newDefaultStrategy());
        final BlockOpResponseProto reply = BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(pair.in));
        String logInfo = "trying to read " + lb.getBlock() + " from datanode " + dn;
        DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);
        return PBHelperClient.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
    } finally {
        IOUtilsClient.cleanup(null, pair.in, pair.out);
    }
}
Also used: Sender (org.apache.hadoop.hdfs.protocol.datatransfer.Sender), IOStreamPair (org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair), DataOutputStream (java.io.DataOutputStream), HdfsDataOutputStream (org.apache.hadoop.hdfs.client.HdfsDataOutputStream), BlockOpResponseProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto)
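
For context, a sketch of where this fallback typically sits: a caller computing a block checksum first checks the optional crcType field of OpBlockChecksumResponseProto and only pays for the one-byte read when that field is absent. The wrapper below is illustrative, assuming it lives inside DFSClient so inferChecksumTypeByReading is in scope; it is not the actual call site:

// Hypothetical wrapper, assuming DFSClient scope; the method name is invented.
private Type checksumTypeOrInfer(OpBlockChecksumResponseProto checksumData,
        LocatedBlock lb, DatanodeInfo dn) throws IOException {
    if (checksumData.hasCrcType()) {
        // Modern datanodes report the type directly (crcType is an
        // optional field in OpBlockChecksumResponseProto).
        return PBHelperClient.convert(checksumData.getCrcType());
    }
    // Pre-crcType datanode: read one byte via OP_READ_BLOCK and take the
    // type from the ReadOpChecksumInfoProto in the response.
    return inferChecksumTypeByReading(lb, dn);
}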

Example 4 with BlockOpResponseProto

Use of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto in project hadoop by apache.

From class DataXceiver, method writeBlock: the datanode-side write path, which forwards the block down the pipeline and relays connect-acks upstream as BlockOpResponseProto messages.

@Override
public void writeBlock(final ExtendedBlock block, final StorageType storageType,
        final Token<BlockTokenIdentifier> blockToken, final String clientname,
        final DatanodeInfo[] targets, final StorageType[] targetStorageTypes,
        final DatanodeInfo srcDataNode, final BlockConstructionStage stage,
        final int pipelineSize, final long minBytesRcvd, final long maxBytesRcvd,
        final long latestGenerationStamp, DataChecksum requestedChecksum,
        CachingStrategy cachingStrategy, boolean allowLazyPersist,
        final boolean pinning, final boolean[] targetPinnings) throws IOException {
    previousOpClientName = clientname;
    updateCurrentThreadName("Receiving block " + block);
    final boolean isDatanode = clientname.length() == 0;
    final boolean isClient = !isDatanode;
    final boolean isTransfer = stage == BlockConstructionStage.TRANSFER_RBW || stage == BlockConstructionStage.TRANSFER_FINALIZED;
    allowLazyPersist = allowLazyPersist && (dnConf.getAllowNonLocalLazyPersist() || peer.isLocal());
    long size = 0;
    // reply to upstream datanode or client 
    final DataOutputStream replyOut = getBufferedOutputStream();
    checkAccess(replyOut, isClient, block, blockToken, Op.WRITE_BLOCK, BlockTokenIdentifier.AccessMode.WRITE);
    // check single target for transfer-RBW/Finalized 
    if (isTransfer && targets.length > 0) {
        throw new IOException(stage + " does not support multiple targets " + Arrays.asList(targets));
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("opWriteBlock: stage=" + stage + ", clientname=" + clientname + "\n  block  =" + block + ", newGs=" + latestGenerationStamp + ", bytesRcvd=[" + minBytesRcvd + ", " + maxBytesRcvd + "]" + "\n  targets=" + Arrays.asList(targets) + "; pipelineSize=" + pipelineSize + ", srcDataNode=" + srcDataNode + ", pinning=" + pinning);
        LOG.debug("isDatanode=" + isDatanode + ", isClient=" + isClient + ", isTransfer=" + isTransfer);
        LOG.debug("writeBlock receive buf size " + peer.getReceiveBufferSize() + " tcp no delay " + peer.getTcpNoDelay());
    }
    // We later mutate block's generation stamp and length, but we need to
    // forward the original version of the block to downstream mirrors, so
    // make a copy here.
    final ExtendedBlock originalBlock = new ExtendedBlock(block);
    if (block.getNumBytes() == 0) {
        block.setNumBytes(dataXceiverServer.estimateBlockSize);
    }
    LOG.info("Receiving " + block + " src: " + remoteAddress + " dest: " + localAddress);
    // stream to next target
    DataOutputStream mirrorOut = null;
    // reply from next target
    DataInputStream mirrorIn = null;
    // socket to next target
    Socket mirrorSock = null;
    // the name:port of next target
    String mirrorNode = null;
    // first datanode that failed in connection setup
    String firstBadLink = "";
    Status mirrorInStatus = SUCCESS;
    final String storageUuid;
    final boolean isOnTransientStorage;
    try {
        final Replica replica;
        if (isDatanode || stage != BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
            // open a block receiver
            setCurrentBlockReceiver(getBlockReceiver(block, storageType, in, peer.getRemoteAddressString(), peer.getLocalAddressString(), stage, latestGenerationStamp, minBytesRcvd, maxBytesRcvd, clientname, srcDataNode, datanode, requestedChecksum, cachingStrategy, allowLazyPersist, pinning));
            replica = blockReceiver.getReplica();
        } else {
            replica = datanode.data.recoverClose(block, latestGenerationStamp, minBytesRcvd);
        }
        storageUuid = replica.getStorageUuid();
        isOnTransientStorage = replica.isOnTransientStorage();
        // Connect to the downstream datanode, if there is one.
        if (targets.length > 0) {
            InetSocketAddress mirrorTarget = null;
            // Connect to backup machine
            mirrorNode = targets[0].getXferAddr(connectToDnViaHostname);
            if (LOG.isDebugEnabled()) {
                LOG.debug("Connecting to datanode " + mirrorNode);
            }
            mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
            mirrorSock = datanode.newSocket();
            try {
                DataNodeFaultInjector.get().failMirrorConnection();
                int timeoutValue = dnConf.socketTimeout + (HdfsConstants.READ_TIMEOUT_EXTENSION * targets.length);
                int writeTimeout = dnConf.socketWriteTimeout + (HdfsConstants.WRITE_TIMEOUT_EXTENSION * targets.length);
                NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
                mirrorSock.setTcpNoDelay(dnConf.getDataTransferServerTcpNoDelay());
                mirrorSock.setSoTimeout(timeoutValue);
                mirrorSock.setKeepAlive(true);
                if (dnConf.getTransferSocketSendBufferSize() > 0) {
                    mirrorSock.setSendBufferSize(dnConf.getTransferSocketSendBufferSize());
                }
                OutputStream unbufMirrorOut = NetUtils.getOutputStream(mirrorSock, writeTimeout);
                InputStream unbufMirrorIn = NetUtils.getInputStream(mirrorSock);
                DataEncryptionKeyFactory keyFactory = datanode.getDataEncryptionKeyFactoryForBlock(block);
                IOStreamPair saslStreams = datanode.saslClient.socketSend(mirrorSock, unbufMirrorOut, unbufMirrorIn, keyFactory, blockToken, targets[0]);
                unbufMirrorOut = saslStreams.out;
                unbufMirrorIn = saslStreams.in;
                mirrorOut = new DataOutputStream(new BufferedOutputStream(unbufMirrorOut, smallBufferSize));
                mirrorIn = new DataInputStream(unbufMirrorIn);
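                // Forward the same write request to the first mirror; pass
                // along that mirror's pinning flag when pinnings were given.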
                if (targetPinnings != null && targetPinnings.length > 0) {
                    new Sender(mirrorOut).writeBlock(originalBlock, targetStorageTypes[0], blockToken, clientname, targets, targetStorageTypes, srcDataNode, stage, pipelineSize, minBytesRcvd, maxBytesRcvd, latestGenerationStamp, requestedChecksum, cachingStrategy, allowLazyPersist, targetPinnings[0], targetPinnings);
                } else {
                    new Sender(mirrorOut).writeBlock(originalBlock, targetStorageTypes[0], blockToken, clientname, targets, targetStorageTypes, srcDataNode, stage, pipelineSize, minBytesRcvd, maxBytesRcvd, latestGenerationStamp, requestedChecksum, cachingStrategy, allowLazyPersist, false, targetPinnings);
                }
                mirrorOut.flush();
                DataNodeFaultInjector.get().writeBlockAfterFlush();
                // read connect ack (only for clients, not for replication req)
                if (isClient) {
                    BlockOpResponseProto connectAck = BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(mirrorIn));
                    mirrorInStatus = connectAck.getStatus();
                    firstBadLink = connectAck.getFirstBadLink();
                    if (LOG.isDebugEnabled() || mirrorInStatus != SUCCESS) {
                        LOG.debug("Datanode " + targets.length + " got response for connect ack " + " from downstream datanode with firstbadlink as " + firstBadLink);
                    }
                }
            } catch (IOException e) {
                if (isClient) {
                    BlockOpResponseProto.newBuilder().setStatus(ERROR).setFirstBadLink(targets[0].getXferAddr()).build().writeDelimitedTo(replyOut);
                    replyOut.flush();
                }
                IOUtils.closeStream(mirrorOut);
                mirrorOut = null;
                IOUtils.closeStream(mirrorIn);
                mirrorIn = null;
                IOUtils.closeSocket(mirrorSock);
                mirrorSock = null;
                if (isClient) {
                    LOG.error(datanode + ":Exception transferring block " + block + " to mirror " + mirrorNode + ": " + e);
                    throw e;
                } else {
                    LOG.info(datanode + ":Exception transferring " + block + " to mirror " + mirrorNode + " - continuing without the mirror", e);
                    incrDatanodeNetworkErrors();
                }
            }
        }
        // send connect-ack to source for clients and not transfer-RBW/Finalized
        if (isClient && !isTransfer) {
            if (LOG.isDebugEnabled() || mirrorInStatus != SUCCESS) {
                LOG.debug("Datanode " + targets.length + " forwarding connect ack to upstream firstbadlink is " + firstBadLink);
            }
            BlockOpResponseProto.newBuilder().setStatus(mirrorInStatus).setFirstBadLink(firstBadLink).build().writeDelimitedTo(replyOut);
            replyOut.flush();
        }
        // receive the block and mirror to the next target
        if (blockReceiver != null) {
            String mirrorAddr = (mirrorSock == null) ? null : mirrorNode;
            blockReceiver.receiveBlock(mirrorOut, mirrorIn, replyOut, mirrorAddr, null, targets, false);
            // send close-ack for transfer-RBW/Finalized 
            if (isTransfer) {
                if (LOG.isTraceEnabled()) {
                    LOG.trace("TRANSFER: send close-ack");
                }
                writeResponse(SUCCESS, null, replyOut);
            }
        }
        // update its generation stamp
        if (isClient && stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
            block.setGenerationStamp(latestGenerationStamp);
            block.setNumBytes(minBytesRcvd);
        }
        // For replication requests and close-recovery, confirm the block here;
        // for other client writes, the block is finalized in the PacketResponder.
        if (isDatanode || stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
            datanode.closeBlock(block, null, storageUuid, isOnTransientStorage);
            LOG.info("Received " + block + " src: " + remoteAddress + " dest: " + localAddress + " of size " + block.getNumBytes());
        }
        if (isClient) {
            size = block.getNumBytes();
        }
    } catch (IOException ioe) {
        LOG.info("opWriteBlock " + block + " received exception " + ioe);
        incrDatanodeNetworkErrors();
        throw ioe;
    } finally {
        // close all opened streams
        IOUtils.closeStream(mirrorOut);
        IOUtils.closeStream(mirrorIn);
        IOUtils.closeStream(replyOut);
        IOUtils.closeSocket(mirrorSock);
        IOUtils.closeStream(blockReceiver);
        setCurrentBlockReceiver(null);
    }
    // update metrics
    datanode.getMetrics().addWriteBlockOp(elapsed());
    datanode.getMetrics().incrWritesFromClient(peer.isLocal(), size);
}
Also used: Status (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status), DataOutputStream (java.io.DataOutputStream), InetSocketAddress (java.net.InetSocketAddress), BufferedInputStream (java.io.BufferedInputStream), DataInputStream (java.io.DataInputStream), FileInputStream (java.io.FileInputStream), InputStream (java.io.InputStream), BufferedOutputStream (java.io.BufferedOutputStream), OutputStream (java.io.OutputStream), BlockOpResponseProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), ByteString (com.google.protobuf.ByteString), DataEncryptionKeyFactory (org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory), Sender (org.apache.hadoop.hdfs.protocol.datatransfer.Sender), IOStreamPair (org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair), DomainSocket (org.apache.hadoop.net.unix.DomainSocket), Socket (java.net.Socket)
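
The connect-ack that writeBlock relays upstream has a client-side counterpart: the writer that opened the pipeline reads exactly one BlockOpResponseProto (written above with writeDelimitedTo, hence the vintPrefixed strip on the reading side) and uses the firstBadLink field to identify which downstream datanode broke the setup. A hypothetical standalone reader, for illustration only:

import java.io.DataInputStream;
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;

// Hypothetical client-side counterpart, not Hadoop API: read one pipeline
// connect-ack and surface the first bad link on failure.
final class ConnectAck {
    private ConnectAck() {
    }

    static void expectPipelineSuccess(DataInputStream in) throws IOException {
        // vintPrefixed strips the varint length prefix that writeDelimitedTo
        // added, leaving a stream bounded to exactly one message.
        BlockOpResponseProto ack =
            BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(in));
        if (ack.getStatus() != Status.SUCCESS) {
            String firstBadLink = ack.getFirstBadLink();
            throw new IOException("Pipeline setup failed with status "
                + ack.getStatus()
                + (firstBadLink.isEmpty() ? "" : ", first bad link: " + firstBadLink));
        }
    }
}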

Example 5 with BlockOpResponseProto

Use of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto in project hadoop by apache.

From class DataXceiver, method writeSuccessWithChecksumInfo: builds and writes the SUCCESS response carrying the ReadOpChecksumInfoProto for a read.

private void writeSuccessWithChecksumInfo(BlockSender blockSender, DataOutputStream out) throws IOException {
    ReadOpChecksumInfoProto ckInfo = ReadOpChecksumInfoProto.newBuilder()
        .setChecksum(DataTransferProtoUtil.toProto(blockSender.getChecksum()))
        .setChunkOffset(blockSender.getOffset())
        .build();
    BlockOpResponseProto response = BlockOpResponseProto.newBuilder()
        .setStatus(SUCCESS)
        .setReadOpChecksumInfo(ckInfo)
        .build();
    response.writeDelimitedTo(out);
    out.flush();
}
Also used: BlockOpResponseProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto), ReadOpChecksumInfoProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto)
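
Because this method writes with writeDelimitedTo, readers can consume the response either with parseDelimitedFrom (Example 1) or with parseFrom over a vintPrefixed-bounded stream (Example 2). A self-contained round trip that builds a comparable response and parses it back; the field values are arbitrary illustrations:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto;

// Hypothetical round-trip check for a response shaped like the one above.
final class ResponseRoundTrip {
    public static void main(String[] args) throws IOException {
        ChecksumProto checksum = ChecksumProto.newBuilder()
            .setType(ChecksumTypeProto.CHECKSUM_CRC32C)
            .setBytesPerChecksum(512)
            .build();
        ReadOpChecksumInfoProto ckInfo = ReadOpChecksumInfoProto.newBuilder()
            .setChecksum(checksum)
            .setChunkOffset(0L)
            .build();
        BlockOpResponseProto response = BlockOpResponseProto.newBuilder()
            .setStatus(Status.SUCCESS)
            .setReadOpChecksumInfo(ckInfo)
            .build();

        // writeDelimitedTo adds the varint length prefix the readers expect.
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        response.writeDelimitedTo(bytes);
        BlockOpResponseProto parsed = BlockOpResponseProto.parseDelimitedFrom(
            new ByteArrayInputStream(bytes.toByteArray()));

        if (parsed.getStatus() != Status.SUCCESS
                || parsed.getReadOpChecksumInfo().getChunkOffset() != 0L) {
            throw new AssertionError("round trip lost data");
        }
        System.out.println("round trip ok: " + parsed.getStatus());
    }
}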

Aggregations

BlockOpResponseProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto): 9 uses
DataOutputStream (java.io.DataOutputStream): 7 uses
Sender (org.apache.hadoop.hdfs.protocol.datatransfer.Sender): 7 uses
DataInputStream (java.io.DataInputStream): 6 uses
BufferedOutputStream (java.io.BufferedOutputStream): 5 uses
IOException (java.io.IOException): 5 uses
IOStreamPair (org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair): 4 uses
FileInputStream (java.io.FileInputStream): 3 uses
InputStream (java.io.InputStream): 3 uses
InterruptedIOException (java.io.InterruptedIOException): 3 uses
OutputStream (java.io.OutputStream): 3 uses
Socket (java.net.Socket): 3 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 3 uses
ReadOpChecksumInfoProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto): 3 uses
Status (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status): 3 uses
DomainSocket (org.apache.hadoop.net.unix.DomainSocket): 3 uses
ByteString (com.google.protobuf.ByteString): 2 uses
BufferedInputStream (java.io.BufferedInputStream): 2 uses
InetSocketAddress (java.net.InetSocketAddress): 2 uses
DataEncryptionKeyFactory (org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory): 2 uses