Example 1 with Sender

Use of org.apache.hadoop.hdfs.protocol.datatransfer.Sender in project hadoop by apache.

Class StripedBlockWriter, method init().

/**
   * Initialize the output/input streams for transferring data to the
   * target datanode, and send the create-block request.
   */
private void init() throws IOException {
    Socket socket = null;
    DataOutputStream out = null;
    DataInputStream in = null;
    boolean success = false;
    try {
        InetSocketAddress targetAddr = stripedWriter.getSocketAddress4Transfer(target);
        socket = datanode.newSocket();
        NetUtils.connect(socket, targetAddr, datanode.getDnConf().getSocketTimeout());
        socket.setTcpNoDelay(datanode.getDnConf().getDataTransferServerTcpNoDelay());
        socket.setSoTimeout(datanode.getDnConf().getSocketTimeout());
        Token<BlockTokenIdentifier> blockToken = datanode.getBlockAccessToken(block, EnumSet.of(BlockTokenIdentifier.AccessMode.WRITE));
        long writeTimeout = datanode.getDnConf().getSocketWriteTimeout();
        OutputStream unbufOut = NetUtils.getOutputStream(socket, writeTimeout);
        InputStream unbufIn = NetUtils.getInputStream(socket);
        DataEncryptionKeyFactory keyFactory = datanode.getDataEncryptionKeyFactoryForBlock(block);
        IOStreamPair saslStreams = datanode.getSaslClient().socketSend(socket, unbufOut, unbufIn, keyFactory, blockToken, target);
        unbufOut = saslStreams.out;
        unbufIn = saslStreams.in;
        out = new DataOutputStream(new BufferedOutputStream(unbufOut, DFSUtilClient.getSmallBufferSize(conf)));
        in = new DataInputStream(unbufIn);
        DatanodeInfo source = new DatanodeInfoBuilder().setNodeID(datanode.getDatanodeId()).build();
        new Sender(out).writeBlock(block, storageType, blockToken, "", new DatanodeInfo[] { target }, new StorageType[] { storageType }, source, BlockConstructionStage.PIPELINE_SETUP_CREATE, 0, 0, 0, 0, stripedWriter.getChecksum(), stripedWriter.getCachingStrategy(), false, false, null);
        targetSocket = socket;
        targetOutputStream = out;
        targetInputStream = in;
        success = true;
    } finally {
        if (!success) {
            IOUtils.closeStream(out);
            IOUtils.closeStream(in);
            IOUtils.closeStream(socket);
        }
    }
}
Also used : DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder), DataOutputStream (java.io.DataOutputStream), DataInputStream (java.io.DataInputStream), InputStream (java.io.InputStream), OutputStream (java.io.OutputStream), BufferedOutputStream (java.io.BufferedOutputStream), InetSocketAddress (java.net.InetSocketAddress), Socket (java.net.Socket), DataEncryptionKeyFactory (org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory), Sender (org.apache.hadoop.hdfs.protocol.datatransfer.Sender), IOStreamPair (org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair), BlockTokenIdentifier (org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier)
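
init() sends the create-block request but deliberately does not read the target's reply; the connect-ack arrives later on the stored targetInputStream. As a minimal sketch of how a caller could consume that ack (the helper name readConnectAck is ours, not Hadoop's), using the same vint-prefixed parsing that appears in Examples 2 and 4:

// Hypothetical helper, not part of StripedBlockWriter: parse the
// length-prefixed BlockOpResponseProto that the target writes back
// after the writeBlock request, and fail fast on anything but SUCCESS.
private static void readConnectAck(DataInputStream in) throws IOException {
    BlockOpResponseProto ack =
        BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(in));
    if (ack.getStatus() != Status.SUCCESS) {
        throw new IOException("writeBlock rejected: status=" + ack.getStatus()
            + ", firstBadLink=" + ack.getFirstBadLink());
    }
}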

Example 2 with Sender

Use of org.apache.hadoop.hdfs.protocol.datatransfer.Sender in project hadoop by apache.

Class DataXceiver, method writeBlock().

@Override
public void writeBlock(final ExtendedBlock block, final StorageType storageType, final Token<BlockTokenIdentifier> blockToken, final String clientname, final DatanodeInfo[] targets, final StorageType[] targetStorageTypes, final DatanodeInfo srcDataNode, final BlockConstructionStage stage, final int pipelineSize, final long minBytesRcvd, final long maxBytesRcvd, final long latestGenerationStamp, DataChecksum requestedChecksum, CachingStrategy cachingStrategy, boolean allowLazyPersist, final boolean pinning, final boolean[] targetPinnings) throws IOException {
    previousOpClientName = clientname;
    updateCurrentThreadName("Receiving block " + block);
    final boolean isDatanode = clientname.length() == 0;
    final boolean isClient = !isDatanode;
    final boolean isTransfer = stage == BlockConstructionStage.TRANSFER_RBW || stage == BlockConstructionStage.TRANSFER_FINALIZED;
    allowLazyPersist = allowLazyPersist && (dnConf.getAllowNonLocalLazyPersist() || peer.isLocal());
    long size = 0;
    // reply to upstream datanode or client 
    final DataOutputStream replyOut = getBufferedOutputStream();
    checkAccess(replyOut, isClient, block, blockToken, Op.WRITE_BLOCK, BlockTokenIdentifier.AccessMode.WRITE);
    // check single target for transfer-RBW/Finalized 
    if (isTransfer && targets.length > 0) {
        throw new IOException(stage + " does not support multiple targets " + Arrays.asList(targets));
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("opWriteBlock: stage=" + stage + ", clientname=" + clientname + "\n  block  =" + block + ", newGs=" + latestGenerationStamp + ", bytesRcvd=[" + minBytesRcvd + ", " + maxBytesRcvd + "]" + "\n  targets=" + Arrays.asList(targets) + "; pipelineSize=" + pipelineSize + ", srcDataNode=" + srcDataNode + ", pinning=" + pinning);
        LOG.debug("isDatanode=" + isDatanode + ", isClient=" + isClient + ", isTransfer=" + isTransfer);
        LOG.debug("writeBlock receive buf size " + peer.getReceiveBufferSize() + " tcp no delay " + peer.getTcpNoDelay());
    }
    // We later mutate block's generation stamp and length, but we need to
    // forward the original version of the block to downstream mirrors, so
    // make a copy here.
    final ExtendedBlock originalBlock = new ExtendedBlock(block);
    if (block.getNumBytes() == 0) {
        block.setNumBytes(dataXceiverServer.estimateBlockSize);
    }
    LOG.info("Receiving " + block + " src: " + remoteAddress + " dest: " + localAddress);
    // stream to next target
    DataOutputStream mirrorOut = null;
    // reply from next target
    DataInputStream mirrorIn = null;
    // socket to next target
    Socket mirrorSock = null;
    // the name:port of next target
    String mirrorNode = null;
    // first datanode that failed in connection setup
    String firstBadLink = "";
    Status mirrorInStatus = SUCCESS;
    final String storageUuid;
    final boolean isOnTransientStorage;
    try {
        final Replica replica;
        if (isDatanode || stage != BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
            // open a block receiver
            setCurrentBlockReceiver(getBlockReceiver(block, storageType, in, peer.getRemoteAddressString(), peer.getLocalAddressString(), stage, latestGenerationStamp, minBytesRcvd, maxBytesRcvd, clientname, srcDataNode, datanode, requestedChecksum, cachingStrategy, allowLazyPersist, pinning));
            replica = blockReceiver.getReplica();
        } else {
            replica = datanode.data.recoverClose(block, latestGenerationStamp, minBytesRcvd);
        }
        storageUuid = replica.getStorageUuid();
        isOnTransientStorage = replica.isOnTransientStorage();
        // Connect to downstream machine, if appropriate.
        if (targets.length > 0) {
            InetSocketAddress mirrorTarget = null;
            // Connect to backup machine
            mirrorNode = targets[0].getXferAddr(connectToDnViaHostname);
            if (LOG.isDebugEnabled()) {
                LOG.debug("Connecting to datanode " + mirrorNode);
            }
            mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
            mirrorSock = datanode.newSocket();
            try {
                DataNodeFaultInjector.get().failMirrorConnection();
                int timeoutValue = dnConf.socketTimeout + (HdfsConstants.READ_TIMEOUT_EXTENSION * targets.length);
                int writeTimeout = dnConf.socketWriteTimeout + (HdfsConstants.WRITE_TIMEOUT_EXTENSION * targets.length);
                NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
                mirrorSock.setTcpNoDelay(dnConf.getDataTransferServerTcpNoDelay());
                mirrorSock.setSoTimeout(timeoutValue);
                mirrorSock.setKeepAlive(true);
                if (dnConf.getTransferSocketSendBufferSize() > 0) {
                    mirrorSock.setSendBufferSize(dnConf.getTransferSocketSendBufferSize());
                }
                OutputStream unbufMirrorOut = NetUtils.getOutputStream(mirrorSock, writeTimeout);
                InputStream unbufMirrorIn = NetUtils.getInputStream(mirrorSock);
                DataEncryptionKeyFactory keyFactory = datanode.getDataEncryptionKeyFactoryForBlock(block);
                IOStreamPair saslStreams = datanode.saslClient.socketSend(mirrorSock, unbufMirrorOut, unbufMirrorIn, keyFactory, blockToken, targets[0]);
                unbufMirrorOut = saslStreams.out;
                unbufMirrorIn = saslStreams.in;
                mirrorOut = new DataOutputStream(new BufferedOutputStream(unbufMirrorOut, smallBufferSize));
                mirrorIn = new DataInputStream(unbufMirrorIn);
                if (targetPinnings != null && targetPinnings.length > 0) {
                    new Sender(mirrorOut).writeBlock(originalBlock, targetStorageTypes[0], blockToken, clientname, targets, targetStorageTypes, srcDataNode, stage, pipelineSize, minBytesRcvd, maxBytesRcvd, latestGenerationStamp, requestedChecksum, cachingStrategy, allowLazyPersist, targetPinnings[0], targetPinnings);
                } else {
                    new Sender(mirrorOut).writeBlock(originalBlock, targetStorageTypes[0], blockToken, clientname, targets, targetStorageTypes, srcDataNode, stage, pipelineSize, minBytesRcvd, maxBytesRcvd, latestGenerationStamp, requestedChecksum, cachingStrategy, allowLazyPersist, false, targetPinnings);
                }
                mirrorOut.flush();
                DataNodeFaultInjector.get().writeBlockAfterFlush();
                // read connect ack (only for clients, not for replication req)
                if (isClient) {
                    BlockOpResponseProto connectAck = BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(mirrorIn));
                    mirrorInStatus = connectAck.getStatus();
                    firstBadLink = connectAck.getFirstBadLink();
                    if (LOG.isDebugEnabled() || mirrorInStatus != SUCCESS) {
                        LOG.debug("Datanode " + targets.length + " got response for connect ack " + " from downstream datanode with firstbadlink as " + firstBadLink);
                    }
                }
            } catch (IOException e) {
                if (isClient) {
                    BlockOpResponseProto.newBuilder().setStatus(ERROR).setFirstBadLink(targets[0].getXferAddr()).build().writeDelimitedTo(replyOut);
                    replyOut.flush();
                }
                IOUtils.closeStream(mirrorOut);
                mirrorOut = null;
                IOUtils.closeStream(mirrorIn);
                mirrorIn = null;
                IOUtils.closeSocket(mirrorSock);
                mirrorSock = null;
                if (isClient) {
                    LOG.error(datanode + ": Exception transferring block " + block + " to mirror " + mirrorNode + ": " + e);
                    throw e;
                } else {
                    LOG.info(datanode + ": Exception transferring " + block + " to mirror " + mirrorNode + " - continuing without the mirror", e);
                    incrDatanodeNetworkErrors();
                }
            }
        }
        // send connect-ack to source for clients and not transfer-RBW/Finalized
        if (isClient && !isTransfer) {
            if (LOG.isDebugEnabled() || mirrorInStatus != SUCCESS) {
                LOG.debug("Datanode " + targets.length + " forwarding connect ack to upstream firstbadlink is " + firstBadLink);
            }
            BlockOpResponseProto.newBuilder().setStatus(mirrorInStatus).setFirstBadLink(firstBadLink).build().writeDelimitedTo(replyOut);
            replyOut.flush();
        }
        // receive the block and mirror to the next target
        if (blockReceiver != null) {
            String mirrorAddr = (mirrorSock == null) ? null : mirrorNode;
            blockReceiver.receiveBlock(mirrorOut, mirrorIn, replyOut, mirrorAddr, null, targets, false);
            // send close-ack for transfer-RBW/Finalized 
            if (isTransfer) {
                if (LOG.isTraceEnabled()) {
                    LOG.trace("TRANSFER: send close-ack");
                }
                writeResponse(SUCCESS, null, replyOut);
            }
        }
        // update its generation stamp
        if (isClient && stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
            block.setGenerationStamp(latestGenerationStamp);
            block.setNumBytes(minBytesRcvd);
        }
        // For client writes, the block is finalized in the PacketResponder;
        // otherwise (datanode replication or close-recovery), close it here.
        if (isDatanode || stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
            datanode.closeBlock(block, null, storageUuid, isOnTransientStorage);
            LOG.info("Received " + block + " src: " + remoteAddress + " dest: " + localAddress + " of size " + block.getNumBytes());
        }
        if (isClient) {
            size = block.getNumBytes();
        }
    } catch (IOException ioe) {
        LOG.info("opWriteBlock " + block + " received exception " + ioe);
        incrDatanodeNetworkErrors();
        throw ioe;
    } finally {
        // close all opened streams
        IOUtils.closeStream(mirrorOut);
        IOUtils.closeStream(mirrorIn);
        IOUtils.closeStream(replyOut);
        IOUtils.closeSocket(mirrorSock);
        IOUtils.closeStream(blockReceiver);
        setCurrentBlockReceiver(null);
    }
    // update metrics
    datanode.getMetrics().addWriteBlockOp(elapsed());
    datanode.getMetrics().incrWritesFromClient(peer.isLocal(), size);
}
Also used : Status (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status), BlockOpResponseProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Sender (org.apache.hadoop.hdfs.protocol.datatransfer.Sender), IOStreamPair (org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair), DataEncryptionKeyFactory (org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory), DomainSocket (org.apache.hadoop.net.unix.DomainSocket), Socket (java.net.Socket), InetSocketAddress (java.net.InetSocketAddress), DataOutputStream (java.io.DataOutputStream), DataInputStream (java.io.DataInputStream), BufferedOutputStream (java.io.BufferedOutputStream), BufferedInputStream (java.io.BufferedInputStream), FileInputStream (java.io.FileInputStream), InputStream (java.io.InputStream), OutputStream (java.io.OutputStream), IOException (java.io.IOException), InterruptedIOException (java.io.InterruptedIOException), ByteString (com.google.protobuf.ByteString)
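
One detail worth noting in the method above: the connect and write timeouts for the mirror grow with the number of datanodes still downstream, so a deep pipeline gets proportionally more slack. A standalone sketch of that arithmetic (the base values stand in for dnConf.socketTimeout and dnConf.socketWriteTimeout; the extension constants are the same HdfsConstants fields used above):

// Sketch: read/write timeouts for one pipeline hop, given how many
// datanodes remain downstream. The bases are illustrative defaults,
// not values read from an actual DNConf.
static int[] pipelineTimeouts(int remainingTargets) {
    int socketTimeout = HdfsConstants.READ_TIMEOUT;       // 60 s base
    int socketWriteTimeout = HdfsConstants.WRITE_TIMEOUT; // 8 min base
    int readTimeout = socketTimeout
        + HdfsConstants.READ_TIMEOUT_EXTENSION * remainingTargets;
    int writeTimeout = socketWriteTimeout
        + HdfsConstants.WRITE_TIMEOUT_EXTENSION * remainingTargets;
    return new int[] { readTimeout, writeTimeout };
}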

Example 3 with Sender

Use of org.apache.hadoop.hdfs.protocol.datatransfer.Sender in project hadoop by apache.

Class TestDiskError, method testReplicationError().

/**
   * Test that when there is a failure replicating a block, the temporary
   * and meta files are cleaned up and subsequent replication succeeds.
   */
@Test
public void testReplicationError() throws Exception {
    // create a file with a replication factor of 1
    final Path fileName = new Path("/test.txt");
    final int fileLen = 1;
    DFSTestUtil.createFile(fs, fileName, 1, (short) 1, 1L);
    DFSTestUtil.waitReplication(fs, fileName, (short) 1);
    // get the block belonging to the created file
    LocatedBlocks blocks = NameNodeAdapter.getBlockLocations(cluster.getNameNode(), fileName.toString(), 0, (long) fileLen);
    assertEquals("Should only find 1 block", 1, blocks.locatedBlockCount());
    LocatedBlock block = blocks.get(0);
    // bring up a second datanode
    cluster.startDataNodes(conf, 1, true, null, null);
    cluster.waitActive();
    final int sndNode = 1;
    DataNode datanode = cluster.getDataNodes().get(sndNode);
    FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(datanode);
    // replicate the block to the second datanode
    InetSocketAddress target = datanode.getXferAddress();
    Socket s = new Socket(target.getAddress(), target.getPort());
    // write the header.
    DataOutputStream out = new DataOutputStream(s.getOutputStream());
    DataChecksum checksum = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512);
    new Sender(out).writeBlock(block.getBlock(), StorageType.DEFAULT, BlockTokenSecretManager.DUMMY_TOKEN, "", new DatanodeInfo[0], new StorageType[0], null, BlockConstructionStage.PIPELINE_SETUP_CREATE, 1, 0L, 0L, 0L, checksum, CachingStrategy.newDefaultStrategy(), false, false, null);
    out.flush();
    // close the connection before sending the content of the block
    out.close();
    // the temporary block & meta files should be deleted
    String bpid = cluster.getNamesystem().getBlockPoolId();
    while (utils.getStoredReplicas(bpid).hasNext()) {
        Thread.sleep(100);
    }
    // then increase the file's replication factor
    fs.setReplication(fileName, (short) 2);
    // replication to the second datanode should now succeed
    DFSTestUtil.waitReplication(fs, fileName, (short) 2);
    // clean up the file
    fs.delete(fileName, false);
}
Also used : Path (org.apache.hadoop.fs.Path), Sender (org.apache.hadoop.hdfs.protocol.datatransfer.Sender), InetSocketAddress (java.net.InetSocketAddress), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), DataOutputStream (java.io.DataOutputStream), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), Socket (java.net.Socket), DataChecksum (org.apache.hadoop.util.DataChecksum), Test (org.junit.Test)
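
A caveat on the test above: the bare while/sleep loop spins forever if cleanup never happens. A bounded variant, sketched with GenericTestUtils.waitFor (org.apache.hadoop.test.GenericTestUtils, available to most HDFS tests), fails with a timeout instead of hanging:

// Poll every 100 ms, give up after 30 s. The try/catch merely adapts
// getStoredReplicas's checked IOException to the Supplier lambda.
GenericTestUtils.waitFor(() -> {
    try {
        return !utils.getStoredReplicas(bpid).hasNext();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}, 100, 30_000);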

Example 4 with Sender

Use of org.apache.hadoop.hdfs.protocol.datatransfer.Sender in project hadoop by apache.

Class DFSClient, method inferChecksumTypeByReading().

/**
   * Infer the checksum type for a replica by sending an OP_READ_BLOCK
   * for the first byte of that replica. This is used for compatibility
   * with older HDFS versions which did not include the checksum type in
   * OpBlockChecksumResponseProto.
   *
   * @param lb the located block
   * @param dn the connected datanode
   * @return the inferred checksum type
   * @throws IOException if an error occurs
   */
protected Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn) throws IOException {
    IOStreamPair pair = connectToDN(dn, dfsClientConf.getSocketTimeout(), lb.getBlockToken());
    try {
        new Sender((DataOutputStream) pair.out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName, 0, 1, true, CachingStrategy.newDefaultStrategy());
        final BlockOpResponseProto reply = BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(pair.in));
        String logInfo = "trying to read " + lb.getBlock() + " from datanode " + dn;
        DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);
        return PBHelperClient.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
    } finally {
        IOUtilsClient.cleanup(null, pair.in, pair.out);
    }
}
Also used : Sender (org.apache.hadoop.hdfs.protocol.datatransfer.Sender), IOStreamPair (org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair), DataOutputStream (java.io.DataOutputStream), HdfsDataOutputStream (org.apache.hadoop.hdfs.client.HdfsDataOutputStream), BlockOpResponseProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto)
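
For context, the inference is a fallback, not the primary path. A hedged sketch of the caller pattern, modelled on DFSClient's file-checksum handling (the method name checksumType and its exact parameters are our assumptions): take the optional crcType field of OpBlockChecksumResponseProto when the datanode reports it, and pay the extra one-byte read only when it is absent.

// Sketch: prefer the checksum type reported in the response; infer it
// by reading only for old datanodes that omit the optional crcType field.
private Type checksumType(OpBlockChecksumResponseProto checksumData,
        LocatedBlock lb, DatanodeInfo dn) throws IOException {
    if (checksumData.hasCrcType()) {
        return PBHelperClient.convert(checksumData.getCrcType());
    }
    return inferChecksumTypeByReading(lb, dn);
}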

Example 5 with Sender

Use of org.apache.hadoop.hdfs.protocol.datatransfer.Sender in project hadoop by apache.

Class TestBlockReplacement, method replaceBlock().

/*
   * Send a replaceBlock request to the destination datanode and wait
   * for its final (non-IN_PROGRESS) response.
   */
private boolean replaceBlock(ExtendedBlock block, DatanodeInfo source, DatanodeInfo sourceProxy, DatanodeInfo destination, StorageType targetStorageType, Status opStatus) throws IOException, SocketException {
    Socket sock = new Socket();
    try {
        sock.connect(NetUtils.createSocketAddr(destination.getXferAddr()), HdfsConstants.READ_TIMEOUT);
        sock.setKeepAlive(true);
        // sendRequest
        DataOutputStream out = new DataOutputStream(sock.getOutputStream());
        new Sender(out).replaceBlock(block, targetStorageType, BlockTokenSecretManager.DUMMY_TOKEN, source.getDatanodeUuid(), sourceProxy);
        out.flush();
        // receiveResponse
        DataInputStream reply = new DataInputStream(sock.getInputStream());
        BlockOpResponseProto proto = BlockOpResponseProto.parseDelimitedFrom(reply);
        while (proto.getStatus() == Status.IN_PROGRESS) {
            proto = BlockOpResponseProto.parseDelimitedFrom(reply);
        }
        return proto.getStatus() == opStatus;
    } finally {
        sock.close();
    }
}
Also used : Sender (org.apache.hadoop.hdfs.protocol.datatransfer.Sender), DataOutputStream (java.io.DataOutputStream), BlockOpResponseProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto), DataInputStream (java.io.DataInputStream), Socket (java.net.Socket)
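
A usage sketch for the helper (the variable names are assumptions): the expected terminal status is passed in, so the same helper can assert both success and rejection cases.

// Usage sketch: move the replica from source to destination, copying
// from sourceProxy, and expect the datanode to answer SUCCESS.
assertTrue(replaceBlock(block, source, sourceProxy, destination,
    StorageType.DEFAULT, Status.SUCCESS));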

Aggregations

DataOutputStream (java.io.DataOutputStream): 10
Sender (org.apache.hadoop.hdfs.protocol.datatransfer.Sender): 10
DataInputStream (java.io.DataInputStream): 8
BufferedOutputStream (java.io.BufferedOutputStream): 7
BlockOpResponseProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto): 7
Socket (java.net.Socket): 6
IOException (java.io.IOException): 5
IOStreamPair (org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair): 5
InputStream (java.io.InputStream): 4
OutputStream (java.io.OutputStream): 4
InetSocketAddress (java.net.InetSocketAddress): 4
DomainSocket (org.apache.hadoop.net.unix.DomainSocket): 4
FileInputStream (java.io.FileInputStream): 3
InterruptedIOException (java.io.InterruptedIOException): 3
DataEncryptionKeyFactory (org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory): 3
Status (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status): 3
DataChecksum (org.apache.hadoop.util.DataChecksum): 3
ByteString (com.google.protobuf.ByteString): 2
BufferedInputStream (java.io.BufferedInputStream): 2
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 2