Example 6 with IOStreamPair

Use of org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair in project hadoop by apache.

The class DFSUtilClient, method connectToDN.

/**
   * Connect to the given datanode's data transfer port, and return
   * the resulting IOStreamPair. This includes encryption wrapping, etc.
   */
public static IOStreamPair connectToDN(DatanodeInfo dn, int timeout,
        Configuration conf, SaslDataTransferClient saslClient,
        SocketFactory socketFactory, boolean connectToDnViaHostname,
        DataEncryptionKeyFactory dekFactory,
        Token<BlockTokenIdentifier> blockToken) throws IOException {
    boolean success = false;
    Socket sock = null;
    try {
        sock = socketFactory.createSocket();
        String dnAddr = dn.getXferAddr(connectToDnViaHostname);
        LOG.debug("Connecting to datanode {}", dnAddr);
        NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
        sock.setTcpNoDelay(getClientDataTransferTcpNoDelay(conf));
        sock.setSoTimeout(timeout);
        OutputStream unbufOut = NetUtils.getOutputStream(sock);
        InputStream unbufIn = NetUtils.getInputStream(sock);
        IOStreamPair pair = saslClient.newSocketSend(sock, unbufOut, unbufIn, dekFactory, blockToken, dn);
        IOStreamPair result = new IOStreamPair(
                new DataInputStream(pair.in),
                new DataOutputStream(new BufferedOutputStream(
                        pair.out, DFSUtilClient.getSmallBufferSize(conf))));
        success = true;
        return result;
    } finally {
        if (!success) {
            IOUtils.closeSocket(sock);
        }
    }
}
Also used: IOStreamPair(org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair) DataInputStream(java.io.DataInputStream) InputStream(java.io.InputStream) DataOutputStream(java.io.DataOutputStream) BufferedOutputStream(java.io.BufferedOutputStream) OutputStream(java.io.OutputStream) Socket(java.net.Socket)
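The stream wrapping at the end of connectToDN is a pattern that recurs throughout these examples: IOStreamPair exposes its streams as public in and out fields, and the caller layers a DataInputStream and a buffered DataOutputStream on top of whatever SASL negotiation returned. Below is a minimal, self-contained sketch of that pattern; it substitutes in-memory streams for the socket and a hard-coded 4 KB buffer for DFSUtilClient.getSmallBufferSize(conf), so it only assumes hadoop-hdfs-client is on the classpath.

import java.io.BufferedOutputStream;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;

import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;

public class WrapDemo {
    public static void main(String[] args) throws Exception {
        // Stand-ins for the raw streams SASL negotiation hands back.
        IOStreamPair raw = new IOStreamPair(
                new ByteArrayInputStream(new byte[8]),
                new ByteArrayOutputStream());
        // The same wrapping connectToDN applies: a DataInputStream for
        // reading protocol responses, and a small buffered DataOutputStream
        // for writing request headers.
        IOStreamPair wrapped = new IOStreamPair(
                new DataInputStream(raw.in),
                new DataOutputStream(new BufferedOutputStream(raw.out, 4096)));
        ((DataOutputStream) wrapped.out).writeInt(42);
        wrapped.out.flush();
        System.out.println(((DataInputStream) wrapped.in).readLong()); // 0
    }
}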

Example 7 with IOStreamPair

Use of org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair in project hadoop by apache.

The class DataStreamer, method createBlockOutputStream.

// Connects to the first datanode in the pipeline.
// Returns true on success, false on failure.
//
boolean createBlockOutputStream(DatanodeInfo[] nodes, StorageType[] nodeStorageTypes, long newGS, boolean recoveryFlag) {
    if (nodes.length == 0) {
        LOG.info("nodes are empty for write pipeline of " + block);
        return false;
    }
    String firstBadLink = "";
    boolean checkRestart = false;
    if (LOG.isDebugEnabled()) {
        LOG.debug("pipeline = " + Arrays.toString(nodes) + ", " + this);
    }
    // persist blocks on namenode on next flush
    persistBlocks.set(true);
    int refetchEncryptionKey = 1;
    while (true) {
        boolean result = false;
        DataOutputStream out = null;
        try {
            assert null == s : "Previous socket unclosed";
            assert null == blockReplyStream : "Previous blockReplyStream unclosed";
            s = createSocketForPipeline(nodes[0], nodes.length, dfsClient);
            long writeTimeout = dfsClient.getDatanodeWriteTimeout(nodes.length);
            long readTimeout = dfsClient.getDatanodeReadTimeout(nodes.length);
            OutputStream unbufOut = NetUtils.getOutputStream(s, writeTimeout);
            InputStream unbufIn = NetUtils.getInputStream(s, readTimeout);
            IOStreamPair saslStreams = dfsClient.saslClient.socketSend(s, unbufOut, unbufIn, dfsClient, accessToken, nodes[0]);
            unbufOut = saslStreams.out;
            unbufIn = saslStreams.in;
            out = new DataOutputStream(new BufferedOutputStream(unbufOut,
                    DFSUtilClient.getSmallBufferSize(
                            dfsClient.getConfiguration())));
            blockReplyStream = new DataInputStream(unbufIn);
            //
            // Xmit header info to datanode
            //
            BlockConstructionStage bcs = recoveryFlag ? stage.getRecoveryStage() : stage;
            // We cannot change the block length in 'block' as it counts the number
            // of bytes ack'ed.
            ExtendedBlock blockCopy = block.getCurrentBlock();
            blockCopy.setNumBytes(stat.getBlockSize());
            boolean[] targetPinnings = getPinnings(nodes);
            // send the request
            new Sender(out).writeBlock(blockCopy, nodeStorageTypes[0],
                    accessToken, dfsClient.clientName, nodes, nodeStorageTypes,
                    null, bcs, nodes.length, block.getNumBytes(), bytesSent,
                    newGS, checksum4WriteBlock, cachingStrategy.get(),
                    isLazyPersistFile,
                    (targetPinnings != null && targetPinnings[0]),
                    targetPinnings);
            // receive ack for connect
            BlockOpResponseProto resp = BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(blockReplyStream));
            Status pipelineStatus = resp.getStatus();
            firstBadLink = resp.getFirstBadLink();
            // A restart OOB ack from a node that is not already marked as
            // restarting means a datanode in the pipeline is going down for
            // a restart; treat it as such rather than a regular node error.
            if (PipelineAck.isRestartOOBStatus(pipelineStatus) &&
                    !errorState.isRestartingNode()) {
                checkRestart = true;
                throw new IOException("A datanode is restarting.");
            }
            String logInfo = "ack with firstBadLink as " + firstBadLink;
            DataTransferProtoUtil.checkBlockOpStatus(resp, logInfo);
            assert null == blockStream : "Previous blockStream unclosed";
            blockStream = out;
            // success
            result = true;
            errorState.resetInternalError();
        } catch (IOException ie) {
            if (!errorState.isRestartingNode()) {
                LOG.info("Exception in createBlockOutputStream " + this, ie);
            }
            if (ie instanceof InvalidEncryptionKeyException && refetchEncryptionKey > 0) {
                LOG.info("Will fetch a new encryption key and retry, " + "encryption key was invalid when connecting to " + nodes[0] + " : " + ie);
                // The encryption key used is invalid.
                refetchEncryptionKey--;
                dfsClient.clearDataEncryptionKey();
                // Try again with a new encryption key.
                continue;
            }
            // find the datanode that matches
            if (firstBadLink.length() != 0) {
                for (int i = 0; i < nodes.length; i++) {
                    // NB: Unconditionally using the xfer addr w/o hostname
                    if (firstBadLink.equals(nodes[i].getXferAddr())) {
                        errorState.setBadNodeIndex(i);
                        break;
                    }
                }
            } else {
                assert !checkRestart;
                errorState.setBadNodeIndex(0);
            }
            final int i = errorState.getBadNodeIndex();
            // Check whether there is a restart worth waiting for.
            if (checkRestart && shouldWaitForRestart(i)) {
                errorState.initRestartingNode(i, "Datanode " + i + " is restarting: " + nodes[i]);
            }
            errorState.setInternalError();
            lastException.set(ie);
            // error
            result = false;
        } finally {
            if (!result) {
                IOUtils.closeSocket(s);
                s = null;
                IOUtils.closeStream(out);
                IOUtils.closeStream(blockReplyStream);
                blockReplyStream = null;
            }
        }
        return result;
    }
}
Also used: Status(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) DataOutputStream(java.io.DataOutputStream) DataInputStream(java.io.DataInputStream) InputStream(java.io.InputStream) BufferedOutputStream(java.io.BufferedOutputStream) OutputStream(java.io.OutputStream) BlockOpResponseProto(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException) Sender(org.apache.hadoop.hdfs.protocol.datatransfer.Sender) IOStreamPair(org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair) InvalidEncryptionKeyException(org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException) BlockConstructionStage(org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage)
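One easy-to-miss detail in createBlockOutputStream is the retry budget: refetchEncryptionKey starts at 1, so a stale data encryption key triggers at most one refetch before the failure propagates. Here is a stripped-down sketch of that recovery loop; clearCachedKey() and connect() are hypothetical stand-ins for dfsClient.clearDataEncryptionKey() and the real connection attempt.

import java.io.IOException;

public class RetryOnceDemo {
    // Hypothetical stand-in for dfsClient.clearDataEncryptionKey().
    static void clearCachedKey() {
        // Drop the cached key so the next attempt fetches a fresh one.
    }

    // Hypothetical connection attempt: fails once with a stale key.
    static void connect(int attempt) throws IOException {
        if (attempt == 0) {
            throw new IOException("encryption key was invalid");
        }
    }

    public static void main(String[] args) throws IOException {
        int refetchEncryptionKey = 1; // same budget createBlockOutputStream uses
        int attempt = 0;
        while (true) {
            try {
                connect(attempt);
                break; // success
            } catch (IOException ie) {
                if (refetchEncryptionKey > 0) {
                    // Same recovery as above: spend the budget, clear the
                    // stale key, and loop to retry with a fresh one.
                    refetchEncryptionKey--;
                    clearCachedKey();
                    attempt++;
                    continue;
                }
                throw ie; // out of retries; surface the failure
            }
        }
        System.out.println("connected after " + (attempt + 1) + " attempts");
    }
}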

Example 8 with IOStreamPair

Use of org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair in project hadoop by apache.

The class DataTransferSaslUtil, method createStreamPair.

/**
   * Create IOStreamPair of {@link org.apache.hadoop.crypto.CryptoInputStream}
   * and {@link org.apache.hadoop.crypto.CryptoOutputStream}
   *
   * @param conf the configuration
   * @param cipherOption negotiated cipher option
   * @param out underlying output stream
   * @param in underlying input stream
   * @param isServer is server side
   * @return IOStreamPair the stream pair
   * @throws IOException for any error
   */
public static IOStreamPair createStreamPair(Configuration conf,
        CipherOption cipherOption, OutputStream out, InputStream in,
        boolean isServer) throws IOException {
    LOG.debug("Creating IOStreamPair of CryptoInputStream and " + "CryptoOutputStream.");
    CryptoCodec codec = CryptoCodec.getInstance(conf, cipherOption.getCipherSuite());
    byte[] inKey = cipherOption.getInKey();
    byte[] inIv = cipherOption.getInIv();
    byte[] outKey = cipherOption.getOutKey();
    byte[] outIv = cipherOption.getOutIv();
    InputStream cIn = new CryptoInputStream(in, codec, isServer ? inKey : outKey, isServer ? inIv : outIv);
    OutputStream cOut = new CryptoOutputStream(out, codec, isServer ? outKey : inKey, isServer ? outIv : inIv);
    return new IOStreamPair(cIn, cOut);
}
Also used: CryptoInputStream(org.apache.hadoop.crypto.CryptoInputStream) IOStreamPair(org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair) InputStream(java.io.InputStream) CryptoOutputStream(org.apache.hadoop.crypto.CryptoOutputStream) OutputStream(java.io.OutputStream) CryptoCodec(org.apache.hadoop.crypto.CryptoCodec)
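The key material swap is the crux of createStreamPair: the client's out-direction key and IV are the server's in-direction key and IV, so each direction of the connection is one AES-CTR stream both sides agree on. The following round trip exercises the same CryptoCodec, CryptoInputStream, and CryptoOutputStream constructors used above; it assumes only that hadoop-common is on the classpath, and the all-zero key and IV are demo values, not something to use in practice.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.CipherSuite;
import org.apache.hadoop.crypto.CryptoCodec;
import org.apache.hadoop.crypto.CryptoInputStream;
import org.apache.hadoop.crypto.CryptoOutputStream;

public class CryptoRoundTrip {
    public static void main(String[] args) throws Exception {
        CryptoCodec codec = CryptoCodec.getInstance(
                new Configuration(), CipherSuite.AES_CTR_NOPADDING);
        byte[] key = new byte[16]; // 128-bit AES key, zeroed for the demo
        byte[] iv = new byte[16];

        // Writer side: encrypt with (key, iv) -- the "out" direction.
        ByteArrayOutputStream wire = new ByteArrayOutputStream();
        CryptoOutputStream cOut = new CryptoOutputStream(wire, codec, key, iv);
        cOut.write("hello".getBytes(StandardCharsets.UTF_8));
        cOut.flush();

        // Reader side: the peer decrypts with the same (key, iv), which is
        // why createStreamPair swaps key material based on isServer.
        InputStream cIn = new CryptoInputStream(
                new ByteArrayInputStream(wire.toByteArray()), codec, key, iv);
        byte[] buf = new byte[5];
        int n = cIn.read(buf);
        System.out.println(new String(buf, 0, n, StandardCharsets.UTF_8));
    }
}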

Example 9 with IOStreamPair

Use of org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair in project hadoop by apache.

The class SaslDataTransferClient, method newSocketSend.

/**
   * Sends client SASL negotiation for a newly allocated socket if required.
   *
   * @param socket connection socket
   * @param underlyingOut connection output stream
   * @param underlyingIn connection input stream
   * @param encryptionKeyFactory for creation of an encryption key
   * @param accessToken connection block access token
   * @param datanodeId ID of destination DataNode
   * @return new pair of streams, wrapped after SASL negotiation
   * @throws IOException for any error
   */
public IOStreamPair newSocketSend(Socket socket, OutputStream underlyingOut,
        InputStream underlyingIn, DataEncryptionKeyFactory encryptionKeyFactory,
        Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
        throws IOException {
    // The encryption key factory only returns a key if encryption is enabled.
    DataEncryptionKey encryptionKey = !trustedChannelResolver.isTrusted() ? encryptionKeyFactory.newDataEncryptionKey() : null;
    IOStreamPair ios = send(socket.getInetAddress(), underlyingOut, underlyingIn, encryptionKey, accessToken, datanodeId);
    return ios != null ? ios : new IOStreamPair(underlyingIn, underlyingOut);
}
Also used: DataEncryptionKey(org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey) IOStreamPair(org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair)
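The final ternary encodes a contract worth noting: send() may return null when no SASL wrapping is required, and in that case the caller gets the underlying streams back unchanged. A small sketch of that fallback, where negotiate() is a hypothetical stand-in for send():

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.io.OutputStream;

import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;

public class PassThroughDemo {
    // Hypothetical stand-in for send(): returns null when the channel is
    // trusted and no wrapping is needed.
    static IOStreamPair negotiate(InputStream in, OutputStream out) {
        return null;
    }

    public static void main(String[] args) {
        InputStream in = new ByteArrayInputStream(new byte[0]);
        OutputStream out = new ByteArrayOutputStream();
        IOStreamPair ios = negotiate(in, out);
        // Same fallback as newSocketSend: no wrapping, so hand back the
        // underlying streams unchanged.
        IOStreamPair result = ios != null ? ios : new IOStreamPair(in, out);
        System.out.println(result.in == in && result.out == out); // true
    }
}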

Example 10 with IOStreamPair

Use of org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair in project hadoop by apache.

The class SaslDataTransferServer, method getSaslStreams.

/**
   * Receives SASL negotiation for general-purpose handshake.
   *
   * @param peer connection peer
   * @param underlyingOut connection output stream
   * @param underlyingIn connection input stream
   * @return new pair of streams, wrapped after SASL negotiation
   * @throws IOException for any error
   */
private IOStreamPair getSaslStreams(Peer peer, OutputStream underlyingOut, InputStream underlyingIn) throws IOException {
    if (peer.hasSecureChannel() || dnConf.getTrustedChannelResolver().isTrusted(getPeerAddress(peer))) {
        return new IOStreamPair(underlyingIn, underlyingOut);
    }
    SaslPropertiesResolver saslPropsResolver = dnConf.getSaslPropsResolver();
    Map<String, String> saslProps = saslPropsResolver.getServerProperties(getPeerAddress(peer));
    CallbackHandler callbackHandler = new SaslServerCallbackHandler(new PasswordFunction() {

        @Override
        public char[] apply(String userName) throws IOException {
            return buildServerPassword(userName);
        }
    });
    return doSaslHandshake(peer, underlyingOut, underlyingIn, saslProps, callbackHandler);
}
Also used: CallbackHandler(javax.security.auth.callback.CallbackHandler) IOStreamPair(org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair) IOException(java.io.IOException) SaslPropertiesResolver(org.apache.hadoop.security.SaslPropertiesResolver)
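The anonymous PasswordFunction above plugs into a SaslServerCallbackHandler, which in turn answers the SASL server's callbacks during the handshake. A minimal handler in the same spirit, built only on the standard javax.security.auth.callback API: passwordFor() is a hypothetical stand-in for buildServerPassword(), which in the real code derives the password from the block access token, and a production handler would also need to answer RealmCallback and AuthorizeCallback.

import java.io.IOException;

import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.callback.NameCallback;
import javax.security.auth.callback.PasswordCallback;
import javax.security.auth.callback.UnsupportedCallbackException;

public class DemoPasswordCallbackHandler implements CallbackHandler {

    // Hypothetical stand-in for buildServerPassword(userName).
    private char[] passwordFor(String userName) {
        return ("demo-secret-for-" + userName).toCharArray();
    }

    @Override
    public void handle(Callback[] callbacks)
            throws IOException, UnsupportedCallbackException {
        String userName = null;
        // First pass: pick up the user name the SASL server hands us.
        for (Callback cb : callbacks) {
            if (cb instanceof NameCallback) {
                userName = ((NameCallback) cb).getDefaultName();
            }
        }
        // Second pass: answer the password callback from the user name.
        for (Callback cb : callbacks) {
            if (cb instanceof PasswordCallback) {
                ((PasswordCallback) cb).setPassword(passwordFor(userName));
            }
            // RealmCallback and AuthorizeCallback are deliberately ignored
            // here; a real server-side handler must answer them too.
        }
    }
}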

Aggregations

IOStreamPair (org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair): 11
InputStream (java.io.InputStream): 7
DataInputStream (java.io.DataInputStream): 6
DataOutputStream (java.io.DataOutputStream): 6
IOException (java.io.IOException): 6
OutputStream (java.io.OutputStream): 6
BufferedOutputStream (java.io.BufferedOutputStream): 5
Sender (org.apache.hadoop.hdfs.protocol.datatransfer.Sender): 5
InterruptedIOException (java.io.InterruptedIOException): 4
Socket (java.net.Socket): 4
BlockOpResponseProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto): 4
ByteString (com.google.protobuf.ByteString): 3
BufferedInputStream (java.io.BufferedInputStream): 3
FileInputStream (java.io.FileInputStream): 3
InetSocketAddress (java.net.InetSocketAddress): 3
DataEncryptionKeyFactory (org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory): 3
Status (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status): 3
CallbackHandler (javax.security.auth.callback.CallbackHandler): 2
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 2
DomainSocket (org.apache.hadoop.net.unix.DomainSocket): 2