Example 16 with Socket

use of java.net.Socket in project hadoop by apache.

the class StandardSocketFactory method createSocket.

@Override
public Socket createSocket(String host, int port) throws IOException, UnknownHostException {
    Socket socket = createSocket();
    socket.connect(new InetSocketAddress(host, port));
    return socket;
}
Also used : InetSocketAddress(java.net.InetSocketAddress) Socket(java.net.Socket)
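
For context, a minimal self-contained sketch of the same create-then-connect pattern; the endpoint (example.org:80) is a placeholder, not anything from the Hadoop source. Creating the socket unconnected first, as createSocket() does, leaves room to set options before the connection is established:

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;

public class CreateThenConnect {
    public static void main(String[] args) throws IOException {
        // Create an unconnected socket first (as createSocket() does above),
        // then connect to a resolved address; try-with-resources closes it.
        try (Socket socket = new Socket()) {
            socket.connect(new InetSocketAddress("example.org", 80));
            System.out.println("connected: " + socket.isConnected());
        }
    }
}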

Example 17 with Socket

use of java.net.Socket in project flink by apache.

the class ConnectionUtils method tryToConnect.

/**
	 *
	 * @param fromAddress The address to connect from.
	 * @param toSocket The socket address to connect to.
	 * @param timeout The timeout for the connection.
	 * @param logFailed Flag to indicate whether to log failed attempts on info level
	 *                  (failed attempts are always logged on DEBUG level).
	 * @return True, if the connection was successful, false otherwise.
	 * @throws IOException Thrown if the socket cleanup fails.
	 */
private static boolean tryToConnect(InetAddress fromAddress, SocketAddress toSocket, int timeout, boolean logFailed) throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Trying to connect to (" + toSocket + ") from local address " + fromAddress + " with timeout " + timeout);
    }
    try (Socket socket = new Socket()) {
        // port 0 = let the OS choose the port on that machine
        SocketAddress bindP = new InetSocketAddress(fromAddress, 0);
        socket.bind(bindP);
        socket.connect(toSocket, timeout);
        return true;
    } catch (Exception ex) {
        String message = "Failed to connect from address '" + fromAddress + "': " + ex.getMessage();
        if (LOG.isDebugEnabled()) {
            LOG.debug(message, ex);
        } else if (logFailed) {
            LOG.info(message);
        }
        return false;
    }
}
Also used : InetSocketAddress(java.net.InetSocketAddress) SocketAddress(java.net.SocketAddress) InetSocketAddress(java.net.InetSocketAddress) Socket(java.net.Socket) IOException(java.io.IOException) LeaderRetrievalException(org.apache.flink.runtime.leaderretrieval.LeaderRetrievalException) UnknownHostException(java.net.UnknownHostException)
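
A standalone variant of the same bind-then-connect probe, stripped of Flink's logging; the target endpoint and the five-second timeout are illustrative choices, not values from the Flink code:

import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.SocketAddress;

public class ConnectProbe {
    static boolean tryToConnect(InetAddress fromAddress, SocketAddress toSocket, int timeout) {
        try (Socket socket = new Socket()) {
            // Bind to the candidate local address; port 0 lets the OS pick a port.
            socket.bind(new InetSocketAddress(fromAddress, 0));
            socket.connect(toSocket, timeout);
            return true;
        } catch (Exception ex) {
            // Connect failed (or cleanup failed); report the address as unusable.
            return false;
        }
    }

    public static void main(String[] args) throws Exception {
        InetAddress local = InetAddress.getLocalHost();
        SocketAddress target = new InetSocketAddress("example.org", 80);
        System.out.println(tryToConnect(local, target, 5000));
    }
}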

Example 18 with Socket

use of java.net.Socket in project hadoop by apache.

the class StripedBlockWriter method init.

/**
   * Initialize  output/input streams for transferring data to target
   * and send create block request.
   */
private void init() throws IOException {
    Socket socket = null;
    DataOutputStream out = null;
    DataInputStream in = null;
    boolean success = false;
    try {
        InetSocketAddress targetAddr = stripedWriter.getSocketAddress4Transfer(target);
        socket = datanode.newSocket();
        NetUtils.connect(socket, targetAddr, datanode.getDnConf().getSocketTimeout());
        socket.setTcpNoDelay(datanode.getDnConf().getDataTransferServerTcpNoDelay());
        socket.setSoTimeout(datanode.getDnConf().getSocketTimeout());
        Token<BlockTokenIdentifier> blockToken = datanode.getBlockAccessToken(block, EnumSet.of(BlockTokenIdentifier.AccessMode.WRITE));
        long writeTimeout = datanode.getDnConf().getSocketWriteTimeout();
        OutputStream unbufOut = NetUtils.getOutputStream(socket, writeTimeout);
        InputStream unbufIn = NetUtils.getInputStream(socket);
        DataEncryptionKeyFactory keyFactory = datanode.getDataEncryptionKeyFactoryForBlock(block);
        IOStreamPair saslStreams = datanode.getSaslClient().socketSend(socket, unbufOut, unbufIn, keyFactory, blockToken, target);
        unbufOut = saslStreams.out;
        unbufIn = saslStreams.in;
        out = new DataOutputStream(new BufferedOutputStream(unbufOut, DFSUtilClient.getSmallBufferSize(conf)));
        in = new DataInputStream(unbufIn);
        DatanodeInfo source = new DatanodeInfoBuilder().setNodeID(datanode.getDatanodeId()).build();
        new Sender(out).writeBlock(block, storageType, blockToken, "", new DatanodeInfo[] { target }, new StorageType[] { storageType }, source, BlockConstructionStage.PIPELINE_SETUP_CREATE, 0, 0, 0, 0, stripedWriter.getChecksum(), stripedWriter.getCachingStrategy(), false, false, null);
        targetSocket = socket;
        targetOutputStream = out;
        targetInputStream = in;
        success = true;
    } finally {
        if (!success) {
            IOUtils.closeStream(out);
            IOUtils.closeStream(in);
            IOUtils.closeStream(socket);
        }
    }
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DatanodeInfoBuilder(org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder) DataOutputStream(java.io.DataOutputStream) InetSocketAddress(java.net.InetSocketAddress) DataInputStream(java.io.DataInputStream) InputStream(java.io.InputStream) BufferedOutputStream(java.io.BufferedOutputStream) DataOutputStream(java.io.DataOutputStream) OutputStream(java.io.OutputStream) DataInputStream(java.io.DataInputStream) DataEncryptionKeyFactory(org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory) Sender(org.apache.hadoop.hdfs.protocol.datatransfer.Sender) BlockTokenIdentifier(org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier) IOStreamPair(org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair) BufferedOutputStream(java.io.BufferedOutputStream) Socket(java.net.Socket)
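
The notable idiom here is the success flag: the finally block cleans up only if initialization did not complete, so on success ownership of the streams passes to the fields. A dependency-free sketch of that idiom, with a placeholder endpoint and a hand-rolled closeQuietly standing in for Hadoop's IOUtils.closeStream:

import java.io.BufferedOutputStream;
import java.io.Closeable;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;

public class SuccessFlagInit {
    private Socket targetSocket;
    private DataOutputStream targetOut;
    private DataInputStream targetIn;

    // Opens a socket plus buffered streams; on any failure before the
    // success flag is set, the finally block releases everything so no
    // half-initialized resource leaks. The endpoint is caller-supplied.
    void init(String host, int port, int timeoutMillis) throws IOException {
        Socket socket = new Socket();
        DataOutputStream out = null;
        DataInputStream in = null;
        boolean success = false;
        try {
            socket.connect(new InetSocketAddress(host, port), timeoutMillis);
            socket.setTcpNoDelay(true);
            socket.setSoTimeout(timeoutMillis);
            out = new DataOutputStream(new BufferedOutputStream(socket.getOutputStream()));
            in = new DataInputStream(socket.getInputStream());
            targetSocket = socket;
            targetOut = out;
            targetIn = in;
            success = true;
        } finally {
            if (!success) {
                closeQuietly(out);
                closeQuietly(in);
                closeQuietly(socket);
            }
        }
    }

    private static void closeQuietly(Closeable c) {
        if (c != null) {
            try {
                c.close();
            } catch (IOException ignored) {
                // best-effort cleanup, mirroring IOUtils.closeStream
            }
        }
    }
}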

Example 19 with Socket

use of java.net.Socket in project hadoop by apache.

the class DataXceiver method writeBlock.

@Override
public void writeBlock(final ExtendedBlock block, final StorageType storageType, final Token<BlockTokenIdentifier> blockToken, final String clientname, final DatanodeInfo[] targets, final StorageType[] targetStorageTypes, final DatanodeInfo srcDataNode, final BlockConstructionStage stage, final int pipelineSize, final long minBytesRcvd, final long maxBytesRcvd, final long latestGenerationStamp, DataChecksum requestedChecksum, CachingStrategy cachingStrategy, boolean allowLazyPersist, final boolean pinning, final boolean[] targetPinnings) throws IOException {
    previousOpClientName = clientname;
    updateCurrentThreadName("Receiving block " + block);
    final boolean isDatanode = clientname.length() == 0;
    final boolean isClient = !isDatanode;
    final boolean isTransfer = stage == BlockConstructionStage.TRANSFER_RBW || stage == BlockConstructionStage.TRANSFER_FINALIZED;
    allowLazyPersist = allowLazyPersist && (dnConf.getAllowNonLocalLazyPersist() || peer.isLocal());
    long size = 0;
    // reply to upstream datanode or client 
    final DataOutputStream replyOut = getBufferedOutputStream();
    checkAccess(replyOut, isClient, block, blockToken, Op.WRITE_BLOCK, BlockTokenIdentifier.AccessMode.WRITE);
    // check single target for transfer-RBW/Finalized 
    if (isTransfer && targets.length > 0) {
        throw new IOException(stage + " does not support multiple targets " + Arrays.asList(targets));
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("opWriteBlock: stage=" + stage + ", clientname=" + clientname + "\n  block  =" + block + ", newGs=" + latestGenerationStamp + ", bytesRcvd=[" + minBytesRcvd + ", " + maxBytesRcvd + "]" + "\n  targets=" + Arrays.asList(targets) + "; pipelineSize=" + pipelineSize + ", srcDataNode=" + srcDataNode + ", pinning=" + pinning);
        LOG.debug("isDatanode=" + isDatanode + ", isClient=" + isClient + ", isTransfer=" + isTransfer);
        LOG.debug("writeBlock receive buf size " + peer.getReceiveBufferSize() + " tcp no delay " + peer.getTcpNoDelay());
    }
    // We later mutate block's generation stamp and length, but we need to
    // forward the original version of the block to downstream mirrors, so
    // make a copy here.
    final ExtendedBlock originalBlock = new ExtendedBlock(block);
    if (block.getNumBytes() == 0) {
        block.setNumBytes(dataXceiverServer.estimateBlockSize);
    }
    LOG.info("Receiving " + block + " src: " + remoteAddress + " dest: " + localAddress);
    // stream to next target
    DataOutputStream mirrorOut = null;
    // reply from next target
    DataInputStream mirrorIn = null;
    // socket to next target
    Socket mirrorSock = null;
    // the name:port of next target
    String mirrorNode = null;
    // first datanode that failed in connection setup
    String firstBadLink = "";
    Status mirrorInStatus = SUCCESS;
    final String storageUuid;
    final boolean isOnTransientStorage;
    try {
        final Replica replica;
        if (isDatanode || stage != BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
            // open a block receiver
            setCurrentBlockReceiver(getBlockReceiver(block, storageType, in, peer.getRemoteAddressString(), peer.getLocalAddressString(), stage, latestGenerationStamp, minBytesRcvd, maxBytesRcvd, clientname, srcDataNode, datanode, requestedChecksum, cachingStrategy, allowLazyPersist, pinning));
            replica = blockReceiver.getReplica();
        } else {
            replica = datanode.data.recoverClose(block, latestGenerationStamp, minBytesRcvd);
        }
        storageUuid = replica.getStorageUuid();
        isOnTransientStorage = replica.isOnTransientStorage();
        if (targets.length > 0) {
            InetSocketAddress mirrorTarget = null;
            // Connect to backup machine
            mirrorNode = targets[0].getXferAddr(connectToDnViaHostname);
            if (LOG.isDebugEnabled()) {
                LOG.debug("Connecting to datanode " + mirrorNode);
            }
            mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
            mirrorSock = datanode.newSocket();
            try {
                DataNodeFaultInjector.get().failMirrorConnection();
                int timeoutValue = dnConf.socketTimeout + (HdfsConstants.READ_TIMEOUT_EXTENSION * targets.length);
                int writeTimeout = dnConf.socketWriteTimeout + (HdfsConstants.WRITE_TIMEOUT_EXTENSION * targets.length);
                NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
                mirrorSock.setTcpNoDelay(dnConf.getDataTransferServerTcpNoDelay());
                mirrorSock.setSoTimeout(timeoutValue);
                mirrorSock.setKeepAlive(true);
                if (dnConf.getTransferSocketSendBufferSize() > 0) {
                    mirrorSock.setSendBufferSize(dnConf.getTransferSocketSendBufferSize());
                }
                OutputStream unbufMirrorOut = NetUtils.getOutputStream(mirrorSock, writeTimeout);
                InputStream unbufMirrorIn = NetUtils.getInputStream(mirrorSock);
                DataEncryptionKeyFactory keyFactory = datanode.getDataEncryptionKeyFactoryForBlock(block);
                IOStreamPair saslStreams = datanode.saslClient.socketSend(mirrorSock, unbufMirrorOut, unbufMirrorIn, keyFactory, blockToken, targets[0]);
                unbufMirrorOut = saslStreams.out;
                unbufMirrorIn = saslStreams.in;
                mirrorOut = new DataOutputStream(new BufferedOutputStream(unbufMirrorOut, smallBufferSize));
                mirrorIn = new DataInputStream(unbufMirrorIn);
                if (targetPinnings != null && targetPinnings.length > 0) {
                    new Sender(mirrorOut).writeBlock(originalBlock, targetStorageTypes[0], blockToken, clientname, targets, targetStorageTypes, srcDataNode, stage, pipelineSize, minBytesRcvd, maxBytesRcvd, latestGenerationStamp, requestedChecksum, cachingStrategy, allowLazyPersist, targetPinnings[0], targetPinnings);
                } else {
                    new Sender(mirrorOut).writeBlock(originalBlock, targetStorageTypes[0], blockToken, clientname, targets, targetStorageTypes, srcDataNode, stage, pipelineSize, minBytesRcvd, maxBytesRcvd, latestGenerationStamp, requestedChecksum, cachingStrategy, allowLazyPersist, false, targetPinnings);
                }
                mirrorOut.flush();
                DataNodeFaultInjector.get().writeBlockAfterFlush();
                // read connect ack (only for clients, not for replication req)
                if (isClient) {
                    BlockOpResponseProto connectAck = BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(mirrorIn));
                    mirrorInStatus = connectAck.getStatus();
                    firstBadLink = connectAck.getFirstBadLink();
                    if (LOG.isDebugEnabled() || mirrorInStatus != SUCCESS) {
                        LOG.debug("Datanode " + targets.length + " got response for connect ack " + " from downstream datanode with firstbadlink as " + firstBadLink);
                    }
                }
            } catch (IOException e) {
                if (isClient) {
                    BlockOpResponseProto.newBuilder().setStatus(ERROR).setFirstBadLink(targets[0].getXferAddr()).build().writeDelimitedTo(replyOut);
                    replyOut.flush();
                }
                IOUtils.closeStream(mirrorOut);
                mirrorOut = null;
                IOUtils.closeStream(mirrorIn);
                mirrorIn = null;
                IOUtils.closeSocket(mirrorSock);
                mirrorSock = null;
                if (isClient) {
                    LOG.error(datanode + ":Exception transferring block " + block + " to mirror " + mirrorNode + ": " + e);
                    throw e;
                } else {
                    LOG.info(datanode + ":Exception transferring " + block + " to mirror " + mirrorNode + "- continuing without the mirror", e);
                    incrDatanodeNetworkErrors();
                }
            }
        }
        // send connect-ack to source for clients and not transfer-RBW/Finalized
        if (isClient && !isTransfer) {
            if (LOG.isDebugEnabled() || mirrorInStatus != SUCCESS) {
                LOG.debug("Datanode " + targets.length + " forwarding connect ack to upstream firstbadlink is " + firstBadLink);
            }
            BlockOpResponseProto.newBuilder().setStatus(mirrorInStatus).setFirstBadLink(firstBadLink).build().writeDelimitedTo(replyOut);
            replyOut.flush();
        }
        // receive the block and mirror to the next target
        if (blockReceiver != null) {
            String mirrorAddr = (mirrorSock == null) ? null : mirrorNode;
            blockReceiver.receiveBlock(mirrorOut, mirrorIn, replyOut, mirrorAddr, null, targets, false);
            // send close-ack for transfer-RBW/Finalized 
            if (isTransfer) {
                if (LOG.isTraceEnabled()) {
                    LOG.trace("TRANSFER: send close-ack");
                }
                writeResponse(SUCCESS, null, replyOut);
            }
        }
        // update its generation stamp
        if (isClient && stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
            block.setGenerationStamp(latestGenerationStamp);
            block.setNumBytes(minBytesRcvd);
        }
        // the block is finalized in the PacketResponder.
        if (isDatanode || stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
            datanode.closeBlock(block, null, storageUuid, isOnTransientStorage);
            LOG.info("Received " + block + " src: " + remoteAddress + " dest: " + localAddress + " of size " + block.getNumBytes());
        }
        if (isClient) {
            size = block.getNumBytes();
        }
    } catch (IOException ioe) {
        LOG.info("opWriteBlock " + block + " received exception " + ioe);
        incrDatanodeNetworkErrors();
        throw ioe;
    } finally {
        // close all opened streams
        IOUtils.closeStream(mirrorOut);
        IOUtils.closeStream(mirrorIn);
        IOUtils.closeStream(replyOut);
        IOUtils.closeSocket(mirrorSock);
        IOUtils.closeStream(blockReceiver);
        setCurrentBlockReceiver(null);
    }
    //update metrics
    datanode.getMetrics().addWriteBlockOp(elapsed());
    datanode.getMetrics().incrWritesFromClient(peer.isLocal(), size);
}
Also used : Status(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status) DataOutputStream(java.io.DataOutputStream) InetSocketAddress(java.net.InetSocketAddress) BufferedInputStream(java.io.BufferedInputStream) DataInputStream(java.io.DataInputStream) FileInputStream(java.io.FileInputStream) InputStream(java.io.InputStream) DataOutputStream(java.io.DataOutputStream) BufferedOutputStream(java.io.BufferedOutputStream) OutputStream(java.io.OutputStream) BlockOpResponseProto(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) ByteString(com.google.protobuf.ByteString) DataInputStream(java.io.DataInputStream) DataEncryptionKeyFactory(org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory) Sender(org.apache.hadoop.hdfs.protocol.datatransfer.Sender) IOStreamPair(org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair) BufferedOutputStream(java.io.BufferedOutputStream) DomainSocket(org.apache.hadoop.net.unix.DomainSocket) Socket(java.net.Socket)
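
One detail worth pulling out is the timeout arithmetic: both timeouts grow with the number of remaining targets, so each hop tolerates the forwarding delay of the rest of the pipeline. A sketch of that calculation; the base values below are assumptions standing in for dnConf.socketTimeout, dnConf.socketWriteTimeout, and the HdfsConstants extension constants:

public class PipelineTimeouts {
    // Assumed stand-ins for the configured datanode values.
    static final int SOCKET_TIMEOUT = 60_000;         // base read timeout, ms
    static final int READ_TIMEOUT_EXTENSION = 5_000;  // per remaining target, ms
    static final int SOCKET_WRITE_TIMEOUT = 480_000;  // base write timeout, ms
    static final int WRITE_TIMEOUT_EXTENSION = 5_000; // per remaining target, ms

    static int readTimeout(int remainingTargets) {
        return SOCKET_TIMEOUT + READ_TIMEOUT_EXTENSION * remainingTargets;
    }

    static int writeTimeout(int remainingTargets) {
        return SOCKET_WRITE_TIMEOUT + WRITE_TIMEOUT_EXTENSION * remainingTargets;
    }

    public static void main(String[] args) {
        // With three targets still downstream, each hop waits longer
        // before declaring the connection dead.
        System.out.println(readTimeout(3));  // 75000
        System.out.println(writeTimeout(3)); // 495000
    }
}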

Example 20 with Socket

use of java.net.Socket in project hadoop by apache.

the class BlockReaderTestUtil method getBlockReader.

/**
   * Get a BlockReader for the given block.
   */
public static BlockReader getBlockReader(final DistributedFileSystem fs, LocatedBlock testBlock, int offset, long lenToRead) throws IOException {
    InetSocketAddress targetAddr = null;
    ExtendedBlock block = testBlock.getBlock();
    DatanodeInfo[] nodes = testBlock.getLocations();
    targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
    return new BlockReaderFactory(fs.getClient().getConf()).setInetSocketAddress(targetAddr).setBlock(block).setFileName(targetAddr.toString() + ":" + block.getBlockId()).setBlockToken(testBlock.getBlockToken()).setStartOffset(offset).setLength(lenToRead).setVerifyChecksum(true).setClientName("BlockReaderTestUtil").setDatanodeInfo(nodes[0]).setClientCacheContext(ClientContext.getFromConf(fs.getConf())).setCachingStrategy(CachingStrategy.newDefaultStrategy()).setConfiguration(fs.getConf()).setAllowShortCircuitLocalReads(true).setTracer(FsTracer.get(fs.getConf())).setRemotePeerFactory(new RemotePeerFactory() {

        @Override
        public Peer newConnectedPeer(InetSocketAddress addr, Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId) throws IOException {
            Peer peer = null;
            Socket sock = NetUtils.getDefaultSocketFactory(fs.getConf()).createSocket();
            try {
                sock.connect(addr, HdfsConstants.READ_TIMEOUT);
                sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
                peer = DFSUtilClient.peerFromSocket(sock);
            } finally {
                if (peer == null) {
                    IOUtils.closeQuietly(sock);
                }
            }
            return peer;
        }
    }).build();
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) InetSocketAddress(java.net.InetSocketAddress) Peer(org.apache.hadoop.hdfs.net.Peer) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Token(org.apache.hadoop.security.token.Token) RemotePeerFactory(org.apache.hadoop.hdfs.RemotePeerFactory) Socket(java.net.Socket)
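
In the anonymous factory above, the finally block releases the socket only when peerFromSocket did not succeed, because afterwards the Peer owns it. A generic sketch of that connect-or-close handoff; the 60-second timeout is hard-coded here for illustration (the example takes it from HdfsConstants.READ_TIMEOUT):

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;

public class ConnectOrClose {
    static Socket connectOrClose(InetSocketAddress addr) throws IOException {
        Socket sock = new Socket();
        boolean handedOff = false;
        try {
            sock.connect(addr, 60_000);
            sock.setSoTimeout(60_000);
            handedOff = true;
            return sock;
        } finally {
            // Close only if the caller never receives the socket,
            // mirroring the closeQuietly-on-failure pattern above.
            if (!handedOff) {
                try {
                    sock.close();
                } catch (IOException ignored) {
                    // best-effort cleanup
                }
            }
        }
    }
}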

Aggregations

Socket (java.net.Socket) 1553
IOException (java.io.IOException) 664
ServerSocket (java.net.ServerSocket) 548
OutputStream (java.io.OutputStream) 364
InetSocketAddress (java.net.InetSocketAddress) 350
Test (org.junit.Test) 348
InputStream (java.io.InputStream) 249
InputStreamReader (java.io.InputStreamReader) 170
BufferedReader (java.io.BufferedReader) 151
SocketException (java.net.SocketException) 132
SSLSocket (javax.net.ssl.SSLSocket) 103
SocketTimeoutException (java.net.SocketTimeoutException) 97
UnknownHostException (java.net.UnknownHostException) 84
ConnectException (java.net.ConnectException) 82
ByteArrayOutputStream (java.io.ByteArrayOutputStream) 75
InetAddress (java.net.InetAddress) 73
OutputStreamWriter (java.io.OutputStreamWriter) 70
ServletOutputStream (javax.servlet.ServletOutputStream) 68
DataOutputStream (java.io.DataOutputStream) 67
CountDownLatch (java.util.concurrent.CountDownLatch) 64