Example 6 with SocketTimeoutException

Use of java.net.SocketTimeoutException in project hadoop by apache.

The class TestKMS, method testKMSTimeout.

/**
   * Test the configurable timeout in the KMSClientProvider.  Open up a
   * socket, but don't accept connections for it.  This leads to a timeout
   * when the KMS client attempts to connect.
   * @throws Exception
   */
@Test
public void testKMSTimeout() throws Exception {
    File confDir = getTestDir();
    Configuration conf = createBaseKMSConf(confDir);
    conf.setInt(KMSClientProvider.TIMEOUT_ATTR, 1);
    writeConf(confDir, conf);
    ServerSocket sock;
    int port;
    try {
        sock = new ServerSocket(0, 50, InetAddress.getByName("localhost"));
        port = sock.getLocalPort();
    } catch (Exception e) {
        /* Problem creating socket?  Just bail. */
        return;
    }
    URL url = new URL("http://localhost:" + port + "/kms");
    URI uri = createKMSUri(url);
    boolean caughtTimeout = false;
    try {
        KeyProvider kp = createProvider(uri, conf);
        kp.getKeys();
    } catch (SocketTimeoutException e) {
        caughtTimeout = true;
    } catch (IOException e) {
        Assert.assertTrue("Caught unexpected exception" + e.toString(), false);
    }
    caughtTimeout = false;
    try {
        KeyProvider kp = createProvider(uri, conf);
        KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp).generateEncryptedKey("a");
    } catch (SocketTimeoutException e) {
        caughtTimeout = true;
    } catch (IOException e) {
        Assert.assertTrue("Caught unexpected exception" + e.toString(), false);
    }
    caughtTimeout = false;
    try {
        KeyProvider kp = createProvider(uri, conf);
        KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp).decryptEncryptedKey(new KMSClientProvider.KMSEncryptedKeyVersion("a", "a", new byte[] { 1, 2 }, "EEK", new byte[] { 1, 2 }));
    } catch (SocketTimeoutException e) {
        caughtTimeout = true;
    } catch (IOException e) {
        Assert.assertTrue("Caught unexpected exception" + e.toString(), false);
    }
    Assert.assertTrue(caughtTimeout);
    sock.close();
}
Also used: KeyProvider (org.apache.hadoop.crypto.key.KeyProvider), Configuration (org.apache.hadoop.conf.Configuration), ServerSocket (java.net.ServerSocket), IOException (java.io.IOException), URI (java.net.URI), URL (java.net.URL), KMSClientProvider (org.apache.hadoop.crypto.key.kms.KMSClientProvider), LoadBalancingKMSClientProvider (org.apache.hadoop.crypto.key.kms.LoadBalancingKMSClientProvider), AuthorizationException (org.apache.hadoop.security.authorize.AuthorizationException), SocketTimeoutException (java.net.SocketTimeoutException), File (java.io.File), Test (org.junit.Test)
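
The mechanism behind this test is easy to reproduce outside the KMS: a ServerSocket that is bound but never accepted from leaves the client blocked until its timeout fires. Below is a minimal standalone sketch of that pattern using only java.net; the class and variable names are illustrative, not Hadoop APIs. Note that on loopback the TCP handshake completes via the listen backlog even without accept(), so what actually fires is the read timeout.

import java.io.IOException;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketTimeoutException;

public class ReadTimeoutSketch {
    public static void main(String[] args) throws IOException {
        // Bind a listener but never call accept(): the client can still
        // connect, then blocks on read() until SO_TIMEOUT fires.
        ServerSocket silent = new ServerSocket(0, 50, InetAddress.getByName("localhost"));
        try (Socket client = new Socket("localhost", silent.getLocalPort())) {
            client.setSoTimeout(100); // read timeout, in milliseconds
            client.getInputStream().read();
            throw new AssertionError("expected a read timeout");
        } catch (SocketTimeoutException expected) {
            System.out.println("Timed out as expected: " + expected.getMessage());
        } finally {
            silent.close();
        }
    }
}

The KMS test exercises the same behavior one protocol layer up: the HTTP client inside KMSClientProvider performs the blocked read and surfaces the SocketTimeoutException.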

Example 7 with SocketTimeoutException

Use of java.net.SocketTimeoutException in project hadoop by apache.

The class DataXceiver, method run.

/**
   * Read/write data from/to the DataXceiverServer.
   */
@Override
public void run() {
    int opsProcessed = 0;
    Op op = null;
    try {
        synchronized (this) {
            xceiver = Thread.currentThread();
        }
        dataXceiverServer.addPeer(peer, Thread.currentThread(), this);
        peer.setWriteTimeout(datanode.getDnConf().socketWriteTimeout);
        InputStream input = socketIn;
        try {
            IOStreamPair saslStreams = datanode.saslServer.receive(peer, socketOut, socketIn, datanode.getXferAddress().getPort(), datanode.getDatanodeId());
            input = new BufferedInputStream(saslStreams.in, smallBufferSize);
            socketOut = saslStreams.out;
        } catch (InvalidMagicNumberException imne) {
            if (imne.isHandshake4Encryption()) {
                LOG.info("Failed to read expected encryption handshake from client " + "at " + peer.getRemoteAddressString() + ". Perhaps the client " + "is running an older version of Hadoop which does not support " + "encryption");
            } else {
                LOG.info("Failed to read expected SASL data transfer protection " + "handshake from client at " + peer.getRemoteAddressString() + ". Perhaps the client is running an older version of Hadoop " + "which does not support SASL data transfer protection");
            }
            return;
        }
        super.initialize(new DataInputStream(input));
        // Process operations in a loop, staying around briefly between
        // requests so the client can reuse the connection. Setting the
        // keepalive timeout to 0 disables this behavior.
        do {
            updateCurrentThreadName("Waiting for operation #" + (opsProcessed + 1));
            try {
                if (opsProcessed != 0) {
                    assert dnConf.socketKeepaliveTimeout > 0;
                    peer.setReadTimeout(dnConf.socketKeepaliveTimeout);
                } else {
                    peer.setReadTimeout(dnConf.socketTimeout);
                }
                op = readOp();
            } catch (InterruptedIOException ignored) {
                // Time out while we wait for client rpc
                break;
            } catch (EOFException | ClosedChannelException e) {
                // Since we optimistically expect the next op, it's quite normal to
                // get EOF here.
                LOG.debug("Cached {} closing after {} ops.  " + "This message is usually benign.", peer, opsProcessed);
                break;
            } catch (IOException err) {
                incrDatanodeNetworkErrors();
                throw err;
            }
            // restore normal timeout
            if (opsProcessed != 0) {
                peer.setReadTimeout(dnConf.socketTimeout);
            }
            opStartTime = monotonicNow();
            processOp(op);
            ++opsProcessed;
        } while ((peer != null) && (!peer.isClosed() && dnConf.socketKeepaliveTimeout > 0));
    } catch (Throwable t) {
        String s = datanode.getDisplayName() + ":DataXceiver error processing " + ((op == null) ? "unknown" : op.name()) + " operation " + " src: " + remoteAddress + " dst: " + localAddress;
        if (op == Op.WRITE_BLOCK && t instanceof ReplicaAlreadyExistsException) {
            // Another writer may have created the same replica concurrently
            // (e.g. a retried write), so don't log a full stack trace.
            if (LOG.isTraceEnabled()) {
                LOG.trace(s, t);
            } else {
                LOG.info(s + "; " + t);
            }
        } else if (op == Op.READ_BLOCK && t instanceof SocketTimeoutException) {
            String s1 = "Likely the client has stopped reading, disconnecting it";
            s1 += " (" + s + ")";
            if (LOG.isTraceEnabled()) {
                LOG.trace(s1, t);
            } else {
                LOG.info(s1 + "; " + t);
            }
        } else if (t instanceof InvalidToken) {
            // The InvalidToken exception has already been logged by the
            // checkAccess() method, and this is not a server error.
            if (LOG.isTraceEnabled()) {
                LOG.trace(s, t);
            }
        } else {
            LOG.error(s, t);
        }
    } finally {
        collectThreadLocalStates();
        if (LOG.isDebugEnabled()) {
            LOG.debug(datanode.getDisplayName() + ":Number of active connections is: " + datanode.getXceiverCount());
        }
        updateCurrentThreadName("Cleaning up");
        if (peer != null) {
            dataXceiverServer.closePeer(peer);
            IOUtils.closeStream(in);
        }
    }
}
Also used: Op (org.apache.hadoop.hdfs.protocol.datatransfer.Op), InterruptedIOException (java.io.InterruptedIOException), ClosedChannelException (java.nio.channels.ClosedChannelException), BufferedInputStream (java.io.BufferedInputStream), DataInputStream (java.io.DataInputStream), FileInputStream (java.io.FileInputStream), InputStream (java.io.InputStream), InvalidMagicNumberException (org.apache.hadoop.hdfs.protocol.datatransfer.sasl.InvalidMagicNumberException), IOException (java.io.IOException), ByteString (com.google.protobuf.ByteString), SocketTimeoutException (java.net.SocketTimeoutException), IOStreamPair (org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair), EOFException (java.io.EOFException), InvalidToken (org.apache.hadoop.security.token.SecretManager.InvalidToken)
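
The timeout juggling in the loop above is a reusable server-side pattern: wait for the next operation under a short keepalive timeout, then restore the normal I/O timeout while serving it. Here is a stripped-down sketch of just that loop over a plain Socket instead of Hadoop's Peer abstraction; all names are illustrative.

import java.io.IOException;
import java.io.InputStream;
import java.net.Socket;
import java.net.SocketTimeoutException;

class KeepaliveLoopSketch {
    // Serve operations from one connection. 'keepaliveMillis' bounds the
    // idle wait between requests; 'ioMillis' applies while serving one.
    static void serve(Socket peer, int keepaliveMillis, int ioMillis) throws IOException {
        InputStream in = peer.getInputStream();
        int opsProcessed = 0;
        while (!peer.isClosed()) {
            // Short timeout once the first op is done: an idle client
            // should not pin this thread forever.
            peer.setSoTimeout(opsProcessed == 0 ? ioMillis : keepaliveMillis);
            int op;
            try {
                op = in.read();
            } catch (SocketTimeoutException idle) {
                break; // client went quiet between ops; drop the connection
            }
            if (op < 0) {
                break; // EOF: client closed cleanly
            }
            peer.setSoTimeout(ioMillis); // restore the normal timeout for the op
            // ... dispatch and process 'op' here ...
            opsProcessed++;
        }
    }
}

SocketTimeoutException extends InterruptedIOException, which is why the Hadoop loop above can treat InterruptedIOException as the keepalive expiry.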

Example 8 with SocketTimeoutException

Use of java.net.SocketTimeoutException in project hadoop by apache.

The class TestDFSClientRetries, method testLeaseRenewSocketTimeout.

/**
   * Test that the DFSClient can continue to function after the renewLease
   * RPC receives a SocketTimeoutException.
   */
@Test
public void testLeaseRenewSocketTimeout() throws Exception {
    String file1 = "/testFile1";
    String file2 = "/testFile2";
    // Set short retry timeouts so this test runs faster
    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
    conf.setInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, 2 * 1000);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        cluster.waitActive();
        NamenodeProtocols spyNN = spy(cluster.getNameNodeRpc());
        Mockito.doThrow(new SocketTimeoutException()).when(spyNN).renewLease(Mockito.anyString());
        DFSClient client = new DFSClient(null, spyNN, conf, null);
        // Get hold of the lease renewer instance used by the client
        LeaseRenewer leaseRenewer = client.getLeaseRenewer();
        leaseRenewer.setRenewalTime(100);
        OutputStream out1 = client.create(file1, false);
        Mockito.verify(spyNN, timeout(10000).times(1)).renewLease(Mockito.anyString());
        verifyEmptyLease(leaseRenewer);
        try {
            out1.write(new byte[256]);
            fail("existing output stream should be aborted");
        } catch (IOException e) {
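            // Expected: the stream was aborted after lease renewal failed.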
        }
        // Verify DFSClient can do read operation after renewLease aborted.
        client.exists(file2);
        // Verify DFSClient can do write operation after renewLease no longer
        // throws SocketTimeoutException.
        Mockito.doNothing().when(spyNN).renewLease(Mockito.anyString());
        leaseRenewer = client.getLeaseRenewer();
        leaseRenewer.setRenewalTime(100);
        OutputStream out2 = client.create(file2, false);
        Mockito.verify(spyNN, timeout(10000).times(2)).renewLease(Mockito.anyString());
        out2.write(new byte[256]);
        out2.close();
        verifyEmptyLease(leaseRenewer);
    } finally {
        cluster.shutdown();
    }
}
Also used: NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols), SocketTimeoutException (java.net.SocketTimeoutException), LeaseRenewer (org.apache.hadoop.hdfs.client.impl.LeaseRenewer), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), OutputStream (java.io.OutputStream), Matchers.anyString (org.mockito.Matchers.anyString), IOException (java.io.IOException), Test (org.junit.Test)
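
The fault-injection idiom in this test, doThrow to break an RPC and doNothing to heal it, works on any Mockito mock or spy. The following self-contained sketch uses a hypothetical LeaseProtocol interface standing in for NamenodeProtocols:

import java.io.IOException;
import java.net.SocketTimeoutException;
import org.mockito.Mockito;

public class FaultInjectionSketch {
    // Hypothetical stand-in for the NameNode RPC interface.
    interface LeaseProtocol {
        void renewLease(String clientName) throws IOException;
    }

    public static void main(String[] args) throws IOException {
        LeaseProtocol mock = Mockito.mock(LeaseProtocol.class);
        // Inject a timeout into every renewal...
        Mockito.doThrow(new SocketTimeoutException("injected"))
                .when(mock).renewLease(Mockito.anyString());
        try {
            mock.renewLease("client-1");
        } catch (SocketTimeoutException expected) {
            System.out.println("renewLease failed as injected");
        }
        // ...then clear the fault so later renewals succeed.
        Mockito.doNothing().when(mock).renewLease(Mockito.anyString());
        mock.renewLease("client-1"); // completes normally now
    }
}

doThrow and doNothing configure the stub without invoking the real method, which matters for void methods like renewLease, where the when(mock.renewLease(...)) form would not compile.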

Example 9 with SocketTimeoutException

Use of java.net.SocketTimeoutException in project hadoop by apache.

The class TestDFSClientRetries, method testClientDNProtocolTimeout.

/** Test that a timeout occurs when the DN does not respond to an RPC.
   * Start up a server and ask it to sleep for n seconds. Make an
   * RPC to the server with rpcTimeout set to less than n, and ensure
   * that a SocketTimeoutException is raised.
   */
@Test
public void testClientDNProtocolTimeout() throws IOException {
    final Server server = new TestServer(1, true);
    server.start();
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
    ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
    LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
    ClientDatanodeProtocol proxy = null;
    try {
        proxy = DFSUtilClient.createClientDatanodeProtocolProxy(fakeDnId, conf, 500, false, fakeBlock);
        proxy.getReplicaVisibleLength(new ExtendedBlock("bpid", 1));
        fail("Did not get expected exception: SocketTimeoutException");
    } catch (SocketTimeoutException e) {
        LOG.info("Got the expected Exception: SocketTimeoutException");
    } finally {
        if (proxy != null) {
            RPC.stopProxy(proxy);
        }
        server.stop();
    }
}
Also used: DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), SocketTimeoutException (java.net.SocketTimeoutException), Server (org.apache.hadoop.ipc.Server), InetSocketAddress (java.net.InetSocketAddress), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Block (org.apache.hadoop.hdfs.protocol.Block), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol), Test (org.junit.Test)
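
Stripped of the Hadoop RPC machinery, the scenario reduces to a server that accepts but stalls longer than the client's timeout before answering. A plain-socket sketch of the same shape, with no Hadoop classes and illustrative names:

import java.io.IOException;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketTimeoutException;

public class SlowServerSketch {
    public static void main(String[] args) throws IOException {
        ServerSocket server = new ServerSocket(0, 1, InetAddress.getByName("localhost"));
        // The "sleeping RPC server": accept, then stall past the client's timeout.
        Thread responder = new Thread(() -> {
            try (Socket s = server.accept()) {
                Thread.sleep(5_000); // sleep for "n seconds"
                s.getOutputStream().write(0);
            } catch (Exception ignored) {
                // closed under us during shutdown; acceptable in a sketch
            }
        });
        responder.setDaemon(true);
        responder.start();
        try (Socket client = new Socket("localhost", server.getLocalPort())) {
            client.setSoTimeout(500); // the "rpcTimeout", well under n
            client.getInputStream().read();
            throw new AssertionError("expected the call to time out");
        } catch (SocketTimeoutException expected) {
            System.out.println("Got the expected SocketTimeoutException");
        } finally {
            server.close();
        }
    }
}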

Example 10 with SocketTimeoutException

Use of java.net.SocketTimeoutException in project hadoop by apache.

The class TestWebHdfsTimeouts, method testConnectTimeout.

/**
   * Expect connect timeout, because the connection backlog is consumed.
   */
@Test(timeout = TEST_TIMEOUT)
public void testConnectTimeout() throws Exception {
    consumeConnectionBacklog();
    try {
        fs.listFiles(new Path("/"), false);
        fail("expected timeout");
    } catch (SocketTimeoutException e) {
        GenericTestUtils.assertExceptionContains(fs.getUri().getAuthority() + ": connect timed out", e);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), SocketTimeoutException (java.net.SocketTimeoutException), Test (org.junit.Test)
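
Forcing a connect timeout, rather than a read timeout, takes one extra step: fill the server's accept backlog first so the next TCP handshake gets no answer. The sketch below shows one way a helper like consumeConnectionBacklog() could work; how many connections a kernel admits beyond the nominal backlog is platform-dependent, so the filler count is illustrative.

import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketTimeoutException;
import java.util.ArrayList;
import java.util.List;

public class ConnectTimeoutSketch {
    public static void main(String[] args) throws IOException {
        // Backlog of 1, and we never accept(), so the queue stays full.
        ServerSocket server = new ServerSocket(0, 1, InetAddress.getByName("localhost"));
        InetSocketAddress addr = new InetSocketAddress("localhost", server.getLocalPort());
        List<Socket> fillers = new ArrayList<>();
        try {
            // Consume the backlog; kernels often admit a few connections
            // beyond the nominal value, hence the generous filler count.
            for (int i = 0; i < 16; i++) {
                Socket filler = new Socket();
                try {
                    filler.connect(addr, 200);
                    fillers.add(filler);
                } catch (IOException full) {
                    break; // the queue is full, which is the state we want
                }
            }
            try (Socket victim = new Socket()) {
                victim.connect(addr, 200); // expect "connect timed out"
                System.out.println("backlog not exhausted on this platform");
            } catch (SocketTimeoutException expected) {
                System.out.println("Connect timed out as expected");
            }
        } finally {
            for (Socket s : fillers) {
                s.close();
            }
            server.close();
        }
    }
}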

Aggregations

SocketTimeoutException (java.net.SocketTimeoutException): 369 uses
IOException (java.io.IOException): 200 uses
Test (org.junit.Test): 91 uses
Socket (java.net.Socket): 52 uses
SocketException (java.net.SocketException): 46 uses
InputStream (java.io.InputStream): 43 uses
ServerSocket (java.net.ServerSocket): 42 uses
InetSocketAddress (java.net.InetSocketAddress): 38 uses
ConnectException (java.net.ConnectException): 34 uses
UnknownHostException (java.net.UnknownHostException): 31 uses
OutputStream (java.io.OutputStream): 27 uses
MalformedURLException (java.net.MalformedURLException): 27 uses
URL (java.net.URL): 27 uses
DatagramPacket (java.net.DatagramPacket): 25 uses
HttpURLConnection (java.net.HttpURLConnection): 23 uses
HashMap (java.util.HashMap): 21 uses
File (java.io.File): 20 uses
ArrayList (java.util.ArrayList): 20 uses
InterruptedIOException (java.io.InterruptedIOException): 19 uses
BufferedInputStream (java.io.BufferedInputStream): 18 uses