Use of java.net.SocketTimeoutException in project hadoop by apache.
From the class TestKMS, method testKMSTimeout.
/**
 * Test the configurable timeout in the KMSClientProvider. Open up a
 * socket, but don't accept connections on it. This leads to a timeout
 * when the KMS client attempts to connect.
 * @throws Exception
 */
@Test
public void testKMSTimeout() throws Exception {
  File confDir = getTestDir();
  Configuration conf = createBaseKMSConf(confDir);
  conf.setInt(KMSClientProvider.TIMEOUT_ATTR, 1);
  writeConf(confDir, conf);

  ServerSocket sock;
  int port;
  try {
    sock = new ServerSocket(0, 50, InetAddress.getByName("localhost"));
    port = sock.getLocalPort();
  } catch (Exception e) {
    /* Problem creating socket? Just bail. */
    return;
  }
  URL url = new URL("http://localhost:" + port + "/kms");
  URI uri = createKMSUri(url);

  boolean caughtTimeout = false;
  try {
    KeyProvider kp = createProvider(uri, conf);
    kp.getKeys();
  } catch (SocketTimeoutException e) {
    caughtTimeout = true;
  } catch (IOException e) {
    Assert.fail("Caught unexpected exception: " + e);
  }
  Assert.assertTrue(caughtTimeout);

  caughtTimeout = false;
  try {
    KeyProvider kp = createProvider(uri, conf);
    KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp)
        .generateEncryptedKey("a");
  } catch (SocketTimeoutException e) {
    caughtTimeout = true;
  } catch (IOException e) {
    Assert.fail("Caught unexpected exception: " + e);
  }
  Assert.assertTrue(caughtTimeout);

  caughtTimeout = false;
  try {
    KeyProvider kp = createProvider(uri, conf);
    KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp)
        .decryptEncryptedKey(new KMSClientProvider.KMSEncryptedKeyVersion(
            "a", "a", new byte[] { 1, 2 }, "EEK", new byte[] { 1, 2 }));
  } catch (SocketTimeoutException e) {
    caughtTimeout = true;
  } catch (IOException e) {
    Assert.fail("Caught unexpected exception: " + e);
  }
  Assert.assertTrue(caughtTimeout);

  sock.close();
}
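A note on the failure mode: the client's TCP connect actually succeeds here, because the kernel completes the handshake against the listening socket's backlog even though accept() is never called; it is the subsequent read that times out. The same behavior can be reproduced outside the KMS harness with plain JDK classes. A minimal sketch (the class name and the 1-second timeouts are illustrative, not from the Hadoop source):

import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.SocketTimeoutException;
import java.net.URL;

public class ReadTimeoutDemo {
  public static void main(String[] args) throws IOException {
    // Listen but never call accept(): the TCP handshake still completes
    // (the kernel queues the connection), so the HTTP request is sent
    // and then stalls waiting for a response that never comes.
    ServerSocket sock =
        new ServerSocket(0, 50, InetAddress.getByName("localhost"));
    URL url = new URL("http://localhost:" + sock.getLocalPort() + "/kms");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setConnectTimeout(1000);
    conn.setReadTimeout(1000); // analogous to KMSClientProvider.TIMEOUT_ATTR
    try {
      conn.getResponseCode();
      System.out.println("unexpected: got a response");
    } catch (SocketTimeoutException e) {
      System.out.println("timed out as expected: " + e.getMessage());
    } finally {
      sock.close();
    }
  }
}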
Use of java.net.SocketTimeoutException in project hadoop by apache.
From the class DataXceiver, method run.
/**
 * Read/write data from/to the DataXceiverServer.
 */
@Override
public void run() {
  int opsProcessed = 0;
  Op op = null;
  try {
    synchronized (this) {
      xceiver = Thread.currentThread();
    }
    dataXceiverServer.addPeer(peer, Thread.currentThread(), this);
    peer.setWriteTimeout(datanode.getDnConf().socketWriteTimeout);
    InputStream input = socketIn;
    try {
      IOStreamPair saslStreams = datanode.saslServer.receive(peer, socketOut,
          socketIn, datanode.getXferAddress().getPort(),
          datanode.getDatanodeId());
      input = new BufferedInputStream(saslStreams.in, smallBufferSize);
      socketOut = saslStreams.out;
    } catch (InvalidMagicNumberException imne) {
      if (imne.isHandshake4Encryption()) {
        LOG.info("Failed to read expected encryption handshake from client "
            + "at " + peer.getRemoteAddressString() + ". Perhaps the client "
            + "is running an older version of Hadoop which does not support "
            + "encryption");
      } else {
        LOG.info("Failed to read expected SASL data transfer protection "
            + "handshake from client at " + peer.getRemoteAddressString()
            + ". Perhaps the client is running an older version of Hadoop "
            + "which does not support SASL data transfer protection");
      }
      return;
    }
    super.initialize(new DataInputStream(input));
    // Process operations in a loop, waiting for the next one with a short
    // keepalive timeout so clients can reuse the connection. Setting the
    // keepalive timeout to 0 disables this behavior.
    do {
      updateCurrentThreadName("Waiting for operation #" + (opsProcessed + 1));
      try {
        if (opsProcessed != 0) {
          assert dnConf.socketKeepaliveTimeout > 0;
          peer.setReadTimeout(dnConf.socketKeepaliveTimeout);
        } else {
          peer.setReadTimeout(dnConf.socketTimeout);
        }
        op = readOp();
      } catch (InterruptedIOException ignored) {
        // Timed out while waiting for the client's next request.
        break;
      } catch (EOFException | ClosedChannelException e) {
        // Since we optimistically expect the next op, it's quite normal to
        // get EOF here.
        LOG.debug("Cached {} closing after {} ops. "
            + "This message is usually benign.", peer, opsProcessed);
        break;
      } catch (IOException err) {
        incrDatanodeNetworkErrors();
        throw err;
      }
      // restore normal timeout
      if (opsProcessed != 0) {
        peer.setReadTimeout(dnConf.socketTimeout);
      }
      opStartTime = monotonicNow();
      processOp(op);
      ++opsProcessed;
    } while ((peer != null)
        && (!peer.isClosed() && dnConf.socketKeepaliveTimeout > 0));
  } catch (Throwable t) {
    String s = datanode.getDisplayName() + ":DataXceiver error processing "
        + ((op == null) ? "unknown" : op.name()) + " operation "
        + " src: " + remoteAddress + " dst: " + localAddress;
    if (op == Op.WRITE_BLOCK && t instanceof ReplicaAlreadyExistsException) {
      // For WRITE_BLOCK, the replica may already exist, since a client and
      // replication can write the same block to the same datanode
      // at the same time.
      if (LOG.isTraceEnabled()) {
        LOG.trace(s, t);
      } else {
        LOG.info(s + "; " + t);
      }
    } else if (op == Op.READ_BLOCK && t instanceof SocketTimeoutException) {
      String s1 = "Likely the client has stopped reading, disconnecting it";
      s1 += " (" + s + ")";
      if (LOG.isTraceEnabled()) {
        LOG.trace(s1, t);
      } else {
        LOG.info(s1 + "; " + t);
      }
    } else if (t instanceof InvalidToken) {
      // The InvalidToken exception has already been logged by the
      // checkAccess() method, and this is not a server error.
      if (LOG.isTraceEnabled()) {
        LOG.trace(s, t);
      }
    } else {
      LOG.error(s, t);
    }
  } finally {
    collectThreadLocalStates();
    if (LOG.isDebugEnabled()) {
      LOG.debug(datanode.getDisplayName()
          + ":Number of active connections is: " + datanode.getXceiverCount());
    }
    updateCurrentThreadName("Cleaning up");
    if (peer != null) {
      dataXceiverServer.closePeer(peer);
      IOUtils.closeStream(in);
    }
  }
}
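The timeout pattern here is general: wait generously for a client's first request, wait only briefly (the keepalive timeout) for follow-up requests, and treat SocketTimeoutException or EOF as a normal end of conversation rather than an error. A minimal sketch of the same loop with plain JDK sockets (all class names and timeout values are illustrative, not from the Hadoop source):

import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.net.Socket;
import java.net.SocketTimeoutException;

public class KeepaliveLoopSketch {
  static final int FIRST_REQUEST_TIMEOUT_MS = 60_000;
  static final int KEEPALIVE_TIMEOUT_MS = 4_000;

  static void serve(Socket peer) throws IOException {
    DataInputStream in = new DataInputStream(peer.getInputStream());
    int opsProcessed = 0;
    try {
      while (true) {
        // Wait generously for the first request, briefly for follow-ups,
        // so an idle client cannot pin this thread forever.
        peer.setSoTimeout(opsProcessed == 0
            ? FIRST_REQUEST_TIMEOUT_MS : KEEPALIVE_TIMEOUT_MS);
        int opCode;
        try {
          opCode = in.readInt(); // blocks until a request or the timeout
        } catch (SocketTimeoutException e) {
          break; // idle client: hang up and reclaim the thread
        } catch (EOFException e) {
          break; // client closed the connection; normal and benign
        }
        // Restore the normal timeout while the operation itself runs,
        // mirroring the "restore normal timeout" step in DataXceiver.
        peer.setSoTimeout(FIRST_REQUEST_TIMEOUT_MS);
        process(opCode);
        opsProcessed++;
      }
    } finally {
      peer.close();
    }
  }

  static void process(int opCode) {
    // handle one operation (its reads/writes would happen here)
  }
}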
Use of java.net.SocketTimeoutException in project hadoop by apache.
From the class TestDFSClientRetries, method testLeaseRenewSocketTimeout.
/**
 * Test that the DFSClient can continue to function after the renewLease
 * RPC receives a SocketTimeoutException.
 */
@Test
public void testLeaseRenewSocketTimeout() throws Exception {
  String file1 = "/testFile1";
  String file2 = "/testFile2";
  // Set short retry timeouts so this test runs faster
  conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
  conf.setInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, 2 * 1000);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitActive();
    NamenodeProtocols spyNN = spy(cluster.getNameNodeRpc());
    Mockito.doThrow(new SocketTimeoutException()).when(spyNN)
        .renewLease(Mockito.anyString());
    DFSClient client = new DFSClient(null, spyNN, conf, null);
    // Get hold of the lease renewer instance used by the client
    LeaseRenewer leaseRenewer = client.getLeaseRenewer();
    leaseRenewer.setRenewalTime(100);
    OutputStream out1 = client.create(file1, false);
    Mockito.verify(spyNN, timeout(10000).times(1))
        .renewLease(Mockito.anyString());
    verifyEmptyLease(leaseRenewer);
    try {
      out1.write(new byte[256]);
      fail("existing output stream should be aborted");
    } catch (IOException e) {
      // expected: the stream is aborted once lease renewal fails
    }
    // Verify the DFSClient can do a read operation after renewLease aborted.
    client.exists(file2);
    // Verify the DFSClient can do a write operation once renewLease no
    // longer throws SocketTimeoutException.
    Mockito.doNothing().when(spyNN).renewLease(Mockito.anyString());
    leaseRenewer = client.getLeaseRenewer();
    leaseRenewer.setRenewalTime(100);
    OutputStream out2 = client.create(file2, false);
    Mockito.verify(spyNN, timeout(10000).times(2))
        .renewLease(Mockito.anyString());
    out2.write(new byte[256]);
    out2.close();
    verifyEmptyLease(leaseRenewer);
  } finally {
    cluster.shutdown();
  }
}
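The core testing technique — stub a Mockito spy to throw SocketTimeoutException, exercise the failure path, then re-stub it to succeed and verify recovery — works for any interface, not just NamenodeProtocols. A minimal self-contained sketch (RemoteService and all names are hypothetical stand-ins, not Hadoop APIs):

import static org.mockito.Mockito.*;

import java.io.IOException;
import java.net.SocketTimeoutException;

public class TimeoutStubbingSketch {
  // Hypothetical dependency; stands in for NamenodeProtocols in the test.
  interface RemoteService {
    void renewLease(String clientName) throws IOException;
  }

  static class RealService implements RemoteService {
    @Override
    public void renewLease(String clientName) throws IOException {
      // would normally talk to the remote server
    }
  }

  public static void main(String[] args) throws IOException {
    RemoteService spyService = spy(new RealService());

    // Phase 1: every renewLease call times out.
    doThrow(new SocketTimeoutException("injected"))
        .when(spyService).renewLease(anyString());
    try {
      spyService.renewLease("client-1");
    } catch (SocketTimeoutException e) {
      System.out.println("failure path exercised: " + e.getMessage());
    }

    // Phase 2: renewLease succeeds again, so recovery can be verified.
    doNothing().when(spyService).renewLease(anyString());
    spyService.renewLease("client-1");
    verify(spyService, times(2)).renewLease(anyString());
  }
}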
Use of java.net.SocketTimeoutException in project hadoop by apache.
From the class TestDFSClientRetries, method testClientDNProtocolTimeout.
/**
 * Test that a timeout occurs when the DataNode does not respond to an RPC.
 * Start up a server and ask it to sleep for n seconds. Make an RPC to the
 * server with rpcTimeout set to less than n, and ensure that a
 * SocketTimeoutException is raised.
 */
@Test
public void testClientDNProtocolTimeout() throws IOException {
  final Server server = new TestServer(1, true);
  server.start();
  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
  ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
  LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
  ClientDatanodeProtocol proxy = null;
  try {
    proxy = DFSUtilClient.createClientDatanodeProtocolProxy(
        fakeDnId, conf, 500, false, fakeBlock);
    proxy.getReplicaVisibleLength(new ExtendedBlock("bpid", 1));
    fail("Did not get expected exception: SocketTimeoutException");
  } catch (SocketTimeoutException e) {
    LOG.info("Got the expected Exception: SocketTimeoutException");
  } finally {
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
    server.stop();
  }
}
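The same mechanics can be seen without the Hadoop RPC stack: give the server a response delay longer than the client's socket timeout, and the client's read fails with SocketTimeoutException. A minimal sketch (the class name, the 5-second delay, and the 500 ms timeout are illustrative; the 500 ms mirrors the rpcTimeout passed to createClientDatanodeProtocolProxy above):

import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketTimeoutException;

public class SlowServerDemo {
  public static void main(String[] args) throws Exception {
    ServerSocket server =
        new ServerSocket(0, 1, InetAddress.getByName("localhost"));
    Thread slow = new Thread(() -> {
      try (Socket s = server.accept()) {
        Thread.sleep(5_000); // "sleep for n seconds" before replying
        s.getOutputStream().write(42);
      } catch (Exception ignored) {
      }
    });
    slow.start();

    try (Socket client = new Socket("localhost", server.getLocalPort())) {
      client.setSoTimeout(500); // client gives up well before the reply
      client.getInputStream().read(); // blocks; server won't answer in time
      System.out.println("unexpected: got a reply");
    } catch (SocketTimeoutException e) {
      System.out.println("got the expected SocketTimeoutException");
    } finally {
      server.close();
      slow.join();
    }
  }
}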
Use of java.net.SocketTimeoutException in project hadoop by apache.
From the class TestWebHdfsTimeouts, method testConnectTimeout.
/**
 * Expect a connect timeout, because the connection backlog is consumed.
 */
@Test(timeout = TEST_TIMEOUT)
public void testConnectTimeout() throws Exception {
  consumeConnectionBacklog();
  try {
    fs.listFiles(new Path("/"), false);
    fail("expected timeout");
  } catch (SocketTimeoutException e) {
    GenericTestUtils.assertExceptionContains(
        fs.getUri().getAuthority() + ": connect timed out", e);
  }
}
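The consumeConnectionBacklog() helper is not shown here, but the idea can be sketched with plain sockets: listen with a tiny backlog, never call accept(), and open connections until the kernel's accept queue is full, so that a further connect attempt stalls in the TCP handshake and times out. A minimal sketch; note the behavior is OS-dependent (the number of queued connections may exceed the nominal backlog, and some systems reset overflowing connections instead of dropping them), and all names and counts below are illustrative:

import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketTimeoutException;
import java.util.ArrayList;
import java.util.List;

public class BacklogDemo {
  public static void main(String[] args) throws IOException {
    // Listen with a backlog of 1 and never call accept().
    ServerSocket server =
        new ServerSocket(0, 1, InetAddress.getByName("localhost"));
    List<Socket> fillers = new ArrayList<>();
    try {
      // Fill the accept queue; a few extra connects account for OS slack.
      for (int i = 0; i < 8; i++) {
        Socket filler = new Socket();
        filler.connect(new InetSocketAddress("localhost",
            server.getLocalPort()), 500);
        fillers.add(filler);
      }
      System.out.println("backlog not exhausted; OS queued every connection");
    } catch (SocketTimeoutException e) {
      // Once the queue is full, the handshake stalls and connect() times out.
      System.out.println("got the expected connect timeout: " + e.getMessage());
    } catch (IOException e) {
      // Some systems reset overflowing connections instead of dropping them.
      System.out.println("connect failed rather than timing out: " + e);
    } finally {
      for (Socket s : fillers) {
        s.close();
      }
      server.close();
    }
  }
}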