Use of java.net.Socket in project hadoop by apache.
The class DataStreamer, method createSocketForPipeline.
/**
 * Create a socket for a write pipeline.
 *
 * @param first the first datanode
 * @param length the pipeline length
 * @param client the DFS client
 * @return the socket connected to the first datanode
 */
static Socket createSocketForPipeline(final DatanodeInfo first,
    final int length, final DFSClient client) throws IOException {
  final DfsClientConf conf = client.getConf();
  final String dnAddr = first.getXferAddr(conf.isConnectToDnViaHostname());
  LOG.debug("Connecting to datanode {}", dnAddr);
  final InetSocketAddress isa = NetUtils.createSocketAddr(dnAddr);
  final Socket sock = client.socketFactory.createSocket();
  // The read timeout scales with the pipeline length: acks must traverse
  // every node in the pipeline before they reach the client.
  final int timeout = client.getDatanodeReadTimeout(length);
  NetUtils.connect(sock, isa, client.getRandomLocalInterfaceAddr(),
      conf.getSocketTimeout());
  sock.setTcpNoDelay(conf.getDataTransferTcpNoDelay());
  sock.setSoTimeout(timeout);
  sock.setKeepAlive(true);
  if (conf.getSocketSendBufferSize() > 0) {
    sock.setSendBufferSize(conf.getSocketSendBufferSize());
  }
  LOG.debug("Send buf size {}", sock.getSendBufferSize());
  return sock;
}
Use of java.net.Socket in project hadoop by apache.
The class NamenodeFsck, method copyBlock.
/*
* XXX (ab) Bulk of this method is copied verbatim from {@link DFSClient}, which is
* bad. Both places should be refactored to provide a method to copy blocks
* around.
*/
private void copyBlock(final DFSClient dfs, LocatedBlock lblock,
    OutputStream fos) throws Exception {
  int failures = 0;
  InetSocketAddress targetAddr = null;
  TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
  BlockReader blockReader = null;
  ExtendedBlock block = lblock.getBlock();
  while (blockReader == null) {
    DatanodeInfo chosenNode;
    try {
      chosenNode = bestNode(dfs, lblock.getLocations(), deadNodes);
      targetAddr = NetUtils.createSocketAddr(chosenNode.getXferAddr());
    } catch (IOException ie) {
      if (failures >= HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT) {
        throw new IOException("Could not obtain block " + lblock, ie);
      }
      LOG.info("Could not obtain block from any node: " + ie);
      try {
        Thread.sleep(10000);
      } catch (InterruptedException iex) {
        // ignore the interrupt and retry
      }
      deadNodes.clear();
      failures++;
      continue;
    }
    try {
      String file = BlockReaderFactory.getFileName(targetAddr,
          block.getBlockPoolId(), block.getBlockId());
      blockReader = new BlockReaderFactory(dfs.getConf())
          .setFileName(file)
          .setBlock(block)
          .setBlockToken(lblock.getBlockToken())
          .setStartOffset(0)
          .setLength(block.getNumBytes())
          .setVerifyChecksum(true)
          .setClientName("fsck")
          .setDatanodeInfo(chosenNode)
          .setInetSocketAddress(targetAddr)
          .setCachingStrategy(CachingStrategy.newDropBehind())
          .setClientCacheContext(dfs.getClientContext())
          .setConfiguration(namenode.getConf())
          .setTracer(tracer)
          .setRemotePeerFactory(new RemotePeerFactory() {
            @Override
            public Peer newConnectedPeer(InetSocketAddress addr,
                Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
                throws IOException {
              Peer peer = null;
              Socket s = NetUtils.getDefaultSocketFactory(conf).createSocket();
              try {
                s.connect(addr, HdfsConstants.READ_TIMEOUT);
                s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
                peer = DFSUtilClient.peerFromSocketAndKey(
                    dfs.getSaslDataTransferClient(), s, NamenodeFsck.this,
                    blockToken, datanodeId, HdfsConstants.READ_TIMEOUT);
              } finally {
                if (peer == null) {
                  // close the raw socket if peer creation failed
                  IOUtils.closeQuietly(s);
                }
              }
              return peer;
            }
          })
          .build();
    } catch (IOException ex) {
      // Put chosen node into dead list, continue
      LOG.info("Failed to connect to " + targetAddr + ":" + ex);
      deadNodes.add(chosenNode);
    }
  }
  byte[] buf = new byte[1024];
  int cnt = 0;
  boolean success = true;
  long bytesRead = 0;
  try {
    while ((cnt = blockReader.read(buf, 0, buf.length)) > 0) {
      fos.write(buf, 0, cnt);
      bytesRead += cnt;
    }
    if (bytesRead != block.getNumBytes()) {
      throw new IOException("Recorded block size is " + block.getNumBytes()
          + ", but datanode returned " + bytesRead + " bytes");
    }
  } catch (Exception e) {
    LOG.error("Error reading block", e);
    success = false;
  } finally {
    blockReader.close();
  }
  if (!success) {
    throw new Exception("Could not copy block data for " + lblock.getBlock());
  }
}
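The acquisition loop above follows a reusable shape: pick the best node not on a dead list, try to open it, add it to the dead list on failure, and once every node is dead, back off, clear the list, and count a failure against a cap. A stripped-down, hypothetical skeleton of that pattern (the Opener interface, node strings, and constants are illustrative stand-ins for the Hadoop-specific types):

import java.io.IOException;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

class DeadListRetry {
  interface Opener { AutoCloseable open(String node) throws IOException; }

  static AutoCloseable acquire(List<String> nodes, Opener opener,
      int maxFailures) throws IOException, InterruptedException {
    Set<String> dead = new HashSet<>();
    int failures = 0;
    while (true) {
      String chosen = pickLive(nodes, dead);
      if (chosen == null) {          // every node is on the dead list
        if (failures >= maxFailures) {
          throw new IOException("Could not open from any node");
        }
        Thread.sleep(10_000);        // back off, then give all nodes
        dead.clear();                // another chance, as fsck does
        failures++;
        continue;
      }
      try {
        return opener.open(chosen);
      } catch (IOException ex) {
        dead.add(chosen);            // exclude this node next iteration
      }
    }
  }

  private static String pickLive(List<String> nodes, Set<String> dead) {
    for (String n : nodes) {
      if (!dead.contains(n)) {
        return n;
      }
    }
    return null;
  }
}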
Use of java.net.Socket in project hadoop by apache.
The class TestBlockReplacement, method replaceBlock.
/*
 * Replace a block: send a replaceBlock request directly to the destination
 * datanode and poll its response until a terminal status arrives; returns
 * true iff that status matches the expected opStatus.
 */
private boolean replaceBlock(ExtendedBlock block, DatanodeInfo source,
    DatanodeInfo sourceProxy, DatanodeInfo destination,
    StorageType targetStorageType, Status opStatus)
    throws IOException, SocketException {
  Socket sock = new Socket();
  try {
    sock.connect(NetUtils.createSocketAddr(destination.getXferAddr()),
        HdfsConstants.READ_TIMEOUT);
    sock.setKeepAlive(true);
    // sendRequest
    DataOutputStream out = new DataOutputStream(sock.getOutputStream());
    new Sender(out).replaceBlock(block, targetStorageType,
        BlockTokenSecretManager.DUMMY_TOKEN, source.getDatanodeUuid(),
        sourceProxy);
    out.flush();
    // receiveResponse
    DataInputStream reply = new DataInputStream(sock.getInputStream());
    BlockOpResponseProto proto = BlockOpResponseProto.parseDelimitedFrom(reply);
    while (proto.getStatus() == Status.IN_PROGRESS) {
      proto = BlockOpResponseProto.parseDelimitedFrom(reply);
    }
    return proto.getStatus() == opStatus;
  } finally {
    sock.close();
  }
}
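The response loop shows a pattern worth noting for long-running datanode operations: the server streams interim IN_PROGRESS responses as keep-alives, and the client keeps reading until it sees a terminal status. A toy version over a bare socket, with a single status byte standing in for the delimited BlockOpResponseProto (the wire format here is invented for illustration):

import java.io.DataInputStream;
import java.io.IOException;
import java.net.Socket;

class PollUntilDone {
  static final byte IN_PROGRESS = 0;
  static final byte SUCCESS = 1;

  // Reads one status byte at a time until the server reports a terminal state.
  static boolean awaitCompletion(Socket sock) throws IOException {
    DataInputStream reply = new DataInputStream(sock.getInputStream());
    byte status = reply.readByte();
    while (status == IN_PROGRESS) { // interim responses double as keep-alives
      status = reply.readByte();
    }
    return status == SUCCESS;
  }
}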
Use of java.net.Socket in project hadoop by apache.
The class TestLdapGroupsMapping, method testLdapConnectionTimeout.
/**
 * Test that if {@link LdapGroupsMapping#CONNECTION_TIMEOUT} is set in the
 * configuration, the LdapGroupsMapping connection will time out after this
 * value if it does not get an LDAP response from the server.
 * @throws IOException
 * @throws InterruptedException
 */
@Test(timeout = 30000)
public void testLdapConnectionTimeout()
    throws IOException, InterruptedException {
  // 3s
  final int connectionTimeoutMs = 3 * 1000;
  try (ServerSocket serverSock = new ServerSocket(0)) {
    final CountDownLatch finLatch = new CountDownLatch(1);
    // Below we create an LDAP server which will accept a client request,
    // but never reply to the bind (connect) request.
    // The client of this LDAP server is expected to get a connection timeout.
    final Thread ldapServer = new Thread(new Runnable() {
      @Override
      public void run() {
        try {
          try (Socket ignored = serverSock.accept()) {
            finLatch.await();
          }
        } catch (Exception e) {
          e.printStackTrace();
        }
      }
    });
    ldapServer.start();
    final LdapGroupsMapping mapping = new LdapGroupsMapping();
    final Configuration conf = new Configuration();
    conf.set(LdapGroupsMapping.LDAP_URL_KEY,
        "ldap://localhost:" + serverSock.getLocalPort());
    conf.setInt(CONNECTION_TIMEOUT, connectionTimeoutMs);
    mapping.setConf(conf);
    try {
      mapping.doGetGroups("hadoop", 1);
      fail("The LDAP query should have timed out!");
    } catch (NamingException ne) {
      LOG.debug("Got the exception while LDAP querying: ", ne);
      assertExceptionContains("LDAP response read timed out, timeout used:"
          + connectionTimeoutMs + "ms", ne);
      assertFalse(ne.getMessage().contains("remaining name"));
    } finally {
      finLatch.countDown();
    }
    ldapServer.join();
  }
}
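Outside a test, the same knobs are set the same way. A sketch of wiring both timeouts into a real configuration, assuming the CONNECTION_TIMEOUT and READ_TIMEOUT constants that these tests import statically from LdapGroupsMapping (the LDAP URL is a placeholder):

Configuration conf = new Configuration();
conf.set(LdapGroupsMapping.LDAP_URL_KEY, "ldap://ldap.example.com:389");
// Fail fast if the TCP connect/bind hangs (values are illustrative).
conf.setInt(LdapGroupsMapping.CONNECTION_TIMEOUT, 3 * 1000);
// Bound how long each group query may wait for a reply.
conf.setInt(LdapGroupsMapping.READ_TIMEOUT, 4 * 1000);

LdapGroupsMapping mapping = new LdapGroupsMapping();
mapping.setConf(conf);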
Use of java.net.Socket in project hadoop by apache.
The class TestLdapGroupsMapping, method testLdapReadTimeout.
/**
 * Test that if {@link LdapGroupsMapping#READ_TIMEOUT} is set in the
 * configuration, the LdapGroupsMapping query will time out after this value
 * if it does not get an LDAP response from the server.
 *
 * @throws IOException
 * @throws InterruptedException
 */
@Test(timeout = 30000)
public void testLdapReadTimeout() throws IOException, InterruptedException {
  // 4s
  final int readTimeoutMs = 4 * 1000;
  try (ServerSocket serverSock = new ServerSocket(0)) {
    final CountDownLatch finLatch = new CountDownLatch(1);
    // Below we create an LDAP server which will accept a client request and
    // authenticate it successfully, but never reply to the following
    // query request.
    // The client of this LDAP server is expected to get a read timeout.
    final Thread ldapServer = new Thread(new Runnable() {
      @Override
      public void run() {
        try {
          try (Socket clientSock = serverSock.accept()) {
            IOUtils.skipFully(clientSock.getInputStream(), 1);
            clientSock.getOutputStream().write(AUTHENTICATE_SUCCESS_MSG);
            finLatch.await();
          }
        } catch (Exception e) {
          e.printStackTrace();
        }
      }
    });
    ldapServer.start();
    final LdapGroupsMapping mapping = new LdapGroupsMapping();
    final Configuration conf = new Configuration();
    conf.set(LdapGroupsMapping.LDAP_URL_KEY,
        "ldap://localhost:" + serverSock.getLocalPort());
    conf.setInt(READ_TIMEOUT, readTimeoutMs);
    mapping.setConf(conf);
    try {
      mapping.doGetGroups("hadoop", 1);
      fail("The LDAP query should have timed out!");
    } catch (NamingException ne) {
      LOG.debug("Got the exception while LDAP querying: ", ne);
      assertExceptionContains("LDAP response read timed out, timeout used:"
          + readTimeoutMs + "ms", ne);
      assertExceptionContains("remaining name", ne);
    } finally {
      finLatch.countDown();
    }
    ldapServer.join();
  }
}
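For context, the JDK's JNDI LDAP provider exposes the analogous timeouts as environment properties, which is presumably what LdapGroupsMapping configures under the hood (the wiring below is an assumption for illustration, not the class's verified internals):

import java.util.Hashtable;
import javax.naming.Context;
import javax.naming.NamingException;
import javax.naming.directory.InitialDirContext;

class RawJndiTimeouts {
  static InitialDirContext connect(String url) throws NamingException {
    Hashtable<String, String> env = new Hashtable<>();
    env.put(Context.INITIAL_CONTEXT_FACTORY, "com.sun.jndi.ldap.LdapCtxFactory");
    env.put(Context.PROVIDER_URL, url);
    // Standard JDK LDAP provider properties, in milliseconds (as strings).
    env.put("com.sun.jndi.ldap.connect.timeout", "3000");
    env.put("com.sun.jndi.ldap.read.timeout", "4000");
    return new InitialDirContext(env);
  }
}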