Use of java.net.SocketTimeoutException in project hadoop by apache.
The class BackupNode, method registerWith.
/**
 * Register this backup node with the active name-node.
 * @param nsInfo namespace information
 * @throws IOException if registration fails
 */
private void registerWith(NamespaceInfo nsInfo) throws IOException {
  BackupImage bnImage = (BackupImage) getFSImage();
  NNStorage storage = bnImage.getStorage();
  // verify namespaceID
  if (storage.getNamespaceID() == 0) {
    // new backup storage
    storage.setStorageInfo(nsInfo);
    storage.setBlockPoolID(nsInfo.getBlockPoolID());
    storage.setClusterID(nsInfo.getClusterID());
  } else {
    nsInfo.validateStorage(storage);
  }
  bnImage.initEditLog(StartupOption.REGULAR);
  setRegistration();
  NamenodeRegistration nnReg = null;
  while (!isStopRequested()) {
    try {
      nnReg = namenode.registerSubordinateNamenode(getRegistration());
      break;
    } catch (SocketTimeoutException e) {
      // name-node is busy; wait and retry
      LOG.info("Problem connecting to name-node: " + nnRpcAddress);
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ie) {
        LOG.warn("Encountered exception ", ie);
      }
    }
  }
  String msg = null;
  if (nnReg == null) {
    // consider as a rejection
    msg = "Registration rejected by " + nnRpcAddress;
  } else if (!nnReg.isRole(NamenodeRole.NAMENODE)) {
    msg = "Name-node " + nnRpcAddress + " is not active";
  }
  if (msg != null) {
    msg += ". Shutting down.";
    LOG.error(msg);
    // stop the node
    throw new IOException(msg);
  }
  nnRpcAddress = nnReg.getAddress();
}
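The retry loop above is a general pattern: treat SocketTimeoutException as a transient "server busy" signal, back off, and retry until a stop is requested or the retry budget runs out. A minimal self-contained sketch of the same idea using only the JDK; connectWithRetry and MAX_ATTEMPTS are illustrative names, not Hadoop APIs:

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.SocketTimeoutException;

public class RetryOnTimeout {
  private static final int MAX_ATTEMPTS = 5; // hypothetical retry budget

  // Try to connect, treating each timeout as a transient failure.
  static Socket connectWithRetry(InetSocketAddress addr, int timeoutMs)
      throws IOException, InterruptedException {
    for (int attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
      Socket s = new Socket();
      try {
        s.connect(addr, timeoutMs);
        return s; // success
      } catch (SocketTimeoutException e) {
        s.close();
        if (attempt == MAX_ATTEMPTS) {
          throw e; // retry budget exhausted
        }
        Thread.sleep(1000L * attempt); // linear back-off between attempts
      }
    }
    throw new AssertionError("unreachable");
  }
}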
Use of java.net.SocketTimeoutException in project hadoop by apache.
The class TestDistributedFileSystem, method testDFSClientPeerReadTimeout.
@Test(timeout = 10000)
public void testDFSClientPeerReadTimeout() throws IOException {
  final int timeout = 1000;
  final Configuration conf = new HdfsConfiguration();
  conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, timeout);
  // only need the cluster to create a DFS client and get a peer
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitActive();
    DistributedFileSystem dfs = cluster.getFileSystem();
    // use a dummy socket that never sends data to ensure the read times out
    ServerSocket socket = new ServerSocket(0);
    Peer peer = dfs.getClient().newConnectedPeer(
        (InetSocketAddress) socket.getLocalSocketAddress(), null, null);
    long start = Time.now();
    try {
      peer.getInputStream().read();
      Assert.fail("read should time out");
    } catch (SocketTimeoutException ste) {
      long delta = Time.now() - start;
      if (delta < timeout * 0.9) {
        throw new IOException("read timed out too soon in " + delta + " ms.", ste);
      }
      if (delta > timeout * 1.1) {
        throw new IOException("read timed out too late in " + delta + " ms.", ste);
      }
    }
  } finally {
    cluster.shutdown();
  }
}
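The trick in this test is that a freshly bound ServerSocket completes the TCP handshake via the OS backlog but never writes a byte, so the client's read blocks until its socket timeout fires. The same effect can be reproduced with nothing but the JDK; this is an illustrative sketch, not Hadoop code:

import java.io.IOException;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketTimeoutException;

public class ReadTimeoutDemo {
  public static void main(String[] args) throws IOException {
    // Server that accepts connections (via the OS backlog) but never sends data.
    try (ServerSocket silent = new ServerSocket(0);
         Socket client = new Socket()) {
      client.connect(silent.getLocalSocketAddress(), 1000);
      client.setSoTimeout(1000); // read timeout in milliseconds
      long start = System.currentTimeMillis();
      try {
        client.getInputStream().read(); // blocks: nothing ever arrives
      } catch (SocketTimeoutException expected) {
        System.out.println("read timed out after "
            + (System.currentTimeMillis() - start) + " ms");
      }
    }
  }
}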
Use of java.net.SocketTimeoutException in project hadoop by apache.
The class TestDistributedFileSystem, method testDFSClientPeerWriteTimeout.
@Test(timeout = 10000)
public void testDFSClientPeerWriteTimeout() throws IOException {
  final int timeout = 1000;
  final Configuration conf = new HdfsConfiguration();
  conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, timeout);
  // only need the cluster to create a DFS client and get a peer
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitActive();
    DistributedFileSystem dfs = cluster.getFileSystem();
    // write 10 MB to a dummy socket that never reads, to ensure the write times out
    ServerSocket socket = new ServerSocket(0);
    Peer peer = dfs.getClient().newConnectedPeer(
        (InetSocketAddress) socket.getLocalSocketAddress(), null, null);
    long start = Time.now();
    try {
      byte[] buf = new byte[10 * 1024 * 1024];
      peer.getOutputStream().write(buf);
      long delta = Time.now() - start;
      Assert.fail("write finished in " + delta + " ms but should have timed out");
    } catch (SocketTimeoutException ste) {
      long delta = Time.now() - start;
      if (delta < timeout * 0.9) {
        throw new IOException("write timed out too soon in " + delta + " ms.", ste);
      }
      if (delta > timeout * 1.2) {
        throw new IOException("write timed out too late in " + delta + " ms.", ste);
      }
    }
  } finally {
    cluster.shutdown();
  }
}
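Note that a plain blocking java.net write never throws SocketTimeoutException; the 10 MB buffer overflows both the local send buffer and the remote receive buffer (the server never reads), and Hadoop's peer streams turn the resulting stall into a timeout by layering a deadline over non-blocking NIO (org.apache.hadoop.net.SocketIOWithTimeout). A minimal sketch of that technique, with illustrative names and a hypothetical timeoutMs parameter, assuming a per-stall rather than total deadline:

import java.io.IOException;
import java.net.SocketTimeoutException;
import java.nio.ByteBuffer;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.SocketChannel;

public class TimedWrite {
  // Write all of buf, waiting at most timeoutMs for the socket to become
  // writable whenever the kernel send buffer is full.
  static void writeFully(SocketChannel ch, ByteBuffer buf, long timeoutMs)
      throws IOException {
    ch.configureBlocking(false);
    try (Selector selector = Selector.open()) {
      SelectionKey key = ch.register(selector, SelectionKey.OP_WRITE);
      while (buf.hasRemaining()) {
        ch.write(buf); // may write 0 bytes if the send buffer is full
        if (!buf.hasRemaining()) {
          break;
        }
        // Block until writable again, or give up after timeoutMs.
        if (selector.select(timeoutMs) == 0) {
          throw new SocketTimeoutException(
              "write timed out after " + timeoutMs + " ms");
        }
        selector.selectedKeys().clear();
      }
      key.cancel();
    }
  }
}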
Use of java.net.SocketTimeoutException in project hadoop by apache.
The class TestDomainSocket, method testServerOptions.
/**
 * Test setting some server options.
 *
 * @throws Exception
 */
@Test(timeout = 180000)
public void testServerOptions() throws Exception {
  final String TEST_PATH =
      new File(sockDir.getDir(), "test_sock_server_options").getAbsolutePath();
  DomainSocket serv = DomainSocket.bindAndListen(TEST_PATH);
  try {
    // Let's set a new receive buffer size
    int bufSize = serv.getAttribute(DomainSocket.RECEIVE_BUFFER_SIZE);
    int newBufSize = bufSize / 2;
    serv.setAttribute(DomainSocket.RECEIVE_BUFFER_SIZE, newBufSize);
    int nextBufSize = serv.getAttribute(DomainSocket.RECEIVE_BUFFER_SIZE);
    Assert.assertEquals(newBufSize, nextBufSize);
    // Let's set a server timeout
    int newTimeout = 1000;
    serv.setAttribute(DomainSocket.RECEIVE_TIMEOUT, newTimeout);
    int nextTimeout = serv.getAttribute(DomainSocket.RECEIVE_TIMEOUT);
    Assert.assertEquals(newTimeout, nextTimeout);
    try {
      serv.accept();
      Assert.fail("expected the accept() to time out and fail");
    } catch (SocketTimeoutException e) {
      GenericTestUtils.assertExceptionContains("accept(2) error: ", e);
    }
  } finally {
    serv.close();
    Assert.assertFalse(serv.isOpen());
  }
}
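DomainSocket mirrors the buffer-size and timeout semantics of ordinary TCP server sockets; with the plain JDK the same accept() timeout looks like this (an illustrative sketch, not part of the Hadoop test; note the kernel may grant a different buffer size than requested, so the granted value is printed rather than asserted):

import java.io.IOException;
import java.net.ServerSocket;
import java.net.SocketTimeoutException;

public class AcceptTimeoutDemo {
  public static void main(String[] args) throws IOException {
    try (ServerSocket server = new ServerSocket(0)) {
      // Shrink the receive buffer, then read back the value actually granted.
      server.setReceiveBufferSize(server.getReceiveBufferSize() / 2);
      System.out.println("receive buffer: " + server.getReceiveBufferSize());
      // accept() will give up after one second if no client connects.
      server.setSoTimeout(1000);
      try {
        server.accept();
      } catch (SocketTimeoutException expected) {
        System.out.println("accept() timed out as expected");
      }
    }
  }
}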
Use of java.net.SocketTimeoutException in project hadoop by apache.
The class BlockSender, method sendPacket.
/**
 * Sends a packet with up to maxChunks chunks of data.
 *
 * @param pkt buffer used for writing packet data
 * @param maxChunks maximum number of chunks to send
 * @param out stream to send data to
 * @param transferTo use transferTo to send data
 * @param throttler used for throttling data transfer bandwidth
 * @return the number of data bytes sent
 */
private int sendPacket(ByteBuffer pkt, int maxChunks, OutputStream out,
    boolean transferTo, DataTransferThrottler throttler) throws IOException {
  int dataLen = (int) Math.min(endOffset - offset, (chunkSize * (long) maxChunks));
  // Number of chunks to be sent in the packet
  int numChunks = numberOfChunks(dataLen);
  int checksumDataLen = numChunks * checksumSize;
  int packetLen = dataLen + checksumDataLen + 4;
  boolean lastDataPacket = offset + dataLen == endOffset && dataLen > 0;
  // The packet buffer is organized as follows:
  // _______HHHHCCCCD?D?D?D?
  //        ^   ^
  //        |   \ checksumOff
  //        \ headerOff
  // _ padding, since the header is variable-length
  // H = header and length prefixes
  // C = checksums
  // D? = data, if transferTo is false
  int headerLen = writePacketHeader(pkt, dataLen, packetLen);
  // Per above, the header doesn't start at the beginning of the buffer
  int headerOff = pkt.position() - headerLen;
  int checksumOff = pkt.position();
  byte[] buf = pkt.array();
  if (checksumSize > 0 && ris.getChecksumIn() != null) {
    readChecksum(buf, checksumOff, checksumDataLen);
    // a write may still be in progress; use its last chunk checksum
    if (lastDataPacket && lastChunkChecksum != null) {
      int start = checksumOff + checksumDataLen - checksumSize;
      byte[] updatedChecksum = lastChunkChecksum.getChecksum();
      if (updatedChecksum != null) {
        System.arraycopy(updatedChecksum, 0, buf, start, checksumSize);
      }
    }
  }
  int dataOff = checksumOff + checksumDataLen;
  if (!transferTo) {
    // normal transfer: read the data into the packet buffer
    ris.readDataFully(buf, dataOff, dataLen);
    if (verifyChecksum) {
      verifyChecksum(buf, dataOff, dataLen, numChunks, checksumOff);
    }
  }
  try {
    if (transferTo) {
      SocketOutputStream sockOut = (SocketOutputStream) out;
      // First write header and checksums
      sockOut.write(buf, headerOff, dataOff - headerOff);
      // no need to flush since we know out is not a buffered stream
      FileChannel fileCh = ((FileInputStream) ris.getDataIn()).getChannel();
      LongWritable waitTime = new LongWritable();
      LongWritable transferTime = new LongWritable();
      fileIoProvider.transferToSocketFully(ris.getVolumeRef().getVolume(),
          sockOut, fileCh, blockInPosition, dataLen, waitTime, transferTime);
      datanode.metrics.addSendDataPacketBlockedOnNetworkNanos(waitTime.get());
      datanode.metrics.addSendDataPacketTransferNanos(transferTime.get());
      blockInPosition += dataLen;
    } else {
      // normal transfer: header, checksums and data in one write
      out.write(buf, headerOff, dataOff + dataLen - headerOff);
    }
  } catch (IOException e) {
    if (e instanceof SocketTimeoutException) {
      /*
       * Writing to the client timed out. This happens if the client reads
       * part of a block and then decides not to read the rest (but leaves
       * the socket open).
       *
       * Reporting of this case is done in DataXceiver#run.
       */
    } else {
      /* Exception while writing to the client. Connection closure from
       * the other end is mostly the case and we do not care much about
       * it. But other things can go wrong, especially in transferTo(),
       * which we do not want to ignore.
       *
       * The message parsing below should not be considered as a good
       * coding example. NEVER do it to drive program logic. NEVER.
       * It was done here because NIO throws an IOException for EPIPE.
       */
      String ioem = e.getMessage();
      if (!ioem.startsWith("Broken pipe") && !ioem.startsWith("Connection reset")) {
        LOG.error("BlockSender.sendChunks() exception: ", e);
        datanode.getBlockScanner().markSuspectBlock(
            ris.getVolumeRef().getVolume().getStorageID(), block);
      }
    }
    throw ioeToSocketException(e);
  }
  if (throttler != null) {
    // rebalancing, so throttle
    throttler.throttle(packetLen);
  }
  return dataLen;
}
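The transferTo branch is the interesting one: FileChannel.transferTo lets the kernel move the block data from the file to the socket without copying it through user space, which is why only the header and checksums pass through the packet buffer. A reduced sketch of that zero-copy loop, with hypothetical blockFile/position/length parameters standing in for BlockSender's state (the real code also measures wait and transfer time and enforces socket timeouts, which this sketch omits):

import java.io.IOException;
import java.io.OutputStream;
import java.nio.channels.Channels;
import java.nio.channels.FileChannel;
import java.nio.channels.WritableByteChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class ZeroCopySend {
  // Push [position, position + length) of the file into the socket stream,
  // letting the kernel do the copying.
  static void sendRange(Path blockFile, OutputStream socketOut,
      long position, long length) throws IOException {
    WritableByteChannel sockCh = Channels.newChannel(socketOut);
    try (FileChannel fileCh = FileChannel.open(blockFile, StandardOpenOption.READ)) {
      long pos = position;
      long remaining = length;
      while (remaining > 0) {
        // transferTo may move fewer bytes than asked; loop until done
        long n = fileCh.transferTo(pos, remaining, sockCh);
        pos += n;
        remaining -= n;
      }
    }
  }
}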