
Example 1 with InterruptedIOException

Use of java.io.InterruptedIOException in project hadoop by apache.

From the class Shell, method runCommand().

/** Run the command. */
private void runCommand() throws IOException {
    ProcessBuilder builder = new ProcessBuilder(getExecString());
    Timer timeOutTimer = null;
    ShellTimeoutTimerTask timeoutTimerTask = null;
    timedOut.set(false);
    completed.set(false);
    // Remove all env vars from the builder so the child gets a clean
    // environment instead of inheriting the parent process's.
    if (!inheritParentEnv) {
        builder.environment().clear();
    }
    if (environment != null) {
        builder.environment().putAll(this.environment);
    }
    if (dir != null) {
        builder.directory(this.dir);
    }
    builder.redirectErrorStream(redirectErrorStream);
    if (Shell.WINDOWS) {
        synchronized (WindowsProcessLaunchLock) {
            // To workaround the race condition issue with child processes
            // inheriting unintended handles during process launch that can
            // lead to hangs on reading output and error streams, we
            // serialize process creation. More info available at:
            // http://support.microsoft.com/kb/315939
            process = builder.start();
        }
    } else {
        process = builder.start();
    }
    waitingThread = Thread.currentThread();
    CHILD_SHELLS.put(this, null);
    if (timeOutInterval > 0) {
        timeOutTimer = new Timer("Shell command timeout");
        timeoutTimerTask = new ShellTimeoutTimerTask(this);
        // One-time scheduling: the task fires once after timeOutInterval ms.
        timeOutTimer.schedule(timeoutTimerTask, timeOutInterval);
    }
    final BufferedReader errReader = new BufferedReader(new InputStreamReader(process.getErrorStream(), Charset.defaultCharset()));
    BufferedReader inReader = new BufferedReader(new InputStreamReader(process.getInputStream(), Charset.defaultCharset()));
    final StringBuffer errMsg = new StringBuffer();
    // drain both output streams so the child cannot block on full pipe
    // buffers; this thread frees up the error stream
    Thread errThread = new Thread() {

        @Override
        public void run() {
            try {
                String line = errReader.readLine();
                while ((line != null) && !isInterrupted()) {
                    errMsg.append(line);
                    errMsg.append(System.getProperty("line.separator"));
                    line = errReader.readLine();
                }
            } catch (IOException ioe) {
                // a "Stream closed" IOException is expected if the command
                // timed out and the process was destroyed,
                // so only log a WARN if the command didn't time out
                if (!isTimedOut()) {
                    LOG.warn("Error reading the error stream", ioe);
                } else {
                    LOG.debug("Error reading the error stream due to shell " + "command timeout", ioe);
                }
            }
        }
    };
    try {
        errThread.start();
    } catch (IllegalStateException ise) {
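        // Thread.start() throws IllegalStateException only if the thread was
        // already started; nothing useful can be done here, so ignore it.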
    } catch (OutOfMemoryError oe) {
        LOG.error("Caught " + oe + ". One possible reason is that ulimit" + " setting of 'max user processes' is too low. If so, do" + " 'ulimit -u <largerNum>' and try again.");
        throw oe;
    }
    try {
        // parse the output
        parseExecResult(inReader);
        // clear the input stream buffer
        String line = inReader.readLine();
        while (line != null) {
            line = inReader.readLine();
        }
        // wait for the process to finish and check the exit code
        exitCode = process.waitFor();
        // make sure that the error thread exits
        joinThread(errThread);
        completed.set(true);
        // timeout timer cleanup is taken care of in the finally block
        if (exitCode != 0) {
            throw new ExitCodeException(exitCode, errMsg.toString());
        }
    } catch (InterruptedException ie) {
        InterruptedIOException iie = new InterruptedIOException(ie.toString());
        iie.initCause(ie);
        throw iie;
    } finally {
        if (timeOutTimer != null) {
            timeOutTimer.cancel();
        }
        // close the input stream
        try {
            // JDK 7 tries to automatically drain the input streams for us
            // when the process exits, but since close is not synchronized,
            // it creates a race if we close the stream first and the same
            // fd is recycled.  the stream draining thread will attempt to
            // drain that fd!!  it may block, OOM, or cause bizarre behavior
            // see: https://bugs.openjdk.java.net/browse/JDK-8024521
            //      issue is fixed in build 7u60
            InputStream stdout = process.getInputStream();
            synchronized (stdout) {
                inReader.close();
            }
        } catch (IOException ioe) {
            LOG.warn("Error while closing the input stream", ioe);
        }
        if (!completed.get()) {
            errThread.interrupt();
            joinThread(errThread);
        }
        try {
            InputStream stderr = process.getErrorStream();
            synchronized (stderr) {
                errReader.close();
            }
        } catch (IOException ioe) {
            LOG.warn("Error while closing the error stream", ioe);
        }
        process.destroy();
        waitingThread = null;
        CHILD_SHELLS.remove(this);
        lastTime = Time.monotonicNow();
    }
}
Also used: InterruptedIOException (java.io.InterruptedIOException), InputStreamReader (java.io.InputStreamReader), InputStream (java.io.InputStream), IOException (java.io.IOException), Timer (java.util.Timer), BufferedReader (java.io.BufferedReader)
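
The heart of this example is the catch block near the end: runCommand() is declared to throw only IOException, so an InterruptedException from waitFor() is converted into an InterruptedIOException with the original exception attached via initCause(). A minimal standalone sketch of that idiom (the method name waitChecked is illustrative, not from Hadoop; unlike the snippet above, this sketch also restores the interrupt flag, as Example 5 below does):

import java.io.IOException;
import java.io.InterruptedIOException;

/** Waits for a child process inside an API that can only throw IOException. */
static int waitChecked(Process process) throws IOException {
    try {
        return process.waitFor();
    } catch (InterruptedException ie) {
        // restore the interrupt flag for callers that poll it
        Thread.currentThread().interrupt();
        // re-throw through the IOException hierarchy, keeping the
        // original exception as the cause, as runCommand() does
        InterruptedIOException iie = new InterruptedIOException(ie.toString());
        iie.initCause(ie);
        throw iie;
    }
}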

Example 2 with InterruptedIOException

Use of java.io.InterruptedIOException in project hadoop by apache.

From the class TestCopy, method testInterruptedCreate().

@Test
public void testInterruptedCreate() throws Exception {
    whenFsCreate().thenThrow(new InterruptedIOException());
    when(mockFs.getFileStatus(eq(tmpPath))).thenReturn(fileStat);
    FSDataInputStream in = mock(FSDataInputStream.class);
    tryCopyStream(in, false);
    verify(mockFs).delete(eq(tmpPath), anyBoolean());
    verify(mockFs, never()).rename(any(Path.class), any(Path.class));
    verify(mockFs, never()).delete(eq(path), anyBoolean());
    verify(mockFs, never()).close();
}
Also used: Path (org.apache.hadoop.fs.Path), InterruptedIOException (java.io.InterruptedIOException), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), Test (org.junit.Test)

Example 3 with InterruptedIOException

Use of java.io.InterruptedIOException in project hadoop by apache.

From the class DataXceiver, method run().

/**
   * Read/write data from/to the DataXceiverServer.
   */
@Override
public void run() {
    int opsProcessed = 0;
    Op op = null;
    try {
        synchronized (this) {
            xceiver = Thread.currentThread();
        }
        dataXceiverServer.addPeer(peer, Thread.currentThread(), this);
        peer.setWriteTimeout(datanode.getDnConf().socketWriteTimeout);
        InputStream input = socketIn;
        try {
            IOStreamPair saslStreams = datanode.saslServer.receive(peer, socketOut, socketIn, datanode.getXferAddress().getPort(), datanode.getDatanodeId());
            input = new BufferedInputStream(saslStreams.in, smallBufferSize);
            socketOut = saslStreams.out;
        } catch (InvalidMagicNumberException imne) {
            if (imne.isHandshake4Encryption()) {
                LOG.info("Failed to read expected encryption handshake from client " + "at " + peer.getRemoteAddressString() + ". Perhaps the client " + "is running an older version of Hadoop which does not support " + "encryption");
            } else {
                LOG.info("Failed to read expected SASL data transfer protection " + "handshake from client at " + peer.getRemoteAddressString() + ". Perhaps the client is running an older version of Hadoop " + "which does not support SASL data transfer protection");
            }
            return;
        }
        super.initialize(new DataInputStream(input));
        // Stay in this loop for a short keepalive timeout so clients can
        // reuse the connection; setting the keepalive timeout to 0 disables
        // this behavior.
        do {
            updateCurrentThreadName("Waiting for operation #" + (opsProcessed + 1));
            try {
                if (opsProcessed != 0) {
                    assert dnConf.socketKeepaliveTimeout > 0;
                    peer.setReadTimeout(dnConf.socketKeepaliveTimeout);
                } else {
                    peer.setReadTimeout(dnConf.socketTimeout);
                }
                op = readOp();
            } catch (InterruptedIOException ignored) {
                // Time out while we wait for client rpc
                break;
            } catch (EOFException | ClosedChannelException e) {
                // Since we optimistically expect the next op, it's quite normal to
                // get EOF here.
                LOG.debug("Cached {} closing after {} ops.  " + "This message is usually benign.", peer, opsProcessed);
                break;
            } catch (IOException err) {
                incrDatanodeNetworkErrors();
                throw err;
            }
            // restore normal timeout
            if (opsProcessed != 0) {
                peer.setReadTimeout(dnConf.socketTimeout);
            }
            opStartTime = monotonicNow();
            processOp(op);
            ++opsProcessed;
        } while ((peer != null) && (!peer.isClosed() && dnConf.socketKeepaliveTimeout > 0));
    } catch (Throwable t) {
        String s = datanode.getDisplayName() + ":DataXceiver error processing " + ((op == null) ? "unknown" : op.name()) + " operation " + " src: " + remoteAddress + " dst: " + localAddress;
        if (op == Op.WRITE_BLOCK && t instanceof ReplicaAlreadyExistsException) {
            // For WRITE_BLOCK, a ReplicaAlreadyExistsException is benign: the
            // client and replication may write the same block to the same
            // datanode at the same time.
            if (LOG.isTraceEnabled()) {
                LOG.trace(s, t);
            } else {
                LOG.info(s + "; " + t);
            }
        } else if (op == Op.READ_BLOCK && t instanceof SocketTimeoutException) {
            String s1 = "Likely the client has stopped reading, disconnecting it";
            s1 += " (" + s + ")";
            if (LOG.isTraceEnabled()) {
                LOG.trace(s1, t);
            } else {
                LOG.info(s1 + "; " + t);
            }
        } else if (t instanceof InvalidToken) {
            // The InvalidToken exception was already logged in the
            // checkAccess() method and this is not a server error.
            if (LOG.isTraceEnabled()) {
                LOG.trace(s, t);
            }
        } else {
            LOG.error(s, t);
        }
    } finally {
        collectThreadLocalStates();
        if (LOG.isDebugEnabled()) {
            LOG.debug(datanode.getDisplayName() + ":Number of active connections is: " + datanode.getXceiverCount());
        }
        updateCurrentThreadName("Cleaning up");
        if (peer != null) {
            dataXceiverServer.closePeer(peer);
            IOUtils.closeStream(in);
        }
    }
}
Also used: Op (org.apache.hadoop.hdfs.protocol.datatransfer.Op), InterruptedIOException (java.io.InterruptedIOException), ClosedChannelException (java.nio.channels.ClosedChannelException), BufferedInputStream (java.io.BufferedInputStream), DataInputStream (java.io.DataInputStream), FileInputStream (java.io.FileInputStream), InputStream (java.io.InputStream), InvalidMagicNumberException (org.apache.hadoop.hdfs.protocol.datatransfer.sasl.InvalidMagicNumberException), IOException (java.io.IOException), ByteString (com.google.protobuf.ByteString), SocketTimeoutException (java.net.SocketTimeoutException), IOStreamPair (org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair), EOFException (java.io.EOFException), InvalidToken (org.apache.hadoop.security.token.SecretManager.InvalidToken)
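
The catch (InterruptedIOException ignored) branch in the read loop works because java.net.SocketTimeoutException extends InterruptedIOException: once readOp() blocks past the keepalive timeout set via peer.setReadTimeout(...), the read raises a timeout, which the loop treats as the normal end of the keepalive window rather than an error. A rough sketch of the same pattern on a plain java.net.Socket (serveOps and handleOp are illustrative names, not DataXceiver code):

import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.net.Socket;

/** Reads ops from a socket until the peer goes quiet or disconnects. */
static void serveOps(Socket socket, int keepaliveMillis) throws IOException {
    DataInputStream in = new DataInputStream(socket.getInputStream());
    while (true) {
        try {
            // bound the wait for the next op, like peer.setReadTimeout(...)
            socket.setSoTimeout(keepaliveMillis);
            int opCode = in.readUnsignedByte();  // stand-in for readOp()
            handleOp(opCode);
        } catch (InterruptedIOException timeout) {
            // SocketTimeoutException: peer idle past keepalive; normal exit
            break;
        } catch (EOFException eof) {
            // peer closed the connection between ops; also benign
            break;
        }
    }
}

static void handleOp(int opCode) { /* dispatch elided */ }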

Example 4 with InterruptedIOException

Use of java.io.InterruptedIOException in project hadoop by apache.

From the class TestCopy, method testInterruptedRename().

@Test
public void testInterruptedRename() throws Exception {
    FSDataOutputStream out = mock(FSDataOutputStream.class);
    whenFsCreate().thenReturn(out);
    when(mockFs.getFileStatus(eq(tmpPath))).thenReturn(fileStat);
    when(mockFs.rename(eq(tmpPath), eq(path))).thenThrow(new InterruptedIOException());
    FSInputStream in = mock(FSInputStream.class);
    when(in.read(any(byte[].class), anyInt(), anyInt())).thenReturn(-1);
    tryCopyStream(in, false);
    verify(mockFs).delete(eq(tmpPath), anyBoolean());
    verify(mockFs).rename(eq(tmpPath), eq(path));
    verify(mockFs, never()).delete(eq(path), anyBoolean());
    verify(mockFs, never()).close();
}
Also used: InterruptedIOException (java.io.InterruptedIOException), FSInputStream (org.apache.hadoop.fs.FSInputStream), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
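
Together with testInterruptedCreate in Example 2, this test pins down a cleanup contract: whether the interrupt hits create() or rename(), the temporary file must be deleted, and the real destination must never be renamed over or deleted. A sketch of copy logic that satisfies those verifications (copyViaTmp is a hypothetical helper, not the actual CommandWithDestination implementation):

import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Copies to a temp file, then renames; cleans up the temp file on failure. */
static void copyViaTmp(InputStream in, FileSystem fs, Path tmp, Path target)
        throws IOException {
    try {
        try (FSDataOutputStream out = fs.create(tmp)) {  // Example 2 stubs this to throw
            byte[] buf = new byte[4096];
            for (int n = in.read(buf); n != -1; n = in.read(buf)) {
                out.write(buf, 0, n);
            }
        }
        fs.rename(tmp, target);                          // Example 4 stubs this to throw
    } catch (IOException e) {
        fs.delete(tmp, false);   // always remove the partial temp file
        throw e;                 // never touch the real target
    }
}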

Example 5 with InterruptedIOException

Use of java.io.InterruptedIOException in project hadoop by apache.

From the class DFSInputStream, method chooseDataNode().

private DNAddrPair chooseDataNode(LocatedBlock block, Collection<DatanodeInfo> ignoredNodes) throws IOException {
    while (true) {
        DNAddrPair result = getBestNodeDNAddrPair(block, ignoredNodes);
        if (result != null) {
            return result;
        } else {
            String errMsg = getBestNodeDNAddrPairErrorString(block.getLocations(), deadNodes, ignoredNodes);
            String blockInfo = block.getBlock() + " file=" + src;
            if (failures >= dfsClient.getConf().getMaxBlockAcquireFailures()) {
                String description = "Could not obtain block: " + blockInfo;
                DFSClient.LOG.warn(description + errMsg + ". Throwing a BlockMissingException");
                throw new BlockMissingException(src, description, block.getStartOffset());
            }
            DatanodeInfo[] nodes = block.getLocations();
            if (nodes == null || nodes.length == 0) {
                DFSClient.LOG.info("No node available for " + blockInfo);
            }
            DFSClient.LOG.info("Could not obtain " + block.getBlock() + " from any node: " + errMsg + ". Will get new block locations from namenode and retry...");
            try {
                // Introducing a random factor to the wait time before another retry.
                // The wait time is dependent on # of failures and a random factor.
                // At the first time of getting a BlockMissingException, the wait time
                // is a random number between 0..3000 ms. If the first retry
                // still fails, we will wait 3000 ms grace period before the 2nd retry.
                // Also at the second retry, the waiting window is expanded to 6000 ms
                // alleviating the request rate from the server. Similarly the 3rd retry
                // will wait 6000ms grace period before retry and the waiting window is
                // expanded to 9000ms.
                final int timeWindow = dfsClient.getConf().getTimeWindow();
                // grace period for the last round of attempts, plus an
                // expanding random time window for each failure
                double waitTime = timeWindow * failures
                        + timeWindow * (failures + 1)
                                * ThreadLocalRandom.current().nextDouble();
                DFSClient.LOG.warn("DFS chooseDataNode: got # " + (failures + 1) + " IOException, will wait for " + waitTime + " msec.");
                Thread.sleep((long) waitTime);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new InterruptedIOException("Interrupted while choosing DataNode for read.");
            }
            // a second option would be to remove only the nodes for this
            // block instead of clearing the entire dead-node set
            deadNodes.clear();
            openInfo(true);
            block = refreshLocatedBlock(block);
            failures++;
        }
    }
}
Also used: InterruptedIOException (java.io.InterruptedIOException), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo)
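
The wait computation above gives each retry a guaranteed grace period of timeWindow * failures plus a uniformly random share of a window that widens by timeWindow every round, matching the long comment in the code. A small standalone sketch of that schedule (retryWaitMillis is an illustrative name; 3000 ms is the default window mentioned in the comment):

import java.util.concurrent.ThreadLocalRandom;

/** Wait before retry number (failures + 1), per the scheme above. */
static long retryWaitMillis(int failures, int timeWindowMillis) {
    // guaranteed grace period: 0, 3000, 6000, ... ms with the default window
    double grace = (double) timeWindowMillis * failures;
    // random share of an expanding window: [0, 3000), [0, 6000), [0, 9000), ...
    double window = timeWindowMillis * (failures + 1.0)
            * ThreadLocalRandom.current().nextDouble();
    return (long) (grace + window);
}

// With timeWindowMillis = 3000:
//   failures = 0  ->  wait in [0, 3000)
//   failures = 1  ->  wait in [3000, 9000)
//   failures = 2  ->  wait in [6000, 15000)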

Aggregations

InterruptedIOException (java.io.InterruptedIOException): 274
IOException (java.io.IOException): 186
Test (org.junit.Test): 39
ArrayList (java.util.ArrayList): 27
Socket (java.net.Socket): 26
ConnectException (java.net.ConnectException): 22
ExecutionException (java.util.concurrent.ExecutionException): 22
InputStream (java.io.InputStream): 21
InetSocketAddress (java.net.InetSocketAddress): 21
ByteBuffer (java.nio.ByteBuffer): 21
Path (org.apache.hadoop.fs.Path): 20
NoRouteToHostException (java.net.NoRouteToHostException): 19
ServletException (javax.servlet.ServletException): 17
CountDownLatch (java.util.concurrent.CountDownLatch): 16
SocketTimeoutException (java.net.SocketTimeoutException): 15
HttpServletRequest (javax.servlet.http.HttpServletRequest): 15
HttpServletResponse (javax.servlet.http.HttpServletResponse): 15
EOFException (java.io.EOFException): 14
SocketException (java.net.SocketException): 14
OutputStream (java.io.OutputStream): 13