Example 51 with ClosedByInterruptException

Use of java.nio.channels.ClosedByInterruptException in project google-cloud-java by GoogleCloudPlatform.

Class ExceptionHandlerTest, method testShouldTry.

@Test
public void testShouldTry() {
    ExceptionHandler handler = ExceptionHandler.newBuilder().retryOn(IOException.class).build();
    assertTrue(handler.accept(new IOException()));
    assertTrue(handler.accept(new ClosedByInterruptException()));
    assertFalse(handler.accept(new RuntimeException()));
    ExceptionHandler.Builder builder = ExceptionHandler.newBuilder().retryOn(IOException.class, NullPointerException.class).abortOn(RuntimeException.class, ClosedByInterruptException.class, InterruptedException.class);
    handler = builder.build();
    assertTrue(handler.accept(new IOException()));
    assertFalse(handler.accept(new ClosedByInterruptException()));
    assertFalse(handler.accept(new InterruptedException()));
    assertFalse(handler.accept(new RuntimeException()));
    assertTrue(handler.accept(new NullPointerException()));
    final AtomicReference<RetryResult> before = new AtomicReference<>(RetryResult.NO_RETRY);
    @SuppressWarnings("serial") Interceptor interceptor = new Interceptor() {

        @Override
        public RetryResult afterEval(Exception exception, RetryResult retryResult) {
            return retryResult == RetryResult.NO_RETRY ? RetryResult.RETRY : RetryResult.NO_RETRY;
        }

        @Override
        public RetryResult beforeEval(Exception exception) {
            return before.get();
        }
    };
    builder.addInterceptors(interceptor);
    handler = builder.build();
    assertFalse(handler.accept(new IOException()));
    assertFalse(handler.accept(new ClosedByInterruptException()));
    assertFalse(handler.accept(new InterruptedException()));
    assertFalse(handler.accept(new RuntimeException()));
    assertFalse(handler.accept(new NullPointerException()));
    before.set(RetryResult.RETRY);
    assertTrue(handler.accept(new IOException()));
    assertTrue(handler.accept(new ClosedByInterruptException()));
    assertTrue(handler.accept(new InterruptedException()));
    assertTrue(handler.accept(new RuntimeException()));
    assertTrue(handler.accept(new NullPointerException()));
    before.set(RetryResult.CONTINUE_EVALUATION);
    assertFalse(handler.accept(new IOException()));
    assertTrue(handler.accept(new ClosedByInterruptException()));
    assertTrue(handler.accept(new InterruptedException()));
    assertTrue(handler.accept(new RuntimeException()));
    assertFalse(handler.accept(new NullPointerException()));
}
Also used : RetryResult(com.google.cloud.ExceptionHandler.Interceptor.RetryResult) AtomicReference(java.util.concurrent.atomic.AtomicReference) IOException(java.io.IOException) FileNotFoundException(java.io.FileNotFoundException) ClosedByInterruptException(java.nio.channels.ClosedByInterruptException) ExpectedException(org.junit.rules.ExpectedException) Interceptor(com.google.cloud.ExceptionHandler.Interceptor) Test(org.junit.Test)
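
Note: the first three assertions pass because ClosedByInterruptException extends AsynchronousCloseException, which extends ClosedChannelException, which extends IOException, so a handler that retries on IOException also retries on it; only when the builder later lists it under abortOn does the more specific rule win. A minimal sketch of that hierarchy (the class name HierarchyCheck is just for illustration):

import java.io.IOException;
import java.nio.channels.ClosedByInterruptException;
import java.nio.channels.ClosedChannelException;

public class HierarchyCheck {

    public static void main(String[] args) {
        // ClosedByInterruptException -> AsynchronousCloseException -> ClosedChannelException -> IOException
        ClosedByInterruptException e = new ClosedByInterruptException();
        System.out.println(e instanceof ClosedChannelException); // true
        System.out.println(e instanceof IOException);            // true, so retryOn(IOException.class) matches it
    }
}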

Example 52 with ClosedByInterruptException

Use of java.nio.channels.ClosedByInterruptException in project nifi by apache.

Class StandardSocketChannelHandler, method run.

@Override
public void run() {
    boolean eof = false;
    SocketChannel socketChannel = null;
    try {
        int bytesRead;
        socketChannel = (SocketChannel) key.channel();
        final SocketChannelAttachment attachment = (SocketChannelAttachment) key.attachment();
        final ByteBuffer socketBuffer = attachment.getByteBuffer();
        // read until the buffer is full
        while ((bytesRead = socketChannel.read(socketBuffer)) > 0) {
            // prepare byte buffer for reading
            socketBuffer.flip();
            // mark the current position as start, in case of partial message read
            socketBuffer.mark();
            // process the contents that have been read into the buffer
            processBuffer(socketChannel, socketBuffer);
            // Preserve bytes in buffer for next call to run
            // NOTE: this code could benefit from using two ByteBuffer read calls to avoid
            // this compact() for higher throughput
            socketBuffer.reset();
            socketBuffer.compact();
            logger.debug("bytes read {}", new Object[] { bytesRead });
        }
        // Check for closed socket
        if (bytesRead < 0) {
            eof = true;
            logger.debug("Reached EOF, closing connection");
        } else {
            logger.debug("No more data available, returning for selection");
        }
    } catch (ClosedByInterruptException | InterruptedException e) {
        logger.debug("read loop interrupted, closing connection");
        // Treat same as closed socket
        eof = true;
    } catch (ClosedChannelException e) {
        // ClosedChannelException doesn't have a message so handle it separately from IOException
        logger.error("Error reading from channel due to channel being closed", e);
        // Treat same as closed socket
        eof = true;
    } catch (IOException e) {
        logger.error("Error reading from channel due to {}", new Object[] { e.getMessage() }, e);
        // Treat same as closed socket
        eof = true;
    } finally {
        if (eof) {
            IOUtils.closeQuietly(socketChannel);
            dispatcher.completeConnection(key);
        } else {
            dispatcher.addBackForSelection(key);
        }
    }
}
Also used : ClosedByInterruptException(java.nio.channels.ClosedByInterruptException) SocketChannel(java.nio.channels.SocketChannel) ClosedChannelException(java.nio.channels.ClosedChannelException) IOException(java.io.IOException) ByteBuffer(java.nio.ByteBuffer) SocketChannelAttachment(org.apache.nifi.processor.util.listen.dispatcher.SocketChannelAttachment)
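
Note: the ClosedByInterruptException catch exists because interrupting a thread that is blocked in an interruptible channel operation closes the channel and raises that exception, so the handler can only treat it like a closed socket. A minimal, self-contained sketch of that mechanism (InterruptedReadDemo and the loopback setup are illustrative, not NiFi code):

import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedByInterruptException;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;

public class InterruptedReadDemo {

    public static void main(String[] args) throws Exception {
        try (ServerSocketChannel server = ServerSocketChannel.open()) {
            server.bind(new InetSocketAddress("127.0.0.1", 0));
            Thread reader = new Thread(() -> {
                try (SocketChannel client = SocketChannel.open(server.getLocalAddress())) {
                    // Blocks here: the server side never writes any data.
                    client.read(ByteBuffer.allocate(16));
                } catch (ClosedByInterruptException e) {
                    System.out.println("read interrupted, channel closed: " + e);
                } catch (Exception e) {
                    System.out.println("unexpected: " + e);
                }
            });
            reader.start();
            Thread.sleep(500);   // give the reader time to connect and block in read()
            reader.interrupt();  // closes the channel; read() completes with ClosedByInterruptException
            reader.join();
        }
    }
}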

Example 53 with ClosedByInterruptException

Use of java.nio.channels.ClosedByInterruptException in project hadoop by apache.

Class TestRead, method testInterruptReader.

@Test(timeout = 60000)
public void testInterruptReader() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_DATANODE_FSDATASET_FACTORY_KEY, DelayedSimulatedFSDataset.Factory.class.getName());
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    final DistributedFileSystem fs = cluster.getFileSystem();
    try {
        cluster.waitActive();
        final Path file = new Path("/foo");
        DFSTestUtil.createFile(fs, file, 1024, (short) 1, 0L);
        final FSDataInputStream in = fs.open(file);
        AtomicBoolean readInterrupted = new AtomicBoolean(false);
        final Thread reader = new Thread(new Runnable() {

            @Override
            public void run() {
                try {
                    in.read(new byte[1024], 0, 1024);
                } catch (IOException e) {
                    if (e instanceof ClosedByInterruptException || e instanceof InterruptedIOException) {
                        readInterrupted.set(true);
                    }
                }
            }
        });
        reader.start();
        Thread.sleep(1000);
        reader.interrupt();
        reader.join();
        Assert.assertTrue(readInterrupted.get());
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) InterruptedIOException(java.io.InterruptedIOException) Configuration(org.apache.hadoop.conf.Configuration) IOException(java.io.IOException) ClosedByInterruptException(java.nio.channels.ClosedByInterruptException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) Test(org.junit.Test)
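
Note: the test accepts either exception type because they sit on unrelated branches of the IOException hierarchy: ClosedByInterruptException comes from the NIO channel layer and extends ClosedChannelException, while InterruptedIOException extends IOException directly, and neither is a subtype of the other. A small helper capturing the same check (InterruptChecks is a hypothetical name, not Hadoop code):

import java.io.IOException;
import java.io.InterruptedIOException;
import java.nio.channels.ClosedByInterruptException;

final class InterruptChecks {

    private InterruptChecks() {
    }

    // True if the IOException signals that the calling thread was interrupted.
    // Both types must be tested, exactly as the test above does, because
    // neither exception extends the other.
    static boolean isInterruptRelated(IOException e) {
        return e instanceof ClosedByInterruptException || e instanceof InterruptedIOException;
    }
}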

Example 54 with ClosedByInterruptException

Use of java.nio.channels.ClosedByInterruptException in project hadoop by apache.

Class TestBlockReaderFactory, method testPurgingClosedReplicas.

/**
   * When an InterruptedException is sent to a thread calling
   * FileChannel#read, the FileChannel is immediately closed and the
   * thread gets an exception.  This effectively means that we might have
   * someone asynchronously calling close() on the file descriptors we use
   * in BlockReaderLocal.  So when unreferencing a ShortCircuitReplica in
   * ShortCircuitCache#unref, we should check if the FileChannel objects
   * are still open.  If not, we should purge the replica to avoid giving
   * it out to any future readers.
   *
   * This is a regression test for HDFS-6227: Short circuit read failed
   * due to ClosedChannelException.
   *
   * Note that you may still get ClosedChannelException errors if two threads
   * are reading from the same replica and an InterruptedException is delivered
   * to one of them.
   */
@Test(timeout = 120000)
public void testPurgingClosedReplicas() throws Exception {
    BlockReaderTestUtil.enableBlockReaderFactoryTracing();
    final AtomicInteger replicasCreated = new AtomicInteger(0);
    final AtomicBoolean testFailed = new AtomicBoolean(false);
    DFSInputStream.tcpReadsDisabledForTesting = true;
    BlockReaderFactory.createShortCircuitReplicaInfoCallback = new ShortCircuitCache.ShortCircuitReplicaCreator() {

        @Override
        public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
            replicasCreated.incrementAndGet();
            return null;
        }
    };
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = createShortCircuitConf("testPurgingClosedReplicas", sockDir);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4095;
    final int SEED = 0xFADE0;
    final DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(cluster.getURI(0), conf);
    DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    final Semaphore sem = new Semaphore(0);
    final List<LocatedBlock> locatedBlocks = cluster.getNameNode().getRpcServer().getBlockLocations(TEST_FILE, 0, TEST_FILE_LEN).getLocatedBlocks();
    // first block
    final LocatedBlock lblock = locatedBlocks.get(0);
    final byte[] buf = new byte[TEST_FILE_LEN];
    Runnable readerRunnable = new Runnable() {

        @Override
        public void run() {
            try {
                while (true) {
                    BlockReader blockReader = null;
                    try {
                        blockReader = BlockReaderTestUtil.getBlockReader(cluster.getFileSystem(), lblock, 0, TEST_FILE_LEN);
                        sem.release();
                        try {
                            blockReader.readAll(buf, 0, TEST_FILE_LEN);
                        } finally {
                            sem.acquireUninterruptibly();
                        }
                    } catch (ClosedByInterruptException e) {
                        LOG.info("got the expected ClosedByInterruptException", e);
                        sem.release();
                        break;
                    } finally {
                        if (blockReader != null)
                            blockReader.close();
                    }
                    LOG.info("read another " + TEST_FILE_LEN + " bytes.");
                }
            } catch (Throwable t) {
                LOG.error("getBlockReader failure", t);
                testFailed.set(true);
                sem.release();
            }
        }
    };
    Thread thread = new Thread(readerRunnable);
    thread.start();
    // These should trigger a ClosedChannelException.
    while (thread.isAlive()) {
        sem.acquireUninterruptibly();
        thread.interrupt();
        sem.release();
    }
    Assert.assertFalse(testFailed.get());
    // We should be able to read from the file without
    // getting a ClosedChannelException.
    BlockReader blockReader = null;
    try {
        blockReader = BlockReaderTestUtil.getBlockReader(cluster.getFileSystem(), lblock, 0, TEST_FILE_LEN);
        blockReader.readFully(buf, 0, TEST_FILE_LEN);
    } finally {
        if (blockReader != null)
            blockReader.close();
    }
    byte[] expected = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
    Assert.assertTrue(Arrays.equals(buf, expected));
    // Another ShortCircuitReplica object should have been created.
    Assert.assertEquals(2, replicasCreated.get());
    dfs.close();
    cluster.shutdown();
    sockDir.close();
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) DatanodeInfoBuilder(org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder) BlockReader(org.apache.hadoop.hdfs.BlockReader) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Semaphore(java.util.concurrent.Semaphore) ShortCircuitCache(org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) TemporarySocketDirectory(org.apache.hadoop.net.unix.TemporarySocketDirectory) ClosedByInterruptException(java.nio.channels.ClosedByInterruptException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ShortCircuitReplicaInfo(org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplicaInfo) Test(org.junit.Test)
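
Note: the class comment above relies on the fact that an interrupt delivered to a thread inside FileChannel#read closes the FileChannel itself, which is why a cache such as ShortCircuitCache must check that the descriptors are still open before handing the replica out again. A minimal sketch of that behavior outside HDFS (ClosedReplicaDemo is illustrative only; it assumes the documented semantics of interruptible channels):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedByInterruptException;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class ClosedReplicaDemo {

    public static void main(String[] args) throws IOException {
        Path tmp = Files.createTempFile("replica", ".dat");
        Files.write(tmp, new byte[4096]);
        try (FileChannel channel = FileChannel.open(tmp, StandardOpenOption.READ)) {
            // With the interrupt status already set, the next interruptible I/O call
            // closes the channel and throws ClosedByInterruptException.
            Thread.currentThread().interrupt();
            try {
                channel.read(ByteBuffer.allocate(16));
            } catch (ClosedByInterruptException e) {
                System.out.println("channel closed by interrupt");
            }
            Thread.interrupted(); // clear the flag so the rest of the method is unaffected
            // Anything caching this descriptor must notice the close before reusing it.
            System.out.println("channel.isOpen() = " + channel.isOpen()); // false
        }
        Files.deleteIfExists(tmp);
    }
}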

Example 55 with ClosedByInterruptException

Use of java.nio.channels.ClosedByInterruptException in project graal by oracle.

Class IgvDumpChannel, method createNetworkChannel.

private static WritableByteChannel createNetworkChannel(Supplier<Path> pathProvider, OptionValues options) throws IOException {
    String host = PrintGraphHost.getValue(options);
    int port = PrintBinaryGraphPort.getValue(options);
    try {
        WritableByteChannel channel = SocketChannel.open(new InetSocketAddress(host, port));
        TTY.println("Connected to the IGV on %s:%d", host, port);
        return channel;
    } catch (ClosedByInterruptException | InterruptedIOException e) {
        /*
             * Interrupts should not count as errors because they may be caused by a cancelled Graal
             * compilation. ClosedByInterruptException occurs if the SocketChannel could not be
             * opened. InterruptedIOException occurs if new Socket(..) was interrupted.
             */
        return null;
    } catch (IOException e) {
        if (!DebugOptions.PrintGraphFile.hasBeenSet(options)) {
            return createFileChannel(pathProvider);
        } else {
            throw new IOException(String.format("Could not connect to the IGV on %s:%d", host, port), e);
        }
    }
}
Also used : ClosedByInterruptException(java.nio.channels.ClosedByInterruptException) InterruptedIOException(java.io.InterruptedIOException) InetSocketAddress(java.net.InetSocketAddress) WritableByteChannel(java.nio.channels.WritableByteChannel) IOException(java.io.IOException)
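
Note: the comment's claim that ClosedByInterruptException occurs if the SocketChannel could not be opened follows from SocketChannel.connect being an interruptible operation: if the compiling thread has been interrupted, the connect aborts, the channel is closed and the exception is thrown instead. A small sketch under that assumption (InterruptedConnectDemo, host and port are placeholders):

import java.net.InetSocketAddress;
import java.nio.channels.ClosedByInterruptException;
import java.nio.channels.SocketChannel;

public class InterruptedConnectDemo {

    public static void main(String[] args) {
        // With the interrupt status set, SocketChannel.open(addr) aborts its connect,
        // closes the freshly created channel and throws ClosedByInterruptException.
        Thread.currentThread().interrupt();
        try {
            SocketChannel.open(new InetSocketAddress("127.0.0.1", 4445));
            System.out.println("connected (not expected here)");
        } catch (ClosedByInterruptException e) {
            System.out.println("connect aborted by interrupt: " + e);
        } catch (Exception e) {
            System.out.println("other failure: " + e);
        } finally {
            Thread.interrupted(); // clear the interrupt flag
        }
    }
}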

Aggregations

ClosedByInterruptException (java.nio.channels.ClosedByInterruptException): 81
IOException (java.io.IOException): 48
ByteBuffer (java.nio.ByteBuffer): 15
ClosedChannelException (java.nio.channels.ClosedChannelException): 11
SocketTimeoutException (java.net.SocketTimeoutException): 9
InetSocketAddress (java.net.InetSocketAddress): 7
MappedByteBuffer (java.nio.MappedByteBuffer): 7
SocketChannel (java.nio.channels.SocketChannel): 7
File (java.io.File): 6
ServerSocketChannel (java.nio.channels.ServerSocketChannel): 6
ServerSocket (java.net.ServerSocket): 5
FileChannel (java.nio.channels.FileChannel): 5
FileLockInterruptionException (java.nio.channels.FileLockInterruptionException): 5
InterruptedIOException (java.io.InterruptedIOException): 4
Path (java.nio.file.Path): 4
Test (org.junit.Test): 4
BuildId (com.facebook.buck.model.BuildId): 3
FileNotFoundException (java.io.FileNotFoundException): 3
InputStream (java.io.InputStream): 3
SocketException (java.net.SocketException): 3