Example 1 with ClosedChannelException

Use of java.nio.channels.ClosedChannelException in the Apache Flink project.

From class KvStateClientTest, method testServerClosesChannel.

/**
 * Tests that closing the server channel closes the connection and removes
 * it from the set of established connections.
 */
@Test
public void testServerClosesChannel() throws Exception {
    Deadline deadline = TEST_TIMEOUT.fromNow();
    AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats();
    KvStateClient client = null;
    Channel serverChannel = null;
    try {
        client = new KvStateClient(1, stats);
        final AtomicBoolean received = new AtomicBoolean();
        final AtomicReference<Channel> channel = new AtomicReference<>();
        serverChannel = createServerChannel(new ChannelInboundHandlerAdapter() {

            @Override
            public void channelActive(ChannelHandlerContext ctx) throws Exception {
                channel.set(ctx.channel());
            }

            @Override
            public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
                received.set(true);
            }
        });
        KvStateServerAddress serverAddress = getKvStateServerAddress(serverChannel);
        // Issue a request; the handler above never replies, so the future
        // remains pending until the channel is closed.
        Future<byte[]> future = client.getKvState(serverAddress, new KvStateID(), new byte[0]);
        while (!received.get() && deadline.hasTimeLeft()) {
            Thread.sleep(50);
        }
        assertTrue("Receive timed out", received.get());
        assertEquals(1, stats.getNumConnections());
        channel.get().close().await(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
        try {
            Await.result(future, deadline.timeLeft());
            fail("Did not throw expected server failure");
        } catch (ClosedChannelException ignored) {
        // Expected
        }
        assertEquals(0, stats.getNumConnections());
        // Counts can take some time to propagate
        while (deadline.hasTimeLeft() && (stats.getNumSuccessful() != 0 || stats.getNumFailed() != 1)) {
            Thread.sleep(100);
        }
        assertEquals(1, stats.getNumRequests());
        assertEquals(0, stats.getNumSuccessful());
        assertEquals(1, stats.getNumFailed());
    } finally {
        if (client != null) {
            client.shutDown();
        }
        if (serverChannel != null) {
            serverChannel.close();
        }
        assertEquals("Channel leak", 0, stats.getNumConnections());
    }
}
Also used: ClosedChannelException(java.nio.channels.ClosedChannelException) Deadline(scala.concurrent.duration.Deadline) SocketChannel(io.netty.channel.socket.SocketChannel) NioServerSocketChannel(io.netty.channel.socket.nio.NioServerSocketChannel) Channel(io.netty.channel.Channel) KvStateServerAddress(org.apache.flink.runtime.query.KvStateServerAddress) AtomicReference(java.util.concurrent.atomic.AtomicReference) ChannelHandlerContext(io.netty.channel.ChannelHandlerContext) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) KvStateID(org.apache.flink.runtime.query.KvStateID) ChannelInboundHandlerAdapter(io.netty.channel.ChannelInboundHandlerAdapter) Test(org.junit.Test)
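
What the test relies on is that the client fails every in-flight request with a ClosedChannelException once the peer closes the connection. Below is a minimal sketch of that failure-propagation pattern using a plain CompletableFuture; ServerCloseSketch and everything in it are illustrative stand-ins, not the Flink or Netty API.

import java.nio.channels.ClosedChannelException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

public class ServerCloseSketch {
    public static void main(String[] args) throws InterruptedException {
        // Stand-in for the client's pending request future.
        CompletableFuture<byte[]> pending = new CompletableFuture<>();

        // What a connection handler does when it observes the channel
        // going inactive: fail all in-flight requests so callers see
        // the closed channel instead of hanging.
        pending.completeExceptionally(new ClosedChannelException());

        try {
            pending.get();
        } catch (ExecutionException e) {
            // Same outcome the test above asserts via Await.result(...).
            System.out.println("request failed with " + e.getCause());
        }
    }
}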

Example 2 with ClosedChannelException

Use of java.nio.channels.ClosedChannelException in the Apache Hadoop project.

From class DatasetVolumeChecker, method checkVolume.

/**
   * Check a single volume asynchronously, returning a {@link ListenableFuture}
   * that can be used to retrieve the final result.
   *
   * If the volume cannot be referenced, it is already closed and
   * cannot be checked. No error is propagated to the callback.
   *
   * @param volume the volume that is to be checked.
   * @param callback callback to be invoked when the volume check completes.
   * @return true if the check was scheduled and the callback will be
   *         invoked; false otherwise.
   */
public boolean checkVolume(final FsVolumeSpi volume, Callback callback) {
    if (volume == null) {
        LOG.debug("Cannot schedule check on null volume");
        return false;
    }
    FsVolumeReference volumeReference;
    try {
        volumeReference = volume.obtainReference();
    } catch (ClosedChannelException e) {
        // The volume has already been closed.
        return false;
    }
    Optional<ListenableFuture<VolumeCheckResult>> olf = delegateChecker.schedule(volume, IGNORED_CONTEXT);
    if (olf.isPresent()) {
        numVolumeChecks.incrementAndGet();
        Futures.addCallback(olf.get(), new ResultHandler(volumeReference, new HashSet<>(), new HashSet<>(), new AtomicLong(1), callback));
        return true;
    } else {
        // The check could not be scheduled; release the volume reference.
        IOUtils.cleanup(null, volumeReference);
    }
    return false;
}
Also used: ClosedChannelException(java.nio.channels.ClosedChannelException) AtomicLong(java.util.concurrent.atomic.AtomicLong) FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) HashSet(java.util.HashSet)
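
The contract this code depends on is that obtainReference() refuses to hand out a reference once the volume is closed, signaling refusal with a ClosedChannelException rather than a boolean. A minimal sketch of such a reference count, assuming nothing about HDFS internals (RefCountedVolume is hypothetical):

import java.nio.channels.ClosedChannelException;
import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical reference-counted resource mirroring the obtainReference()
// contract used above: once closed, new references are rejected with
// ClosedChannelException, so callers can treat "closed" as "skip".
class RefCountedVolume {
    private final AtomicInteger refCount = new AtomicInteger(1);

    AutoCloseable obtainReference() throws ClosedChannelException {
        while (true) {
            int count = refCount.get();
            if (count <= 0) {
                // Already closed; no new references allowed.
                throw new ClosedChannelException();
            }
            if (refCount.compareAndSet(count, count + 1)) {
                // Releasing the reference just drops the count again.
                return refCount::decrementAndGet;
            }
        }
    }

    void close() {
        refCount.decrementAndGet(); // drop the initial reference
    }
}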

Example 3 with ClosedChannelException

Use of java.nio.channels.ClosedChannelException in the Apache Hadoop project.

From class DataXceiver, method run.

/**
   * Read/write data from/to the DataXceiverServer.
   */
@Override
public void run() {
    int opsProcessed = 0;
    Op op = null;
    try {
        synchronized (this) {
            xceiver = Thread.currentThread();
        }
        dataXceiverServer.addPeer(peer, Thread.currentThread(), this);
        peer.setWriteTimeout(datanode.getDnConf().socketWriteTimeout);
        InputStream input = socketIn;
        try {
            IOStreamPair saslStreams = datanode.saslServer.receive(peer, socketOut, socketIn, datanode.getXferAddress().getPort(), datanode.getDatanodeId());
            input = new BufferedInputStream(saslStreams.in, smallBufferSize);
            socketOut = saslStreams.out;
        } catch (InvalidMagicNumberException imne) {
            if (imne.isHandshake4Encryption()) {
                LOG.info("Failed to read expected encryption handshake from client " + "at " + peer.getRemoteAddressString() + ". Perhaps the client " + "is running an older version of Hadoop which does not support " + "encryption");
            } else {
                LOG.info("Failed to read expected SASL data transfer protection " + "handshake from client at " + peer.getRemoteAddressString() + ". Perhaps the client is running an older version of Hadoop " + "which does not support SASL data transfer protection");
            }
            return;
        }
        super.initialize(new DataInputStream(input));
        // The loop below reuses the connection for multiple ops; setting
        // the keepalive timeout to 0 disables this behavior.
        do {
            updateCurrentThreadName("Waiting for operation #" + (opsProcessed + 1));
            try {
                if (opsProcessed != 0) {
                    assert dnConf.socketKeepaliveTimeout > 0;
                    peer.setReadTimeout(dnConf.socketKeepaliveTimeout);
                } else {
                    peer.setReadTimeout(dnConf.socketTimeout);
                }
                op = readOp();
            } catch (InterruptedIOException ignored) {
                // Timed out while waiting for the client's next op.
                break;
            } catch (EOFException | ClosedChannelException e) {
                // Since we optimistically expect the next op, it's quite normal to
                // get EOF here.
                LOG.debug("Cached {} closing after {} ops.  " + "This message is usually benign.", peer, opsProcessed);
                break;
            } catch (IOException err) {
                incrDatanodeNetworkErrors();
                throw err;
            }
            // restore normal timeout
            if (opsProcessed != 0) {
                peer.setReadTimeout(dnConf.socketTimeout);
            }
            opStartTime = monotonicNow();
            processOp(op);
            ++opsProcessed;
        } while ((peer != null) && (!peer.isClosed() && dnConf.socketKeepaliveTimeout > 0));
    } catch (Throwable t) {
        String s = datanode.getDisplayName() + ":DataXceiver error processing " + ((op == null) ? "unknown" : op.name()) + " operation " + " src: " + remoteAddress + " dst: " + localAddress;
        if (op == Op.WRITE_BLOCK && t instanceof ReplicaAlreadyExistsException) {
            // A ReplicaAlreadyExistsException on WRITE_BLOCK is benign: the
            // client and replication may write the same block to the same
            // datanode at the same time.
            if (LOG.isTraceEnabled()) {
                LOG.trace(s, t);
            } else {
                LOG.info(s + "; " + t);
            }
        } else if (op == Op.READ_BLOCK && t instanceof SocketTimeoutException) {
            String s1 = "Likely the client has stopped reading, disconnecting it";
            s1 += " (" + s + ")";
            if (LOG.isTraceEnabled()) {
                LOG.trace(s1, t);
            } else {
                LOG.info(s1 + "; " + t);
            }
        } else if (t instanceof InvalidToken) {
            // The InvalidToken exception has already been logged by the
            // checkAccess() method, and this is not a server error.
            if (LOG.isTraceEnabled()) {
                LOG.trace(s, t);
            }
        } else {
            LOG.error(s, t);
        }
    } finally {
        collectThreadLocalStates();
        if (LOG.isDebugEnabled()) {
            LOG.debug(datanode.getDisplayName() + ":Number of active connections is: " + datanode.getXceiverCount());
        }
        updateCurrentThreadName("Cleaning up");
        if (peer != null) {
            dataXceiverServer.closePeer(peer);
            IOUtils.closeStream(in);
        }
    }
}
Also used: Op(org.apache.hadoop.hdfs.protocol.datatransfer.Op) InterruptedIOException(java.io.InterruptedIOException) ClosedChannelException(java.nio.channels.ClosedChannelException) BufferedInputStream(java.io.BufferedInputStream) DataInputStream(java.io.DataInputStream) FileInputStream(java.io.FileInputStream) InputStream(java.io.InputStream) InvalidMagicNumberException(org.apache.hadoop.hdfs.protocol.datatransfer.sasl.InvalidMagicNumberException) IOException(java.io.IOException) ByteString(com.google.protobuf.ByteString) SocketTimeoutException(java.net.SocketTimeoutException) IOStreamPair(org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair) EOFException(java.io.EOFException) InvalidToken(org.apache.hadoop.security.token.SecretManager.InvalidToken)
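
The part worth isolating is the loop's exception policy: EOFException and ClosedChannelException mean the client simply went away between operations and end the loop quietly, InterruptedIOException is a read timeout, and any other IOException is counted as a real network error. A compact sketch of that policy, with serveOps and the error counter as illustrative names rather than the Hadoop API:

import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.nio.channels.ClosedChannelException;
import java.util.concurrent.atomic.AtomicLong;

final class OpLoopSketch {
    private final AtomicLong networkErrors = new AtomicLong();

    int serveOps(DataInputStream in) throws IOException {
        int opsProcessed = 0;
        while (true) {
            int op;
            try {
                op = in.readUnsignedByte(); // stand-in for readOp()
            } catch (InterruptedIOException ignored) {
                break; // read timeout while waiting for the next op
            } catch (EOFException | ClosedChannelException e) {
                break; // benign: the peer closed between ops
            } catch (IOException err) {
                networkErrors.incrementAndGet(); // real network trouble
                throw err;
            }
            // processOp(op) would run here.
            opsProcessed++;
        }
        return opsProcessed;
    }
}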

Example 4 with ClosedChannelException

Use of java.nio.channels.ClosedChannelException in the Apache Hadoop project.

From class TestClose, method testWriteAfterClose.

@Test
public void testWriteAfterClose() throws IOException {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        final byte[] data = "foo".getBytes();
        FileSystem fs = FileSystem.get(conf);
        OutputStream out = fs.create(new Path("/test"));
        out.write(data);
        out.close();
        try {
            // Should fail.
            out.write(data);
            fail("Should not have been able to write more data after file is closed.");
        } catch (ClosedChannelException cce) {
        // We got the correct exception. Ignoring.
        }
        // Should succeed. Double closes are OK.
        out.close();
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path(org.apache.hadoop.fs.Path) ClosedChannelException(java.nio.channels.ClosedChannelException) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) OutputStream(java.io.OutputStream) Test(org.junit.Test)
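
The same write-after-close behavior can be reproduced with a plain FileChannel from the JDK, no MiniDFSCluster required. This standalone sketch mirrors the test, including the benign double close; the class and file names are illustrative:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class WriteAfterCloseDemo {
    public static void main(String[] args) throws IOException {
        Path tmp = Files.createTempFile("close-demo", ".bin");
        FileChannel ch = FileChannel.open(tmp, StandardOpenOption.WRITE);
        ch.write(ByteBuffer.wrap("foo".getBytes()));
        ch.close();
        try {
            // Should fail, just like the HDFS stream above.
            ch.write(ByteBuffer.wrap("bar".getBytes()));
            throw new AssertionError("write after close must not succeed");
        } catch (ClosedChannelException expected) {
            // Correct exception; ignoring.
        }
        ch.close(); // double close is a no-op
        Files.delete(tmp);
    }
}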

Example 5 with ClosedChannelException

Use of java.nio.channels.ClosedChannelException in the Apache Hadoop project.

From class FsDatasetImpl, method getVolumeInfo.

private Collection<VolumeInfo> getVolumeInfo() {
    Collection<VolumeInfo> info = new ArrayList<VolumeInfo>();
    for (FsVolumeImpl volume : volumes.getVolumes()) {
        long used = 0;
        long free = 0;
        try (FsVolumeReference ref = volume.obtainReference()) {
            used = volume.getDfsUsed();
            free = volume.getAvailable();
        } catch (ClosedChannelException e) {
            // The volume was closed concurrently; skip it.
            continue;
        } catch (IOException e) {
            LOG.warn(e.getMessage());
            used = 0;
            free = 0;
        }
        info.add(new VolumeInfo(volume, used, free));
    }
    return info;
}
Also used: ClosedChannelException(java.nio.channels.ClosedChannelException) FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference) ArrayList(java.util.ArrayList) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException)
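
The skip-on-closed idiom generalizes to any channel-backed resource: probe inside a try, treat ClosedChannelException as "gone, move on", and degrade other IOExceptions instead of failing the whole scan. A minimal JDK-only sketch (VolumeScanSketch is an illustrative name):

import java.io.IOException;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.FileChannel;
import java.util.ArrayList;
import java.util.List;

final class VolumeScanSketch {
    // Collect sizes of the channels that are still open, skipping any
    // closed concurrently, the way getVolumeInfo() skips closed volumes.
    static List<Long> openSizes(List<FileChannel> channels) {
        List<Long> sizes = new ArrayList<>();
        for (FileChannel ch : channels) {
            try {
                sizes.add(ch.size()); // throws if the channel is closed
            } catch (ClosedChannelException e) {
                continue; // closed between listing and probing; skip it
            } catch (IOException e) {
                sizes.add(0L); // degrade to zero, as getVolumeInfo() does
            }
        }
        return sizes;
    }
}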

Aggregations

ClosedChannelException (java.nio.channels.ClosedChannelException): 211
ByteBuffer (java.nio.ByteBuffer): 67
IOException (java.io.IOException): 60
Test (org.junit.Test): 23
InetSocketAddress (java.net.InetSocketAddress): 19
SelectionKey (java.nio.channels.SelectionKey): 18
SocketChannel (java.nio.channels.SocketChannel): 15
ArrayList (java.util.ArrayList): 13
NotYetConnectedException (java.nio.channels.NotYetConnectedException): 11
InterruptedIOException (java.io.InterruptedIOException): 10
CancelledKeyException (java.nio.channels.CancelledKeyException): 10
ShutdownCommand (com.cloud.agent.api.ShutdownCommand): 9
File (java.io.File): 9
ServerSocketChannel (java.nio.channels.ServerSocketChannel): 9
PooledByteBuffer (io.undertow.connector.PooledByteBuffer): 8
FileChannel (java.nio.channels.FileChannel): 8
ConnectException (java.net.ConnectException): 7
FsVolumeReference (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference): 6
AgentControlCommand (com.cloud.agent.api.AgentControlCommand): 5
Command (com.cloud.agent.api.Command): 5