Use of java.nio.channels.ClosedChannelException in project flink by apache.
The class KvStateClientTest, method testServerClosesChannel.
/**
 * Tests that a server channel close closes the connection and removes it
 * from the established connections.
 */
@Test
public void testServerClosesChannel() throws Exception {
    Deadline deadline = TEST_TIMEOUT.fromNow();
    AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats();
    KvStateClient client = null;
    Channel serverChannel = null;
    try {
        client = new KvStateClient(1, stats);
        final AtomicBoolean received = new AtomicBoolean();
        final AtomicReference<Channel> channel = new AtomicReference<>();
        serverChannel = createServerChannel(new ChannelInboundHandlerAdapter() {
            @Override
            public void channelActive(ChannelHandlerContext ctx) throws Exception {
                channel.set(ctx.channel());
            }

            @Override
            public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
                received.set(true);
            }
        });
        KvStateServerAddress serverAddress = getKvStateServerAddress(serverChannel);

        // Requests
        Future<byte[]> future = client.getKvState(serverAddress, new KvStateID(), new byte[0]);
        while (!received.get() && deadline.hasTimeLeft()) {
            Thread.sleep(50);
        }
        assertTrue("Receive timed out", received.get());
        assertEquals(1, stats.getNumConnections());

        channel.get().close().await(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);

        try {
            Await.result(future, deadline.timeLeft());
            fail("Did not throw expected server failure");
        } catch (ClosedChannelException ignored) {
            // Expected
        }
        assertEquals(0, stats.getNumConnections());

        // Counts can take some time to propagate
        while (deadline.hasTimeLeft() && (stats.getNumSuccessful() != 0 || stats.getNumFailed() != 1)) {
            Thread.sleep(100);
        }
        assertEquals(1, stats.getNumRequests());
        assertEquals(0, stats.getNumSuccessful());
        assertEquals(1, stats.getNumFailed());
    } finally {
        if (client != null) {
            client.shutDown();
        }
        if (serverChannel != null) {
            serverChannel.close();
        }
        assertEquals("Channel leak", 0, stats.getNumConnections());
    }
}
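The test synchronizes on asynchronous events with deadline-bounded poll loops (Thread.sleep inside a deadline.hasTimeLeft() check). A minimal plain-Java sketch of that polling idiom, with a hypothetical awaitCondition helper that is not part of Flink:

import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

final class PollUtil {
    private PollUtil() {}

    /** Polls the condition until it holds or the timeout elapses. */
    static boolean awaitCondition(BooleanSupplier condition, long timeout, TimeUnit unit)
            throws InterruptedException {
        long deadlineNanos = System.nanoTime() + unit.toNanos(timeout);
        while (!condition.getAsBoolean()) {
            if (System.nanoTime() >= deadlineNanos) {
                return false; // timed out before the condition held
            }
            Thread.sleep(50); // same coarse poll interval as the test above
        }
        return true;
    }
}

A caller could then replace the inline loop with, for example, assertTrue(PollUtil.awaitCondition(received::get, 10, TimeUnit.SECONDS)).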
Use of java.nio.channels.ClosedChannelException in project hadoop by apache.
The class TestDomainSocket, method testAsyncCloseDuringIO.
/**
 * Test that we get an AsynchronousCloseException when the DomainSocket
 * we're using is closed during a read or write operation.
 *
 * @throws IOException
 */
private void testAsyncCloseDuringIO(final boolean closeDuringWrite) throws Exception {
    final String TEST_PATH = new File(sockDir.getDir(),
        "testAsyncCloseDuringIO(" + closeDuringWrite + ")").getAbsolutePath();
    final DomainSocket serv = DomainSocket.bindAndListen(TEST_PATH);
    ExecutorService exeServ = Executors.newFixedThreadPool(2);
    Callable<Void> serverCallable = new Callable<Void>() {
        public Void call() {
            DomainSocket serverConn = null;
            try {
                serverConn = serv.accept();
                byte[] buf = new byte[100];
                for (int i = 0; i < buf.length; i++) {
                    buf[i] = 0;
                }
                // The server keeps writing or reading until the socket is
                // asynchronously closed; at that point, reads return EOF,
                // and writes get a socket error.
                if (closeDuringWrite) {
                    try {
                        while (true) {
                            serverConn.getOutputStream().write(buf);
                        }
                    } catch (IOException e) {
                        // expected once the socket is closed
                    }
                } else {
                    do {
                        ;
                    } while (serverConn.getInputStream().read(buf, 0, buf.length) != -1);
                }
            } catch (IOException e) {
                throw new RuntimeException("unexpected IOException", e);
            } finally {
                IOUtils.cleanup(DomainSocket.LOG, serverConn);
            }
            return null;
        }
    };
    Future<Void> serverFuture = exeServ.submit(serverCallable);
    final DomainSocket clientConn = DomainSocket.connect(serv.getPath());
    Callable<Void> clientCallable = new Callable<Void>() {
        public Void call() {
            // The client writes or reads until another thread
            // asynchronously closes the socket. At that point, we should
            // get ClosedChannelException, or possibly its subclass
            // AsynchronousCloseException.
            byte[] buf = new byte[100];
            for (int i = 0; i < buf.length; i++) {
                buf[i] = 0;
            }
            try {
                if (closeDuringWrite) {
                    while (true) {
                        clientConn.getOutputStream().write(buf);
                    }
                } else {
                    while (true) {
                        clientConn.getInputStream().read(buf, 0, buf.length);
                    }
                }
            } catch (ClosedChannelException e) {
                return null;
            } catch (IOException e) {
                throw new RuntimeException("unexpected IOException", e);
            }
        }
    };
    Future<Void> clientFuture = exeServ.submit(clientCallable);
    Thread.sleep(500);
    clientConn.close();
    serv.close();
    clientFuture.get(2, TimeUnit.MINUTES);
    serverFuture.get(2, TimeUnit.MINUTES);
}
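The same asynchronous-close behavior can be reproduced with plain JDK channels, without DomainSocket: closing a blocking SocketChannel from another thread makes the blocked read fail with AsynchronousCloseException, the ClosedChannelException subclass the javadoc above mentions. A self-contained sketch (the class name AsyncCloseDemo and the timing are illustrative):

import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.AsynchronousCloseException;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;

public class AsyncCloseDemo {
    public static void main(String[] args) throws Exception {
        try (ServerSocketChannel server = ServerSocketChannel.open()) {
            server.bind(new InetSocketAddress("127.0.0.1", 0));
            SocketChannel client = SocketChannel.open(server.getLocalAddress());
            SocketChannel peer = server.accept(); // keep the peer open so read() blocks

            Thread closer = new Thread(() -> {
                try {
                    Thread.sleep(500); // let the main thread block in read()
                    client.close();    // asynchronous close from another thread
                } catch (Exception ignored) {
                }
            });
            closer.start();
            try {
                client.read(ByteBuffer.allocate(100)); // blocks; the peer never writes
            } catch (AsynchronousCloseException expected) {
                System.out.println("got expected " + expected);
            }
            closer.join();
            peer.close();
        }
    }
}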
Use of java.nio.channels.ClosedChannelException in project hadoop by apache.
The class DataStreamer, method waitForAckedSeqno.
/**
 * Wait for the ack of the given sequence number.
 *
 * @param seqno the sequence number to be acked
 * @throws IOException
 */
void waitForAckedSeqno(long seqno) throws IOException {
    try (TraceScope ignored = dfsClient.getTracer().newScope("waitForAckedSeqno")) {
        LOG.debug("{} waiting for ack for: {}", this, seqno);
        long begin = Time.monotonicNow();
        try {
            synchronized (dataQueue) {
                while (!streamerClosed) {
                    checkClosed();
                    if (lastAckedSeqno >= seqno) {
                        break;
                    }
                    try {
                        // when we receive an ack, we notify on dataQueue
                        dataQueue.wait(1000);
                    } catch (InterruptedException ie) {
                        throw new InterruptedIOException(
                            "Interrupted while waiting for data to be acknowledged by pipeline");
                    }
                }
            }
            checkClosed();
        } catch (ClosedChannelException cce) {
            // ignore: the stream was closed while we were waiting
        }
        long duration = Time.monotonicNow() - begin;
        if (duration > dfsclientSlowLogThresholdMs) {
            LOG.warn("Slow waitForAckedSeqno took {}ms (threshold={}ms). File being"
                + " written: {}, block: {}, Write pipeline datanodes: {}.",
                duration, dfsclientSlowLogThresholdMs, src, block, nodes);
        }
    }
}
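waitForAckedSeqno is an instance of the standard guarded-wait idiom: call wait() in a loop on a shared monitor, re-check the condition on every wakeup, and bound the wait so the loop can also notice shutdown, while the producer calls notifyAll() under the same lock. A minimal sketch with a hypothetical AckTracker class, not part of Hadoop:

final class AckTracker {
    private final Object lock = new Object();
    private long lastAckedSeqno = -1;
    private volatile boolean closed;

    /** Called by the ack-receiving thread. */
    void ackUpTo(long seqno) {
        synchronized (lock) {
            lastAckedSeqno = seqno;
            lock.notifyAll(); // wake any thread blocked in waitForAck
        }
    }

    /** Blocks until seqno is acked or the tracker is closed. */
    void waitForAck(long seqno) throws InterruptedException {
        synchronized (lock) {
            while (!closed && lastAckedSeqno < seqno) {
                lock.wait(1000); // bounded so the loop can re-check 'closed'
            }
        }
    }
}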
Use of java.nio.channels.ClosedChannelException in project hadoop by apache.
The class DFSStripedOutputStream, method closeImpl.
@Override
protected synchronized void closeImpl() throws IOException {
    if (isClosed()) {
        final MultipleIOException.Builder b = new MultipleIOException.Builder();
        for (int i = 0; i < streamers.size(); i++) {
            final StripedDataStreamer si = getStripedDataStreamer(i);
            try {
                si.getLastException().check(true);
            } catch (IOException e) {
                b.add(e);
            }
        }
        final IOException ioe = b.build();
        if (ioe != null) {
            throw ioe;
        }
        return;
    }
    try {
        try {
            // flush from all upper layers
            flushBuffer();
            // if the last stripe is incomplete, generate and write parity cells
            if (generateParityCellsForLastStripe()) {
                writeParityCells();
            }
            enqueueAllCurrentPackets();
            // flush all the data packets
            flushAllInternals();
            // check failures
            checkStreamerFailures();
            for (int i = 0; i < numAllBlocks; i++) {
                final StripedDataStreamer s = setCurrentStreamer(i);
                if (s.isHealthy()) {
                    try {
                        if (s.getBytesCurBlock() > 0) {
                            setCurrentPacketToEmpty();
                        }
                        // flush the last "close" packet to Datanode
                        flushInternal();
                    } catch (Exception e) {
                        // TODO for both close and endBlock, we currently do not handle
                        // failures when sending the last packet. We actually do not need to
                        // bump GS for this kind of failure. Thus counting the total number
                        // of failures may be good enough.
                    }
                }
            }
        } finally {
            // Failures may happen when flushing data/parity data out. Exceptions
            // may be thrown if more than 3 streamers fail, or updatePipeline RPC
            // fails. Streamers may keep waiting for the new block/GS information.
            // Thus need to force closing these threads.
            closeThreads(true);
        }
        try (TraceScope ignored = dfsClient.getTracer().newScope("completeFile")) {
            completeFile(currentBlockGroup);
        }
        logCorruptBlocks();
    } catch (ClosedChannelException ignored) {
        // the stream was already closed underneath us; fall through to cleanup
    } finally {
        setClosed();
        // shutdown executor of flushAll tasks
        flushAllExecutor.shutdownNow();
    }
}
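The isClosed() branch above collects every streamer's failure into a single exception via Hadoop's MultipleIOException.Builder. A JDK-only sketch of the same collect-then-throw-once pattern, using Throwable.addSuppressed in a hypothetical closeAll helper:

import java.io.Closeable;
import java.io.IOException;
import java.util.List;

final class CloseAll {
    private CloseAll() {}

    /** Closes every resource, then throws one IOException carrying the rest. */
    static void closeAll(List<? extends Closeable> resources) throws IOException {
        IOException first = null;
        for (Closeable c : resources) {
            try {
                c.close();
            } catch (IOException e) {
                if (first == null) {
                    first = e;
                } else {
                    first.addSuppressed(e); // keep later failures attached to the first
                }
            }
        }
        if (first != null) {
            throw first;
        }
    }
}

Either way, the point is the same: close everything before reporting anything, so one bad streamer cannot leak the others.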
Use of java.nio.channels.ClosedChannelException in project neo4j by neo4j.
The class PageSwapperTest, method readMustNotReopenExplicitlyClosedChannel.
@Test
public void readMustNotReopenExplicitlyClosedChannel() throws Exception {
    String filename = "a";
    File file = file(filename);
    ByteBufferPage page = createPage();
    PageSwapperFactory swapperFactory = createSwapperFactory();
    PageSwapper swapper = createSwapperAndFile(swapperFactory, file);
    swapper.write(0, page);
    swapper.close();
    try {
        swapper.read(0, page);
        fail("Should have thrown because the channel should be closed");
    } catch (ClosedChannelException ignore) {
        // This is fine.
    }
}
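The JDK's own FileChannel gives the same guarantee this test asserts for PageSwapper: after close(), further I/O throws ClosedChannelException instead of silently reopening the file. A self-contained demonstration:

import java.nio.ByteBuffer;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class ClosedChannelDemo {
    public static void main(String[] args) throws Exception {
        Path file = Files.createTempFile("a", ".bin");
        FileChannel channel = FileChannel.open(file,
            StandardOpenOption.READ, StandardOpenOption.WRITE);
        channel.write(ByteBuffer.wrap(new byte[] { 1, 2, 3 }), 0);
        channel.close();
        try {
            channel.read(ByteBuffer.allocate(3), 0); // must not reopen the channel
        } catch (ClosedChannelException expected) {
            System.out.println("got expected " + expected);
        } finally {
            Files.deleteIfExists(file);
        }
    }
}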