Use of java.nio.channels.ClosedChannelException in project flink by apache.
The class KvStateClientTest, method testServerClosesChannel.
/**
 * Tests that a server-side channel close closes the connection and removes
 * it from the set of established connections.
 */
@Test
public void testServerClosesChannel() throws Exception {
    Deadline deadline = TEST_TIMEOUT.fromNow();
    AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats();

    KvStateClient client = null;
    Channel serverChannel = null;

    try {
        client = new KvStateClient(1, stats);

        final AtomicBoolean received = new AtomicBoolean();
        final AtomicReference<Channel> channel = new AtomicReference<>();

        serverChannel = createServerChannel(new ChannelInboundHandlerAdapter() {
            @Override
            public void channelActive(ChannelHandlerContext ctx) throws Exception {
                channel.set(ctx.channel());
            }

            @Override
            public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
                received.set(true);
            }
        });

        KvStateServerAddress serverAddress = getKvStateServerAddress(serverChannel);

        // Requests
        Future<byte[]> future = client.getKvState(serverAddress, new KvStateID(), new byte[0]);

        while (!received.get() && deadline.hasTimeLeft()) {
            Thread.sleep(50);
        }
        assertTrue("Receive timed out", received.get());

        assertEquals(1, stats.getNumConnections());

        channel.get().close().await(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);

        try {
            Await.result(future, deadline.timeLeft());
            fail("Did not throw expected server failure");
        } catch (ClosedChannelException ignored) {
            // Expected
        }

        assertEquals(0, stats.getNumConnections());

        // Counts can take some time to propagate
        while (deadline.hasTimeLeft() && (stats.getNumSuccessful() != 0 || stats.getNumFailed() != 1)) {
            Thread.sleep(100);
        }

        assertEquals(1, stats.getNumRequests());
        assertEquals(0, stats.getNumSuccessful());
        assertEquals(1, stats.getNumFailed());
    } finally {
        if (client != null) {
            client.shutDown();
        }
        if (serverChannel != null) {
            serverChannel.close();
        }
        assertEquals("Channel leak", 0, stats.getNumConnections());
    }
}
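The pattern the test exercises is not Flink-specific: a client that multiplexes requests over a single connection typically fails every in-flight future with ClosedChannelException once the server closes the channel. Below is a minimal, self-contained sketch of that failure propagation using only CompletableFuture; PendingRequestFailureDemo and the byte[] payload are illustrative stand-ins, not Flink's API.

import java.nio.channels.ClosedChannelException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

public class PendingRequestFailureDemo {
    public static void main(String[] args) throws Exception {
        // Stand-in for a request future handed out by a client like KvStateClient.
        CompletableFuture<byte[]> pending = new CompletableFuture<>();

        // When the server closes the channel, the client fails all in-flight
        // requests so that callers do not block forever.
        pending.completeExceptionally(new ClosedChannelException());

        try {
            pending.get();
        } catch (ExecutionException e) {
            // Callers unwrap the cause and handle it, as the test above does.
            System.out.println("Request failed: " + e.getCause());
        }
    }
}

In a client built this way, the completeExceptionally call would live in the connection's close handler; the test triggers that path by closing the channel from the server side.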
Use of java.nio.channels.ClosedChannelException in project hadoop by apache.
The class DatasetVolumeChecker, method checkVolume.
/**
 * Check a single volume asynchronously, returning a {@link ListenableFuture}
 * that can be used to retrieve the final result.
 *
 * If the volume cannot be referenced, it is already closed and cannot be
 * checked; no error is propagated to the callback in that case.
 *
 * @param volume the volume that is to be checked.
 * @param callback callback to be invoked when the volume check completes.
 * @return true if the check was scheduled and the callback will be invoked;
 *         false otherwise.
 */
public boolean checkVolume(final FsVolumeSpi volume, Callback callback) {
    if (volume == null) {
        LOG.debug("Cannot schedule check on null volume");
        return false;
    }
    FsVolumeReference volumeReference;
    try {
        volumeReference = volume.obtainReference();
    } catch (ClosedChannelException e) {
        // The volume has already been closed.
        return false;
    }
    Optional<ListenableFuture<VolumeCheckResult>> olf =
        delegateChecker.schedule(volume, IGNORED_CONTEXT);
    if (olf.isPresent()) {
        numVolumeChecks.incrementAndGet();
        Futures.addCallback(olf.get(),
            new ResultHandler(volumeReference, new HashSet<>(), new HashSet<>(),
                new AtomicLong(1), callback));
        return true;
    } else {
        IOUtils.cleanup(null, volumeReference);
    }
    return false;
}
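The interesting contract here is obtainReference(): instead of returning null or a status flag for a closed volume, it throws ClosedChannelException, which lets callers treat "already closed" as an ordinary skip rather than an error. A minimal sketch of that reference-counting idiom follows, assuming nothing about Hadoop's actual FsVolumeReference internals; RefCountedVolume is a hypothetical name.

import java.nio.channels.ClosedChannelException;

// Hypothetical stand-in for a volume whose lifetime is guarded by a
// reference count; this is not Hadoop's FsVolumeImpl.
class RefCountedVolume {
    private boolean closed;
    private int refCount;

    // Throws ClosedChannelException once the volume is closed, so callers
    // such as checkVolume() can skip it instead of treating it as an error.
    synchronized AutoCloseable obtainReference() throws ClosedChannelException {
        if (closed) {
            throw new ClosedChannelException();
        }
        refCount++;
        return this::release;
    }

    private synchronized void release() {
        refCount--;
    }

    synchronized void close() {
        closed = true;
    }
}

checkVolume() above maps the exception to a plain false return; getVolumeInfo() later on this page maps it to a continue.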
Use of java.nio.channels.ClosedChannelException in project hadoop by apache.
The class DataXceiver, method run.
/**
 * Read/write data from/to the DataXceiverServer.
 */
@Override
public void run() {
    int opsProcessed = 0;
    Op op = null;
    try {
        synchronized (this) {
            xceiver = Thread.currentThread();
        }
        dataXceiverServer.addPeer(peer, Thread.currentThread(), this);
        peer.setWriteTimeout(datanode.getDnConf().socketWriteTimeout);
        InputStream input = socketIn;
        try {
            IOStreamPair saslStreams = datanode.saslServer.receive(peer, socketOut,
                socketIn, datanode.getXferAddress().getPort(), datanode.getDatanodeId());
            input = new BufferedInputStream(saslStreams.in, smallBufferSize);
            socketOut = saslStreams.out;
        } catch (InvalidMagicNumberException imne) {
            if (imne.isHandshake4Encryption()) {
                LOG.info("Failed to read expected encryption handshake from client at "
                    + peer.getRemoteAddressString() + ". Perhaps the client is running an "
                    + "older version of Hadoop which does not support encryption");
            } else {
                LOG.info("Failed to read expected SASL data transfer protection handshake "
                    + "from client at " + peer.getRemoteAddressString() + ". Perhaps the "
                    + "client is running an older version of Hadoop which does not support "
                    + "SASL data transfer protection");
            }
            return;
        }

        super.initialize(new DataInputStream(input));

        // We process requests in a loop and stay around for a short keepalive
        // timeout so the other end can reuse the connection.
        // Setting the keepalive timeout to 0 disables this behavior.
        do {
            updateCurrentThreadName("Waiting for operation #" + (opsProcessed + 1));
            try {
                if (opsProcessed != 0) {
                    assert dnConf.socketKeepaliveTimeout > 0;
                    peer.setReadTimeout(dnConf.socketKeepaliveTimeout);
                } else {
                    peer.setReadTimeout(dnConf.socketTimeout);
                }
                op = readOp();
            } catch (InterruptedIOException ignored) {
                // Timed out while waiting for a client rpc
                break;
            } catch (EOFException | ClosedChannelException e) {
                // Since we optimistically expect the next op, it's quite normal to
                // get EOF here.
                LOG.debug("Cached {} closing after {} ops. This message is usually benign.",
                    peer, opsProcessed);
                break;
            } catch (IOException err) {
                incrDatanodeNetworkErrors();
                throw err;
            }

            // restore normal timeout
            if (opsProcessed != 0) {
                peer.setReadTimeout(dnConf.socketTimeout);
            }

            opStartTime = monotonicNow();
            processOp(op);
            ++opsProcessed;
        } while ((peer != null) && (!peer.isClosed() && dnConf.socketKeepaliveTimeout > 0));
    } catch (Throwable t) {
        String s = datanode.getDisplayName() + ":DataXceiver error processing "
            + ((op == null) ? "unknown" : op.name()) + " operation "
            + " src: " + remoteAddress + " dst: " + localAddress;
        if (op == Op.WRITE_BLOCK && t instanceof ReplicaAlreadyExistsException) {
            // The replica may already exist because two clients can be writing
            // the same block at the same time.
            if (LOG.isTraceEnabled()) {
                LOG.trace(s, t);
            } else {
                LOG.info(s + "; " + t);
            }
        } else if (op == Op.READ_BLOCK && t instanceof SocketTimeoutException) {
            String s1 = "Likely the client has stopped reading, disconnecting it";
            s1 += " (" + s + ")";
            if (LOG.isTraceEnabled()) {
                LOG.trace(s1, t);
            } else {
                LOG.info(s1 + "; " + t);
            }
        } else if (t instanceof InvalidToken) {
            // The InvalidToken exception has already been logged in the
            // checkAccess() method and this is not a server error.
            if (LOG.isTraceEnabled()) {
                LOG.trace(s, t);
            }
        } else {
            LOG.error(s, t);
        }
    } finally {
        collectThreadLocalStates();
        if (LOG.isDebugEnabled()) {
            LOG.debug(datanode.getDisplayName() + ":Number of active connections is: "
                + datanode.getXceiverCount());
        }
        updateCurrentThreadName("Cleaning up");
        if (peer != null) {
            dataXceiverServer.closePeer(peer);
            IOUtils.closeStream(in);
        }
    }
}
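Stripped of the SASL handshake and logging, the loop follows one rule: EOFException or ClosedChannelException while waiting for the next opcode means the client simply hung up, while any other IOException is a genuine network error. Here is a condensed, runnable sketch of that shape; OpLoop and the one-byte opcode encoding are illustrative, not Hadoop's actual transfer protocol.

import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.channels.ClosedChannelException;

class OpLoop {
    void run(DataInputStream in) {
        int opsProcessed = 0;
        while (true) {
            int op;
            try {
                // Optimistically wait for the next opcode, like readOp() above.
                op = in.readUnsignedByte();
            } catch (EOFException | ClosedChannelException e) {
                // The peer hung up between ops; this is the benign case.
                break;
            } catch (IOException e) {
                // Anything else counts as a real network error.
                throw new UncheckedIOException(e);
            }
            process(op);
            opsProcessed++;
        }
        System.out.println("Closing after " + opsProcessed + " ops");
    }

    private void process(int op) {
        // Dispatch on the opcode; elided in this sketch.
    }
}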
Use of java.nio.channels.ClosedChannelException in project hadoop by apache.
The class TestClose, method testWriteAfterClose.
@Test
public void testWriteAfterClose() throws IOException {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        final byte[] data = "foo".getBytes();
        FileSystem fs = FileSystem.get(conf);
        OutputStream out = fs.create(new Path("/test"));
        out.write(data);
        out.close();
        try {
            // Should fail.
            out.write(data);
            fail("Should not have been able to write more data after file is closed.");
        } catch (ClosedChannelException cce) {
            // We got the correct exception. Ignoring.
        }
        // Should succeed. Double closes are OK.
        out.close();
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
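The HDFS output stream mirrors the contract of java.nio channels here: a write after close must fail with ClosedChannelException, and close() must be idempotent. The same two behaviors can be observed with a plain FileChannel, as in this self-contained sketch:

import java.nio.ByteBuffer;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class WriteAfterCloseDemo {
    public static void main(String[] args) throws Exception {
        Path tmp = Files.createTempFile("write-after-close", ".dat");
        try {
            FileChannel channel = FileChannel.open(tmp, StandardOpenOption.WRITE);
            channel.write(ByteBuffer.wrap("foo".getBytes()));
            channel.close();
            channel.close(); // Double close is a no-op, as in the HDFS test.
            try {
                channel.write(ByteBuffer.wrap("bar".getBytes()));
            } catch (ClosedChannelException expected) {
                // The channel rejects writes once closed.
                System.out.println("Write after close rejected: " + expected);
            }
        } finally {
            Files.deleteIfExists(tmp);
        }
    }
}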
Use of java.nio.channels.ClosedChannelException in project hadoop by apache.
The class FsDatasetImpl, method getVolumeInfo.
private Collection<VolumeInfo> getVolumeInfo() {
    Collection<VolumeInfo> info = new ArrayList<VolumeInfo>();
    for (FsVolumeImpl volume : volumes.getVolumes()) {
        long used = 0;
        long free = 0;
        try (FsVolumeReference ref = volume.obtainReference()) {
            used = volume.getDfsUsed();
            free = volume.getAvailable();
        } catch (ClosedChannelException e) {
            // The volume was closed while we were iterating; skip it.
            continue;
        } catch (IOException e) {
            LOG.warn(e.getMessage());
            used = 0;
            free = 0;
        }
        info.add(new VolumeInfo(volume, used, free));
    }
    return info;
}
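This is the reader-side counterpart of the reference-counting idiom sketched after checkVolume(): try-with-resources releases the reference automatically, ClosedChannelException means "skip this volume", and other failures degrade to zeroed statistics rather than failing the whole report. A minimal self-contained version follows; the Volume interface and collect() are hypothetical names, not Hadoop's API.

import java.nio.channels.ClosedChannelException;
import java.util.ArrayList;
import java.util.List;

public class VolumeInfoSketch {
    interface Volume {
        AutoCloseable obtainReference() throws ClosedChannelException;
        long dfsUsed();
        long available();
    }

    static List<long[]> collect(List<Volume> volumes) {
        List<long[]> info = new ArrayList<>();
        for (Volume volume : volumes) {
            try (AutoCloseable ref = volume.obtainReference()) {
                info.add(new long[] { volume.dfsUsed(), volume.available() });
            } catch (ClosedChannelException e) {
                // The volume was closed concurrently; skip it silently.
                continue;
            } catch (Exception e) {
                // Degrade gracefully on other failures, as getVolumeInfo() does.
                info.add(new long[] { 0L, 0L });
            }
        }
        return info;
    }
}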