Use of java.io.EOFException in project hadoop by apache.
The class SocketOutputStream, method transferToFully.
/**
* Transfers data from FileChannel using
* {@link FileChannel#transferTo(long, long, WritableByteChannel)}.
* Updates <code>waitForWritableTime</code> and <code>transferToTime</code>
* with the time spent blocked on the network and the time spent transferring
* data from disk to network respectively.
*
* Similar to readFully(), this waits until the requested amount of
* data is transferred.
*
* @param fileCh FileChannel to transfer data from.
* @param position position within the channel where the transfer begins
* @param count number of bytes to transfer.
* @param waitForWritableTime nanoseconds spent waiting for the socket
* to become writable
* @param transferToTime nanoseconds spent transferring data
*
* @throws EOFException
* If end of input file is reached before the requested number of
* bytes are transferred.
*
* @throws SocketTimeoutException
* If this channel blocks transfer longer than timeout for
* this stream.
*
* @throws IOException Includes any exception thrown by
* {@link FileChannel#transferTo(long, long, WritableByteChannel)}.
*/
public void transferToFully(FileChannel fileCh, long position, int count,
    LongWritable waitForWritableTime, LongWritable transferToTime) throws IOException {
  long waitTime = 0;
  long transferTime = 0;
  while (count > 0) {
    /*
     * Ideally we should wait after transferTo returns 0. But because of
     * a bug in JRE on Linux (http://bugs.sun.com/view_bug.do?bug_id=5103988),
     * which throws an exception instead of returning 0, we wait for the
     * channel to be writable before writing to it. If you ever see
     * IOException with message "Resource temporarily unavailable"
     * thrown here, please let us know.
     *
     * Once we move to JAVA SE 7, wait should be moved to correct place.
     */
    long start = System.nanoTime();
    waitForWritable();
    long wait = System.nanoTime();
    int nTransfered = (int) fileCh.transferTo(position, count, getChannel());
    if (nTransfered == 0) {
      // check if end of file is reached.
      if (position >= fileCh.size()) {
        throw new EOFException("EOF Reached. file size is " + fileCh.size()
            + " and " + count + " more bytes left to be " + "transfered.");
      }
      // otherwise assume the socket is full.
      // waitForWritable(); // see comment above.
    } else if (nTransfered < 0) {
      throw new IOException("Unexpected return of " + nTransfered + " from transferTo()");
    } else {
      position += nTransfered;
      count -= nTransfered;
    }
    long transfer = System.nanoTime();
    waitTime += wait - start;
    transferTime += transfer - wait;
  }
  if (waitForWritableTime != null) {
    waitForWritableTime.set(waitTime);
  }
  if (transferToTime != null) {
    transferToTime.set(transferTime);
  }
}
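For context, a minimal caller might look like the following sketch. The sendFileFully helper and its setup are illustrative assumptions, not code from the Hadoop source above; only transferToFully, LongWritable, and the SocketOutputStream(Socket, long) constructor are Hadoop APIs.

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.net.Socket;
import java.nio.channels.FileChannel;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.net.SocketOutputStream;

class TransferExample {
  // Stream an entire file to a socket and report the timing breakdown.
  static void sendFileFully(File file, Socket sock, long writeTimeoutMs)
      throws IOException {
    LongWritable waitNanos = new LongWritable();
    LongWritable transferNanos = new LongWritable();
    try (FileInputStream fis = new FileInputStream(file)) {
      FileChannel ch = fis.getChannel();
      SocketOutputStream out = new SocketOutputStream(sock, writeTimeoutMs);
      // count is an int, so this sketch assumes the file is under 2 GB.
      out.transferToFully(ch, 0, (int) ch.size(), waitNanos, transferNanos);
      System.out.println("blocked on network: " + waitNanos.get()
          + " ns; in transferTo(): " + transferNanos.get() + " ns");
    }
  }
}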
Use of java.io.EOFException in project flink by apache.
The class InPlaceMutableHashTable, method insert.
/**
* Inserts the given record into the hash table.
* Note: this method doesn't care about whether a record with the same key is already present.
* @param record The record to insert.
* @throws IOException (EOFException specifically, if memory runs out)
*/
@Override
public void insert(T record) throws IOException {
  if (closed) {
    return;
  }
  final int hashCode = MathUtils.jenkinsHash(buildSideComparator.hash(record));
  final int bucket = hashCode & numBucketsMask;
  // which segment contains the bucket
  final int bucketSegmentIndex = bucket >>> numBucketsPerSegmentBits;
  final MemorySegment bucketSegment = bucketSegments[bucketSegmentIndex];
  // offset of the bucket in the segment
  final int bucketOffset = (bucket & numBucketsPerSegmentMask) << bucketSizeBits;
  final long firstPointer = bucketSegment.getLong(bucketOffset);
  try {
    final long newFirstPointer = recordArea.appendPointerAndRecord(firstPointer, record);
    bucketSegment.putLong(bucketOffset, newFirstPointer);
  } catch (EOFException ex) {
    // The record area ran out of memory: compact (or throw if compaction
    // cannot free enough space), then retry the insert once.
    compactOrThrow();
    insert(record);
    return;
  }
  numElements++;
  resizeTableIfNecessary();
}
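The catch block is the interesting part for EOFException here: Flink's memory-backed structures throw it when the record area is full, and the handler frees space and retries the insert once. A standalone sketch of that retry-once idea follows; the AppendOnlyArea interface is a hypothetical stand-in, not a Flink API.

import java.io.EOFException;
import java.io.IOException;

// Hypothetical stand-in for a structure whose append fails with EOFException
// when its backing memory is exhausted (as recordArea does above).
interface AppendOnlyArea {
  long append(byte[] record) throws IOException;
  void compact() throws IOException; // frees space, or throws if it cannot
}

class RetryOnceExample {
  static long appendWithRetry(AppendOnlyArea area, byte[] record) throws IOException {
    try {
      return area.append(record);
    } catch (EOFException ex) {
      area.compact();             // reclaim space; may itself throw
      return area.append(record); // retry once; a second EOFException propagates
    }
  }
}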
Use of java.io.EOFException in project hadoop by apache.
The class ContractTestUtils, method readDataset.
/**
* Read the file and convert to a byte dataset.
* This implements readFully internally, so that it will read
* in the file without ever having to seek().
* @param fs filesystem
* @param path path to read from
* @param len length of data to read
* @return the bytes
* @throws IOException IO problems
*/
public static byte[] readDataset(FileSystem fs, Path path, int len) throws IOException {
  byte[] dest = new byte[len];
  int offset = 0;
  int nread = 0;
  try (FSDataInputStream in = fs.open(path)) {
    while (nread < len) {
      int nbytes = in.read(dest, offset + nread, len - nread);
      if (nbytes < 0) {
        throw new EOFException("End of file reached before reading fully.");
      }
      nread += nbytes;
    }
  }
  return dest;
}
Use of java.io.EOFException in project hadoop by apache.
The class DataXceiver, method run.
/**
* Read/write data from/to the DataXceiverServer.
*/
@Override
public void run() {
  int opsProcessed = 0;
  Op op = null;
  try {
    synchronized (this) {
      xceiver = Thread.currentThread();
    }
    dataXceiverServer.addPeer(peer, Thread.currentThread(), this);
    peer.setWriteTimeout(datanode.getDnConf().socketWriteTimeout);
    InputStream input = socketIn;
    try {
      IOStreamPair saslStreams = datanode.saslServer.receive(peer, socketOut,
          socketIn, datanode.getXferAddress().getPort(), datanode.getDatanodeId());
      input = new BufferedInputStream(saslStreams.in, smallBufferSize);
      socketOut = saslStreams.out;
    } catch (InvalidMagicNumberException imne) {
      if (imne.isHandshake4Encryption()) {
        LOG.info("Failed to read expected encryption handshake from client "
            + "at " + peer.getRemoteAddressString() + ". Perhaps the client "
            + "is running an older version of Hadoop which does not support "
            + "encryption");
      } else {
        LOG.info("Failed to read expected SASL data transfer protection "
            + "handshake from client at " + peer.getRemoteAddressString()
            + ". Perhaps the client is running an older version of Hadoop "
            + "which does not support SASL data transfer protection");
      }
      return;
    }
    super.initialize(new DataInputStream(input));
    // We process requests in a loop and stay around for a short timeout,
    // so the other end can reuse the connection for the next operation.
    // Setting keepalive timeout to 0 disables this behavior.
    do {
      updateCurrentThreadName("Waiting for operation #" + (opsProcessed + 1));
      try {
        if (opsProcessed != 0) {
          assert dnConf.socketKeepaliveTimeout > 0;
          peer.setReadTimeout(dnConf.socketKeepaliveTimeout);
        } else {
          peer.setReadTimeout(dnConf.socketTimeout);
        }
        op = readOp();
      } catch (InterruptedIOException ignored) {
        // Time out while we wait for client rpc
        break;
      } catch (EOFException | ClosedChannelException e) {
        // Since we optimistically expect the next op, it's quite normal to
        // get EOF here.
        LOG.debug("Cached {} closing after {} ops. "
            + "This message is usually benign.", peer, opsProcessed);
        break;
      } catch (IOException err) {
        incrDatanodeNetworkErrors();
        throw err;
      }
      // restore normal timeout
      if (opsProcessed != 0) {
        peer.setReadTimeout(dnConf.socketTimeout);
      }
      opStartTime = monotonicNow();
      processOp(op);
      ++opsProcessed;
    } while ((peer != null) && (!peer.isClosed() && dnConf.socketKeepaliveTimeout > 0));
  } catch (Throwable t) {
    String s = datanode.getDisplayName() + ":DataXceiver error processing "
        + ((op == null) ? "unknown" : op.name()) + " operation "
        + " src: " + remoteAddress + " dst: " + localAddress;
    if (op == Op.WRITE_BLOCK && t instanceof ReplicaAlreadyExistsException) {
      // For WRITE_BLOCK, it is okay if the replica already exists, since the
      // client and replication may write the same block to the same datanode
      // at the same time.
      if (LOG.isTraceEnabled()) {
        LOG.trace(s, t);
      } else {
        LOG.info(s + "; " + t);
      }
    } else if (op == Op.READ_BLOCK && t instanceof SocketTimeoutException) {
      String s1 = "Likely the client has stopped reading, disconnecting it";
      s1 += " (" + s + ")";
      if (LOG.isTraceEnabled()) {
        LOG.trace(s1, t);
      } else {
        LOG.info(s1 + "; " + t);
      }
    } else if (t instanceof InvalidToken) {
      // The InvalidToken exception has already been logged in the
      // checkAccess() method and this is not a server error.
      if (LOG.isTraceEnabled()) {
        LOG.trace(s, t);
      }
    } else {
      LOG.error(s, t);
    }
  } finally {
    collectThreadLocalStates();
    if (LOG.isDebugEnabled()) {
      LOG.debug(datanode.getDisplayName()
          + ":Number of active connections is: " + datanode.getXceiverCount());
    }
    updateCurrentThreadName("Cleaning up");
    if (peer != null) {
      dataXceiverServer.closePeer(peer);
      IOUtils.closeStream(in);
    }
  }
}
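The EOFException | ClosedChannelException branch is the idiom worth noting: a server that optimistically waits for the client's next operation must treat EOF as a normal disconnect rather than an error. A standalone sketch of that idiom, with all names hypothetical:

import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;

class OpLoopExample {
  // Returns the number of operations served before the client hung up.
  static int serveRequests(DataInputStream in) throws IOException {
    int opsProcessed = 0;
    while (true) {
      final byte opCode;
      try {
        opCode = in.readByte(); // blocks waiting for the next request
      } catch (EOFException e) {
        break; // client closed between requests: expected, not an error
      }
      // ... dispatch on opCode here ...
      opsProcessed++;
    }
    return opsProcessed;
  }
}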
Use of java.io.EOFException in project hadoop by apache.
The class StripedFileTestUtil, method verifySeek.
static void verifySeek(FileSystem fs, Path srcPath, int fileLength,
    ErasureCodingPolicy ecPolicy, int blkGroupSize) throws IOException {
  try (FSDataInputStream in = fs.open(srcPath)) {
    // seek to 1/2 of content
    int pos = fileLength / 2;
    assertSeekAndRead(in, pos, fileLength);
    // seek to 1/3 of content
    pos = fileLength / 3;
    assertSeekAndRead(in, pos, fileLength);
    // seek to 0 pos
    pos = 0;
    assertSeekAndRead(in, pos, fileLength);
    if (fileLength > ecPolicy.getCellSize()) {
      // seek to cellSize boundary
      pos = ecPolicy.getCellSize() - 1;
      assertSeekAndRead(in, pos, fileLength);
    }
    if (fileLength > ecPolicy.getCellSize() * ecPolicy.getNumDataUnits()) {
      // seek to striped cell group boundary
      pos = ecPolicy.getCellSize() * ecPolicy.getNumDataUnits() - 1;
      assertSeekAndRead(in, pos, fileLength);
    }
    if (fileLength > blkGroupSize) {
      // seek to striped block group boundary
      pos = blkGroupSize - 1;
      assertSeekAndRead(in, pos, fileLength);
    }
    if (!(in.getWrappedStream() instanceof WebHdfsInputStream)) {
      try {
        in.seek(-1);
        Assert.fail("Should be failed if seek to negative offset");
      } catch (EOFException e) {
        // expected
      }
      try {
        in.seek(fileLength + 1);
        Assert.fail("Should be failed if seek after EOF");
      } catch (EOFException e) {
        // expected
      }
    }
  }
}
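The assertSeekAndRead helper is not shown above. A plausible sketch of what such a helper verifies follows; this is an assumption for illustration, not the actual Hadoop implementation, and getExpectedByte is a hypothetical content oracle.

// Hypothetical sketch of an assertSeekAndRead-style helper: seek to pos,
// read to EOF, and check every byte against the expected content.
static void assertSeekAndRead(FSDataInputStream in, int pos, int fileLength)
    throws IOException {
  in.seek(pos);
  Assert.assertEquals(pos, in.getPos());
  byte[] buf = new byte[fileLength - pos];
  IOUtils.readFully(in, buf, 0, buf.length);
  for (int i = 0; i < buf.length; i++) {
    Assert.assertEquals("Byte at offset " + (pos + i),
        getExpectedByte(pos + i), buf[i]);
  }
}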