Use of java.io.EOFException in project hadoop by Apache.
From the class TestRequestHedgingProxyProvider, the method testHedgingWhenConnectAndEOFException: verifies that when both the active and standby namenodes fail, the hedging proxy surfaces every underlying exception through a MultiException.
@Test
public void testHedgingWhenConnectAndEOFException() throws Exception {
  NamenodeProtocols active = Mockito.mock(NamenodeProtocols.class);
  Mockito.when(active.getStats()).thenThrow(new EOFException());
  NamenodeProtocols standby = Mockito.mock(NamenodeProtocols.class);
  Mockito.when(standby.getStats()).thenThrow(new ConnectException());
  RequestHedgingProxyProvider<NamenodeProtocols> provider =
      new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class,
          createFactory(active, standby));
  try {
    provider.getProxy().proxy.getStats();
    Assert.fail("Should fail since both active and standby namenodes throw"
        + " Exceptions!");
  } catch (MultiException me) {
    for (Exception ex : me.getExceptions().values()) {
      if (!(ex instanceof ConnectException) && !(ex instanceof EOFException)) {
        Assert.fail("Unexpected Exception " + ex.getMessage());
      }
    }
  }
  Mockito.verify(active).getStats();
  Mockito.verify(standby).getStats();
}
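The test leans on a common Mockito pattern: stub a method to throw, drive the code under test, then verify the mock was actually invoked. Below is a minimal, self-contained sketch of that pattern; StatsSource is a hypothetical interface invented for illustration (not part of Hadoop), and it must declare IOException, since Mockito rejects thenThrow with a checked exception the mocked method does not declare.

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import java.io.EOFException;
import java.io.IOException;

public class StubThrowSketch {
  // Hypothetical interface for illustration only; declaring IOException lets
  // Mockito accept the checked EOFException in thenThrow(...).
  interface StatsSource {
    long[] getStats() throws IOException;
  }

  public static void main(String[] args) throws Exception {
    StatsSource source = mock(StatsSource.class);
    when(source.getStats()).thenThrow(new EOFException("connection dropped"));
    try {
      source.getStats();
      System.out.println("unexpected: no exception");
    } catch (EOFException expected) {
      System.out.println("caught expected EOFException");
    }
    // Confirm the stubbed method was invoked exactly once.
    verify(source).getStats();
  }
}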
Use of java.io.EOFException in project hadoop by Apache.
From the class FsDatasetImpl, the method checkBlock: validates a replica's existence, state, backing file, and on-disk length, throwing EOFException when the replica is shorter than the requested minimum length.
/**
 * Check if a block is valid.
 *
 * @param b         The block to check.
 * @param minLength The minimum length that the block must have. May be 0.
 * @param state     If this is null, it is ignored. If it is non-null, we
 *                  will check that the replica has this state.
 *
 * @throws ReplicaNotFoundException        If the replica is not found.
 * @throws UnexpectedReplicaStateException If the replica is not in the
 *                                         expected state.
 * @throws FileNotFoundException           If the block file is not found or
 *                                         there was an error locating it.
 * @throws EOFException                    If the replica length is too short.
 * @throws IOException                     May be thrown from the methods called.
 */
@Override // FsDatasetSpi
public void checkBlock(ExtendedBlock b, long minLength, ReplicaState state)
    throws ReplicaNotFoundException, UnexpectedReplicaStateException,
    FileNotFoundException, EOFException, IOException {
  final ReplicaInfo replicaInfo =
      volumeMap.get(b.getBlockPoolId(), b.getLocalBlock());
  if (replicaInfo == null) {
    throw new ReplicaNotFoundException(b);
  }
  if (replicaInfo.getState() != state) {
    throw new UnexpectedReplicaStateException(b, state);
  }
  if (!replicaInfo.blockDataExists()) {
    throw new FileNotFoundException(replicaInfo.getBlockURI().toString());
  }
  long onDiskLength = getLength(b);
  if (onDiskLength < minLength) {
    throw new EOFException(b + "'s on-disk length " + onDiskLength
        + " is shorter than minLength " + minLength);
  }
}
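The method's shape is a chain of validate-then-throw checks, each failure mode mapped to a distinct, descriptive exception. As a minimal sketch of the same pattern outside Hadoop, applied to an ordinary file (checkFileLength and the example path are hypothetical, invented here for illustration):

import java.io.EOFException;
import java.io.File;
import java.io.FileNotFoundException;

public class LengthCheckSketch {
  // Sketch of the validate-then-throw pattern checkBlock uses: a missing file
  // and a too-short file each raise a different, descriptive exception.
  static void checkFileLength(File f, long minLength)
      throws FileNotFoundException, EOFException {
    if (!f.exists()) {
      throw new FileNotFoundException(f.getPath());
    }
    long onDiskLength = f.length();
    if (onDiskLength < minLength) {
      throw new EOFException(f + "'s on-disk length " + onDiskLength
          + " is shorter than minLength " + minLength);
    }
  }

  public static void main(String[] args) throws Exception {
    checkFileLength(new File("/etc/hosts"), 1); // path is only an example
  }
}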
Use of java.io.EOFException in project hadoop by Apache.
From the class ByteRangeInputStream, the method openInputStream: opens an HTTP range read at startOffset, throwing EOFException for a negative position and bounding the returned stream to the advertised content length.
@VisibleForTesting
protected InputStreamAndFileLength openInputStream(long startOffset)
    throws IOException {
  if (startOffset < 0) {
    throw new EOFException("Negative Position");
  }
  // Use the original url if no resolved url exists, e.g. if
  // it's the first time a request is made.
  final boolean resolved = resolvedURL.getURL() != null;
  final URLOpener opener = resolved ? resolvedURL : originalURL;
  final HttpURLConnection connection = opener.connect(startOffset, resolved);
  resolvedURL.setURL(getResolvedUrl(connection));
  InputStream in = connection.getInputStream();
  final Long length;
  final Map<String, List<String>> headers = connection.getHeaderFields();
  if (isChunkedTransferEncoding(headers)) {
    // file length is not known
    length = null;
  } else {
    // for non-chunked transfer-encoding, get content-length
    final String cl = connection.getHeaderField(HttpHeaders.CONTENT_LENGTH);
    if (cl == null) {
      throw new IOException(HttpHeaders.CONTENT_LENGTH + " is missing: "
          + headers);
    }
    final long streamlength = Long.parseLong(cl);
    length = startOffset + streamlength;
    // Java has a bug with >2GB request streams. It won't bounds check
    // the reads so the transfer blocks until the server times out
    in = new BoundedInputStream(in, streamlength);
  }
  return new InputStreamAndFileLength(length, in);
}
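The wrapping in BoundedInputStream is what makes reads return EOF locally once the advertised content length is consumed, instead of blocking on the server. A standalone sketch of that behavior, assuming the BoundedInputStream in use here is Apache Commons IO's org.apache.commons.io.input.BoundedInputStream; the payload and length below are made up for illustration:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.commons.io.input.BoundedInputStream;

public class BoundedReadSketch {
  public static void main(String[] args) throws IOException {
    byte[] payload = "hello world".getBytes("UTF-8");
    long contentLength = 5; // pretend the Content-Length header advertised 5
    InputStream raw = new ByteArrayInputStream(payload);
    try (InputStream in = new BoundedInputStream(raw, contentLength)) {
      int n;
      long total = 0;
      byte[] buf = new byte[8];
      while ((n = in.read(buf)) != -1) {
        total += n; // reads stop at the bound, not at the raw stream's end
      }
      System.out.println("read " + total + " bytes"); // prints 5
    }
  }
}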
Use of java.io.EOFException in project hadoop by Apache.
From the class TestRpcProgramNfs3, the method getFileContentsUsingDfs: reads a file fully, then confirms that one more readByte() past the end raises EOFException.
private byte[] getFileContentsUsingDfs(String fileName, int len)
    throws Exception {
  final FSDataInputStream in = hdfs.open(new Path(fileName));
  final byte[] ret = new byte[len];
  in.readFully(ret);
  try {
    in.readByte();
    Assert.fail("expected end of file");
  } catch (EOFException e) {
    // expected. Unfortunately there is no associated message to check
  }
  in.close();
  return ret;
}
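The end-of-file probe works because FSDataInputStream extends java.io.DataInputStream, whose readFully and readByte signal end-of-stream with EOFException rather than a -1 return value. A self-contained sketch of the same probe against an in-memory stream:

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;

public class EofProbeSketch {
  public static void main(String[] args) throws IOException {
    DataInputStream in =
        new DataInputStream(new ByteArrayInputStream(new byte[2]));
    byte[] buf = new byte[2];
    in.readFully(buf); // consumes exactly the bytes available
    try {
      in.readByte();   // one byte past the end
      System.out.println("unexpected: extra data");
    } catch (EOFException e) {
      System.out.println("end of stream, as expected");
    }
  }
}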
Use of java.io.EOFException in project hadoop by Apache.
From the class TestPread, the method testTruncateWhileReading: truncates a file that is open for reading, then checks that a positioned read past the new length fails promptly with EOFException instead of looping forever querying block locations.
@Test
public void testTruncateWhileReading() throws Exception {
  Path path = new Path("/testfile");
  final int blockSize = 512;
  // prevent initial pre-fetch of multiple block locations
  Configuration conf = new Configuration();
  conf.setLong(HdfsClientConfigKeys.Read.PREFETCH_SIZE_KEY, blockSize);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  try {
    DistributedFileSystem fs = cluster.getFileSystem();
    // create multi-block file
    FSDataOutputStream dos =
        fs.create(path, true, blockSize, (short) 1, blockSize);
    dos.write(new byte[blockSize * 3]);
    dos.close();
    // truncate a file while it's open
    final FSDataInputStream dis = fs.open(path);
    while (!fs.truncate(path, 10)) {
      Thread.sleep(10);
    }
    // verify that reading bytes outside the initial pre-fetch do
    // not send the client into an infinite loop querying locations.
    ExecutorService executor = Executors.newFixedThreadPool(1);
    Future<?> future = executor.submit(new Callable<Void>() {
      @Override
      public Void call() throws IOException {
        // read from 2nd block.
        dis.readFully(blockSize, new byte[4]);
        return null;
      }
    });
    try {
      future.get(4, TimeUnit.SECONDS);
      Assert.fail();
    } catch (ExecutionException ee) {
      assertTrue(ee.toString(), ee.getCause() instanceof EOFException);
    } finally {
      future.cancel(true);
      executor.shutdown();
    }
  } finally {
    cluster.shutdown();
  }
}
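Note the harness around the read: submitting the positioned read to an executor and bounding future.get with a timeout is what turns a would-be infinite loop into a clean test failure. A minimal sketch of that timeout-guard pattern in isolation; the sleeping task is a stand-in, invented here, for an operation that might hang:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class TimeoutGuardSketch {
  public static void main(String[] args) throws Exception {
    ExecutorService executor = Executors.newFixedThreadPool(1);
    // Stand-in for an operation that might never finish.
    Future<?> future = executor.submit(() -> {
      Thread.sleep(10_000);
      return null;
    });
    try {
      // Bound how long we are willing to wait.
      future.get(1, TimeUnit.SECONDS);
      System.out.println("finished in time");
    } catch (TimeoutException te) {
      System.out.println("operation did not finish in time");
    } finally {
      future.cancel(true); // interrupt the task if it is still running
      executor.shutdown();
    }
  }
}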