Use of org.apache.hadoop.hdfs.web.WebHdfsFileSystem.WebHdfsInputStream in project hadoop by apache.
The class StripedFileTestUtil, method verifySeek:
static void verifySeek(FileSystem fs, Path srcPath, int fileLength,
    ErasureCodingPolicy ecPolicy, int blkGroupSize) throws IOException {
  try (FSDataInputStream in = fs.open(srcPath)) {
    // seek to 1/2 of content
    int pos = fileLength / 2;
    assertSeekAndRead(in, pos, fileLength);
    // seek to 1/3 of content
    pos = fileLength / 3;
    assertSeekAndRead(in, pos, fileLength);
    // seek to position 0
    pos = 0;
    assertSeekAndRead(in, pos, fileLength);
    if (fileLength > ecPolicy.getCellSize()) {
      // seek to the cell boundary
      pos = ecPolicy.getCellSize() - 1;
      assertSeekAndRead(in, pos, fileLength);
    }
    if (fileLength > ecPolicy.getCellSize() * ecPolicy.getNumDataUnits()) {
      // seek to the striped cell group boundary
      pos = ecPolicy.getCellSize() * ecPolicy.getNumDataUnits() - 1;
      assertSeekAndRead(in, pos, fileLength);
    }
    if (fileLength > blkGroupSize) {
      // seek to the striped block group boundary
      pos = blkGroupSize - 1;
      assertSeekAndRead(in, pos, fileLength);
    }
    // WebHdfsInputStream does not validate offsets eagerly on seek,
    // so the out-of-range checks below are skipped for it.
    if (!(in.getWrappedStream() instanceof WebHdfsInputStream)) {
      try {
        in.seek(-1);
        Assert.fail("Seeking to a negative offset should fail");
      } catch (EOFException e) {
        // expected
      }
      try {
        in.seek(fileLength + 1);
        Assert.fail("Seeking past EOF should fail");
      } catch (EOFException e) {
        // expected
      }
    }
  }
}
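For context, a minimal sketch of how a test might drive verifySeek. The policy, block size, file length, and path below are illustrative assumptions rather than code from the Hadoop source; fs stands for a FileSystem whose target directory already has the erasure coding policy set.

// Hypothetical driver for verifySeek (all names and sizes are illustrative).
// A striped block group spans numDataUnits full blocks, so a file slightly
// longer than one group exercises every boundary branch in verifySeek.
ErasureCodingPolicy ecPolicy = SystemErasureCodingPolicies.getByID(
    SystemErasureCodingPolicies.RS_6_3_POLICY_ID);
int blockSize = 2 * ecPolicy.getCellSize();                 // assumed block size
int blkGroupSize = blockSize * ecPolicy.getNumDataUnits();  // one full block group
int fileLength = blkGroupSize + 123;                        // crosses the group boundary
Path srcPath = new Path("/ec/verifySeekFile");              // hypothetical path
DFSTestUtil.createFile(fs, srcPath, fileLength, (short) 1, 0L);
StripedFileTestUtil.verifySeek(fs, srcPath, fileLength, ecPolicy, blkGroupSize);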
Use of org.apache.hadoop.hdfs.web.WebHdfsFileSystem.WebHdfsInputStream in project hadoop by apache.
The class TestWebHDFS, method testReadRetryExceptionHelper:
private void testReadRetryExceptionHelper(WebHdfsFileSystem fs, Path fn,
    final IOException ex, String msg, boolean shouldAttemptRetry,
    int numTimesTried) throws Exception {
  // Override WebHdfsInputStream#getInputStream so that it returns
  // an input stream that throws the specified exception when read
  // is called.
  FSDataInputStream in = fs.open(fn);
  // The connection is made only when the first read() occurs.
  in.read();
  final WebHdfsInputStream webIn = (WebHdfsInputStream) (in.getWrappedStream());
  final InputStream spyInputStream = spy(webIn.getReadRunner().getInputStream());
  doThrow(ex).when(spyInputStream).read((byte[]) any(), anyInt(), anyInt());
  final WebHdfsFileSystem.ReadRunner rr = spy(webIn.getReadRunner());
  doReturn(spyInputStream).when(rr).initializeInputStream((HttpURLConnection) any());
  rr.setInputStream(spyInputStream);
  webIn.setReadRunner(rr);
  // Override the filesystem's retry policy in order to verify that
  // WebHdfsInputStream calls shouldRetry for the appropriate exceptions.
  final RetryAction retryAction = new RetryAction(RetryDecision.RETRY);
  final RetryAction failAction = new RetryAction(RetryDecision.FAIL);
  RetryPolicy rp = new RetryPolicy() {

    @Override
    public RetryAction shouldRetry(Exception e, int retries, int failovers,
        boolean isIdempotentOrAtMostOnce) throws Exception {
      attemptedRetry = true;
      if (retries > 3) {
        return failAction;
      } else {
        return retryAction;
      }
    }
  };
  fs.setRetryPolicy(rp);
  // If the retry logic is exercised, attemptedRetry will be true. Some
  // exceptions should exercise the retry logic and others should not.
  // Either way, the value of attemptedRetry should match shouldAttemptRetry.
  attemptedRetry = false;
  try {
    webIn.read();
    fail(msg + ": read should have thrown an exception.");
  } catch (Exception e) {
    assertTrue(e.getMessage().contains(msg));
  }
  assertEquals(msg + ": read should " + (shouldAttemptRetry ? "" : "not ")
      + "have called shouldRetry.", shouldAttemptRetry, attemptedRetry);
  verify(rr, times(numTimesTried)).getResponse((HttpURLConnection) any());
  webIn.close();
  in.close();
}
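The fault injection above relies on Mockito's spy/doThrow idiom. As a standalone illustration of that idiom only, here is a sketch using a plain ByteArrayInputStream in place of the WebHDFS stream (an assumption for demonstration):

// Spy wraps a real object; doThrow replaces only the stubbed overload.
InputStream spyIn = spy(new ByteArrayInputStream(new byte[] { 1, 2, 3 }));
doThrow(new IOException("injected failure"))
    .when(spyIn).read(any(byte[].class), anyInt(), anyInt());
spyIn.read();  // unstubbed single-byte overload: real behavior, returns 1
try {
  spyIn.read(new byte[8], 0, 8);  // stubbed overload: throws
} catch (IOException e) {
  // "injected failure", as configured above
}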
Use of org.apache.hadoop.hdfs.web.WebHdfsFileSystem.WebHdfsInputStream in project hadoop by apache.
The class TestWebHDFS, method testWebHdfsReadRetries:
@Test(timeout = 90000)
public void testWebHdfsReadRetries() throws Exception {
  final Configuration conf = WebHdfsTestUtil.createConf();
  final Path dir = new Path("/testWebHdfsReadRetries");
  conf.setBoolean(HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY, true);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024 * 512);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  final short numDatanodes = 1;
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
  try {
    cluster.waitActive();
    final FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsConstants.WEBHDFS_SCHEME);
    // Create a file.
    final long length = 1L << 20;
    final Path file1 = new Path(dir, "testFile");
    DFSTestUtil.createFile(fs, file1, length, numDatanodes, 20120406L);
    // Get the file status and check that the file was written properly.
    final FileStatus s1 = fs.getFileStatus(file1);
    assertEquals("Write failed for file " + file1, length, s1.getLen());
    // Ensure the file can be read through a WebHdfsInputStream.
    FSDataInputStream in = fs.open(file1);
    assertTrue("Input stream is not an instance of class WebHdfsInputStream",
        in.getWrappedStream() instanceof WebHdfsInputStream);
    int count = 0;
    while (in.read() != -1) {
      count++;
    }
    assertEquals("Read failed for file " + file1, s1.getLen(), count);
    assertEquals("Should not be able to read beyond end of file", -1, in.read());
    in.close();
    try {
      in.read();
      fail("Read after close should have failed");
    } catch (IOException ioe) {
      // expected
    }
    WebHdfsFileSystem wfs = (WebHdfsFileSystem) fs;
    // Read should not be retried if an AccessControlException is encountered.
    String msg = "ReadRetries: Test Access Control Exception";
    testReadRetryExceptionHelper(wfs, file1, new AccessControlException(msg), msg, false, 1);
    // The retry policy should be invoked if IOExceptions are thrown.
    msg = "ReadRetries: Test SocketTimeoutException";
    testReadRetryExceptionHelper(wfs, file1, new SocketTimeoutException(msg), msg, true, 5);
    msg = "ReadRetries: Test SocketException";
    testReadRetryExceptionHelper(wfs, file1, new SocketException(msg), msg, true, 5);
    msg = "ReadRetries: Test EOFException";
    testReadRetryExceptionHelper(wfs, file1, new EOFException(msg), msg, true, 5);
    msg = "ReadRetries: Test Generic IO Exception";
    testReadRetryExceptionHelper(wfs, file1, new IOException(msg), msg, true, 5);
    // If an InvalidToken exception occurs, WebHdfs only retries if the
    // delegation token was replaced. Do that twice, then verify by checking
    // the number of times it tried.
    WebHdfsFileSystem spyfs = spy(wfs);
    when(spyfs.replaceExpiredDelegationToken()).thenReturn(true, true, false);
    msg = "ReadRetries: Test Invalid Token Exception";
    testReadRetryExceptionHelper(spyfs, file1, new InvalidToken(msg), msg, false, 3);
  } finally {
    cluster.shutdown();
  }
}
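As a sanity check on the numTimesTried values above, the arithmetic follows directly from the helper's anonymous RetryPolicy (plain Java, no Hadoop APIs):

// shouldRetry returns RETRY while retries <= 3, so a retriable exception is
// attempted 1 (initial read) + 4 (retries at retries = 0, 1, 2, 3) = 5 times.
int retriableAttempts = 1 + 4;    // matches numTimesTried = 5 above
// InvalidToken bypasses the policy: WebHdfs retries only while
// replaceExpiredDelegationToken() returns true, stubbed as (true, true, false).
int invalidTokenAttempts = 1 + 2;  // matches numTimesTried = 3 above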