Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class TestSmallBlock, method checkFile.
private void checkFile(DistributedFileSystem fileSys, Path name)
    throws IOException {
  BlockLocation[] locations = fileSys.getFileBlockLocations(
      fileSys.getFileStatus(name), 0, fileSize);
  assertEquals("Number of blocks", fileSize, locations.length);
  FSDataInputStream stm = fileSys.open(name);
  byte[] expected = new byte[fileSize];
  if (simulatedStorage) {
    LocatedBlocks lbs = fileSys.getClient().getLocatedBlocks(
        name.toString(), 0, fileSize);
    DFSTestUtil.fillExpectedBuf(lbs, expected);
  } else {
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[fileSize];
  stm.readFully(0, actual);
  checkAndEraseData(actual, 0, expected, "Read Sanity Test");
  stm.close();
}
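The sanity check above uses the positional readFully(long, byte[]) overload of FSDataInputStream, which fills the buffer starting at the given file offset and, unlike the sequential overload, does not move the stream's current position. A minimal standalone sketch of the same call (the path and buffer size here are hypothetical, not taken from the test):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PositionalReadSketch {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    Path file = new Path("/tmp/example.dat");  // hypothetical path
    byte[] buf = new byte[1024];               // hypothetical size
    try (FSDataInputStream in = fs.open(file)) {
      // Positional read: fills buf from file offset 0 without advancing the
      // stream's current position (the same call as stm.readFully(0, actual) above).
      in.readFully(0, buf);
    }
  }
}
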
Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class TestWriteRead, method readData.
/**
 * Open the file and read it from beginning to end, then close it.
 * Returns the number of bytes read.
 * Supports both sequential reads and positional reads.
 */
private long readData(String fname, byte[] buffer, long byteExpected,
    long beginPosition) throws IOException {
  long totalByteRead = 0;
  Path path = getFullyQualifiedPath(fname);
  FSDataInputStream in = null;
  try {
    in = openInputStream(path);
    long visibleLenFromReadStream =
        ((HdfsDataInputStream) in).getVisibleLength();
    if (visibleLenFromReadStream < byteExpected) {
      throw new IOException(visibleLenFromReadStream
          + " = visibleLenFromReadStream < bytesExpected= " + byteExpected);
    }
    totalByteRead = readUntilEnd(in, buffer, buffer.length, fname,
        beginPosition, visibleLenFromReadStream, positionReadOption);
    in.close();
    // Reading more data than the visible length is OK, but not less.
    if (totalByteRead + beginPosition < byteExpected) {
      throw new IOException("readData mismatch in byte read: expected="
          + byteExpected + " ; got " + (totalByteRead + beginPosition));
    }
    return totalByteRead + beginPosition;
  } catch (IOException e) {
    throw new IOException("##### Caught Exception in readData. "
        + "Total Byte Read so far = " + totalByteRead
        + " beginPosition = " + beginPosition, e);
  } finally {
    if (in != null) {
      in.close();
    }
  }
}
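The Javadoc above distinguishes sequential reads from positional reads; FSDataInputStream exposes both. A minimal sketch of the two loops side by side (this helper is illustrative and is not the readUntilEnd used by the test):

import java.io.IOException;

import org.apache.hadoop.fs.FSDataInputStream;

public class ReadModeSketch {
  // Reads the whole stream in repeated passes over buffer and returns the
  // total byte count, using either the positional API (explicit offsets, the
  // stream position is untouched) or the sequential API (the stream advances
  // its own position).
  static long readAll(FSDataInputStream in, byte[] buffer, boolean positional)
      throws IOException {
    long total = 0;
    int n;
    if (positional) {
      while ((n = in.read(total, buffer, 0, buffer.length)) > 0) {
        total += n;
      }
    } else {
      while ((n = in.read(buffer, 0, buffer.length)) > 0) {
        total += n;
      }
    }
    return total;
  }
}
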
Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class TestHASafeMode, method testOpenFileWhenNNAndClientCrashAfterAddBlock.
/** Test NN crash and client crash/stuck immediately after block allocation */
@Test(timeout = 100000)
public void testOpenFileWhenNNAndClientCrashAfterAddBlock() throws Exception {
  cluster.getConfiguration(0).set(
      DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "1.0f");
  String testData = "testData";
  // Make sure we write the full block before creating the dummy block at the NN.
  cluster.getConfiguration(0).setInt("io.bytes.per.checksum", testData.length());
  cluster.restartNameNode(0);
  try {
    cluster.waitActive();
    cluster.transitionToActive(0);
    cluster.transitionToStandby(1);
    DistributedFileSystem dfs = cluster.getFileSystem(0);
    String pathString = "/tmp1.txt";
    Path filePath = new Path(pathString);
    FSDataOutputStream create = dfs.create(filePath, FsPermission.getDefault(),
        true, 1024, (short) 3, testData.length(), null);
    create.write(testData.getBytes());
    create.hflush();
    long fileId = ((DFSOutputStream) create.getWrappedStream()).getFileId();
    FileStatus fileStatus = dfs.getFileStatus(filePath);
    DFSClient client = DFSClientAdapter.getClient(dfs);
    // Add one dummy block at the NN, but do not write it to a DataNode.
    ExtendedBlock previousBlock = DFSClientAdapter.getPreviousBlock(client, fileId);
    DFSClientAdapter.getNamenode(client).addBlock(pathString,
        client.getClientName(), new ExtendedBlock(previousBlock),
        new DatanodeInfo[0],
        DFSClientAdapter.getFileId((DFSOutputStream) create.getWrappedStream()),
        null, null);
    cluster.restartNameNode(0, true);
    cluster.restartDataNode(0);
    cluster.transitionToActive(0);
    // Let the block reports be processed.
    Thread.sleep(2000);
    FSDataInputStream is = dfs.open(filePath);
    is.close();
    // Initiate lease recovery.
    dfs.recoverLease(filePath);
    assertTrue("Recovery also should be success", dfs.recoverLease(filePath));
  } finally {
    cluster.shutdown();
  }
}
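recoverLease is asynchronous: a call may return false while block recovery is still in progress, and it only returns true once the lease has been released and the file is closed, which is why the test checks the result of a second call. A minimal sketch (a hypothetical helper, not part of TestHASafeMode) of polling until recovery completes:

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  // Polls recoverLease until it reports the file is closed, or the timeout expires.
  static boolean waitForLeaseRecovery(DistributedFileSystem dfs, Path path,
      long timeoutMs) throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      if (dfs.recoverLease(path)) {
        return true;   // lease released; the file is closed and safe to read
      }
      Thread.sleep(1000);  // recovery still in progress; try again
    }
    return false;
  }
}
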
Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class TestWebHDFS, method testReadRetryExceptionHelper.
private void testReadRetryExceptionHelper(WebHdfsFileSystem fs, Path fn,
    final IOException ex, String msg, boolean shouldAttemptRetry,
    int numTimesTried) throws Exception {
  // Override WebHdfsInputStream#getInputStream so that it returns an input
  // stream that throws the specified exception when read() is called.
  FSDataInputStream in = fs.open(fn);
  // The connection is made only when the first read() occurs.
  in.read();
  final WebHdfsInputStream webIn = (WebHdfsInputStream) (in.getWrappedStream());
  final InputStream spyInputStream = spy(webIn.getReadRunner().getInputStream());
  doThrow(ex).when(spyInputStream).read((byte[]) any(), anyInt(), anyInt());
  final WebHdfsFileSystem.ReadRunner rr = spy(webIn.getReadRunner());
  doReturn(spyInputStream).when(rr).initializeInputStream((HttpURLConnection) any());
  rr.setInputStream(spyInputStream);
  webIn.setReadRunner(rr);
  // Override the filesystem's retry policy in order to verify that
  // WebHdfsInputStream calls shouldRetry for the appropriate exceptions.
  final RetryAction retryAction = new RetryAction(RetryDecision.RETRY);
  final RetryAction failAction = new RetryAction(RetryDecision.FAIL);
  RetryPolicy rp = new RetryPolicy() {
    @Override
    public RetryAction shouldRetry(Exception e, int retries, int failovers,
        boolean isIdempotentOrAtMostOnce) throws Exception {
      attemptedRetry = true;
      if (retries > 3) {
        return failAction;
      } else {
        return retryAction;
      }
    }
  };
  fs.setRetryPolicy(rp);
  // If the retry logic is exercised, attemptedRetry will be true. Some
  // exceptions should exercise the retry logic and others should not.
  // Either way, the value of attemptedRetry should match shouldAttemptRetry.
  attemptedRetry = false;
  try {
    webIn.read();
    fail(msg + ": Read should have thrown exception.");
  } catch (Exception e) {
    assertTrue(e.getMessage().contains(msg));
  }
  assertEquals(msg + ": Read should " + (shouldAttemptRetry ? "" : "not ")
      + "have called shouldRetry. ", attemptedRetry, shouldAttemptRetry);
  verify(rr, times(numTimesTried)).getResponse((HttpURLConnection) any());
  webIn.close();
  in.close();
}
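The anonymous RetryPolicy above gives up after more than three retries so the test can observe attemptedRetry. Outside a test, a similar budget can be expressed with the stock factories in org.apache.hadoop.io.retry.RetryPolicies; a minimal standalone sketch (not part of TestWebHDFS) showing a fixed-sleep policy and the decisions it returns:

import java.io.IOException;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;

public class RetryPolicySketch {
  public static void main(String[] args) throws Exception {
    // Stock policy: retry up to 3 times, sleeping 1 second between attempts.
    RetryPolicy policy =
        RetryPolicies.retryUpToMaximumCountWithFixedSleep(3, 1, TimeUnit.SECONDS);
    // First failure of an idempotent read: the policy asks for a retry.
    RetryAction first =
        policy.shouldRetry(new IOException("connection reset"), 0, 0, true);
    // Fourth failure: the retry budget is exhausted and the policy gives up.
    RetryAction fourth =
        policy.shouldRetry(new IOException("connection reset"), 3, 0, true);
    System.out.println(first.action + " then " + fourth.action);
  }
}
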
Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class TestWebHDFS, method verifySeek.
/** test seek */
static void verifySeek(FileSystem fs, Path p, long offset, long length,
    byte[] buf, byte[] expected) throws IOException {
  long remaining = length - offset;
  long checked = 0;
  LOG.info("XXX SEEK: offset=" + offset + ", remaining=" + remaining);
  final Ticker t = new Ticker("SEEK", "offset=%d, remaining=%d", offset, remaining);
  final FSDataInputStream in = fs.open(p, 64 << 10);
  in.seek(offset);
  while (remaining > 0) {
    t.tick(checked, "offset=%d, remaining=%d", offset, remaining);
    final int n = (int) Math.min(remaining, buf.length);
    in.readFully(buf, 0, n);
    checkData(offset, remaining, n, buf, expected);
    offset += n;
    remaining -= n;
    checked += n;
  }
  in.close();
  t.end(checked);
}
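The loop above relies on the Seekable contract of FSDataInputStream: seek(offset) repositions the stream, and each sequential readFully advances getPos() past the bytes consumed. A minimal standalone sketch of that contract (the path, offset, and sizes are hypothetical):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SeekSketch {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    Path p = new Path("/tmp/example.dat");  // hypothetical path
    long offset = 128;                      // hypothetical offset
    byte[] buf = new byte[256];
    try (FSDataInputStream in = fs.open(p, 64 << 10)) {  // 64 KB buffer hint
      in.seek(offset);
      System.out.println("pos after seek = " + in.getPos());  // prints offset
      in.readFully(buf, 0, buf.length);  // sequential read advances the position
      System.out.println("pos after read = " + in.getPos());  // prints offset + 256
    }
  }
}
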