Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class AppendTestUtil, method check.
public static void check(DistributedFileSystem fs, Path p, int position, int length) throws IOException {
  byte[] buf = new byte[length];
  int i = 0;
  try {
    FSDataInputStream in = fs.open(p);
    in.read(position, buf, 0, buf.length);
    for (i = position; i < length + position; i++) {
      assertEquals((byte) i, buf[i - position]);
    }
    in.close();
  } catch (IOException ioe) {
    throw new IOException("p=" + p + ", length=" + length + ", i=" + i, ioe);
  }
}
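The snippet above uses the positional-read form read(position, buffer, offset, length), which reads at an absolute offset without moving the stream's current position and may return fewer bytes than requested. Below is a minimal sketch of looping until the buffer is filled; the PreadSketch class name, preadFully helper, and the /tmp/pread-demo path are hypothetical and only illustrate the API.

import java.io.EOFException;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PreadSketch {
  // Read exactly buf.length bytes starting at 'position' using positional reads.
  static void preadFully(FSDataInputStream in, long position, byte[] buf) throws IOException {
    int done = 0;
    while (done < buf.length) {
      // read(position, ...) does not change the stream's current offset
      int n = in.read(position + done, buf, done, buf.length - done);
      if (n < 0) {
        throw new EOFException("EOF before reading " + buf.length + " bytes at " + position);
      }
      done += n;
    }
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // "/tmp/pread-demo" is a hypothetical path; the file must already exist.
    try (FSDataInputStream in = fs.open(new Path("/tmp/pread-demo"))) {
      byte[] buf = new byte[16];
      preadFully(in, 0L, buf);
    }
  }
}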
Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class TestLeaseRecovery2, method verifyFile.
private void verifyFile(FileSystem dfs, Path filepath, byte[] actual, int size) throws IOException {
  AppendTestUtil.LOG.info("Lease for file " + filepath + " is recovered. " + "Validating its contents now...");
  // verify that file-size matches
  assertTrue("File should be " + size + " bytes, but is actually found to be " + dfs.getFileStatus(filepath).getLen() + " bytes", dfs.getFileStatus(filepath).getLen() == size);
  // verify that there is enough data to read.
  System.out.println("File size is good. Now validating sizes from datanodes...");
  FSDataInputStream stmin = dfs.open(filepath);
  stmin.readFully(0, actual, 0, size);
  stmin.close();
}
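Unlike the return-value loop needed for read(position, ...), the positional readFully(position, buffer, offset, length) used above either fills the whole requested range or throws (an EOFException if the file is shorter than position + length). A small sketch of reading an entire file this way; the ReadFullySketch class, readWholeFile helper, and /tmp/verify-demo path are hypothetical.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReadFullySketch {
  // Read an entire file into memory after checking its reported length.
  static byte[] readWholeFile(FileSystem fs, Path p) throws IOException {
    long len = fs.getFileStatus(p).getLen();
    byte[] data = new byte[(int) len]; // assumes the file fits in a single array
    try (FSDataInputStream in = fs.open(p)) {
      // readFully either fills the requested range or throws (EOFException on short files)
      in.readFully(0, data, 0, data.length);
    }
    return data;
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    byte[] contents = readWholeFile(fs, new Path("/tmp/verify-demo")); // hypothetical path
    System.out.println("Read " + contents.length + " bytes");
  }
}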
Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class TestPread, method testTruncateWhileReading.
@Test
public void testTruncateWhileReading() throws Exception {
  Path path = new Path("/testfile");
  final int blockSize = 512;
  // prevent initial pre-fetch of multiple block locations
  Configuration conf = new Configuration();
  conf.setLong(HdfsClientConfigKeys.Read.PREFETCH_SIZE_KEY, blockSize);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  try {
    DistributedFileSystem fs = cluster.getFileSystem();
    // create multi-block file
    FSDataOutputStream dos = fs.create(path, true, blockSize, (short) 1, blockSize);
    dos.write(new byte[blockSize * 3]);
    dos.close();
    // truncate a file while it's open
    final FSDataInputStream dis = fs.open(path);
    while (!fs.truncate(path, 10)) {
      Thread.sleep(10);
    }
    // verify that reading bytes outside the initial pre-fetch do
    // not send the client into an infinite loop querying locations.
    ExecutorService executor = Executors.newFixedThreadPool(1);
    Future<?> future = executor.submit(new Callable<Void>() {
      @Override
      public Void call() throws IOException {
        // read from 2nd block.
        dis.readFully(blockSize, new byte[4]);
        return null;
      }
    });
    try {
      future.get(4, TimeUnit.SECONDS);
      Assert.fail();
    } catch (ExecutionException ee) {
      assertTrue(ee.toString(), ee.getCause() instanceof EOFException);
    } finally {
      future.cancel(true);
      executor.shutdown();
    }
  } finally {
    cluster.shutdown();
  }
}
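The test above bounds a read that might hang with an ExecutorService and Future.get(timeout); the same pattern can guard any blocking FSDataInputStream call outside of a test. A minimal sketch follows; the BoundedReadSketch class, the /tmp/bounded-read-demo path, and the 5-second timeout are hypothetical choices.

import java.io.IOException;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BoundedReadSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    ExecutorService executor = Executors.newSingleThreadExecutor();
    // "/tmp/bounded-read-demo" is a hypothetical path; the file must already exist.
    try (FSDataInputStream in = fs.open(new Path("/tmp/bounded-read-demo"))) {
      Future<Integer> future = executor.submit(new Callable<Integer>() {
        @Override
        public Integer call() throws IOException {
          byte[] buf = new byte[4096];
          // positional read; returns the number of bytes read, or -1 at EOF
          return in.read(0, buf, 0, buf.length);
        }
      });
      try {
        System.out.println("read returned " + future.get(5, TimeUnit.SECONDS));
      } catch (TimeoutException te) {
        future.cancel(true); // give up on a read that is taking too long
      }
    } finally {
      executor.shutdown();
    }
  }
}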
Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class TestPread, method writeFile.
private void writeFile(FileSystem fileSys, Path name) throws IOException {
  // We need > 1 blocks to test out the hedged reads.
  int replication = 3;
  // test empty file open and read
  DFSTestUtil.createFile(fileSys, name, fileSize, 0, blockSize, (short) replication, seed);
  FSDataInputStream in = fileSys.open(name);
  byte[] buffer = new byte[fileSize];
  in.readFully(0, buffer, 0, 0);
  IOException res = null;
  try {
    // read beyond the end of the file
    in.readFully(0, buffer, 0, 1);
  } catch (IOException e) {
    // should throw an exception
    res = e;
  }
  assertTrue("Error reading beyond file boundary.", res != null);
  in.close();
  if (!fileSys.delete(name, true))
    assertTrue("Cannot delete file", false);
  // now create the real file
  DFSTestUtil.createFile(fileSys, name, fileSize, fileSize, blockSize, (short) replication, seed);
}
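The comment above refers to HDFS hedged reads, where the client issues a backup positional read to a second datanode if the first one is slow to respond. Hedged reads are enabled purely through client configuration. A minimal sketch, assuming the standard dfs.client.hedged.read.* client keys, that fs.defaultFS points at an HDFS cluster, and a hypothetical path; the pool size and threshold values are illustrative only.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HedgedReadSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Enable hedged reads on the client: a thread pool size > 0 turns the feature on,
    // and the threshold is how long to wait before issuing the backup read.
    conf.setInt("dfs.client.hedged.read.threadpool.size", 10);
    conf.setLong("dfs.client.hedged.read.threshold.millis", 500);

    FileSystem fs = FileSystem.get(conf); // assumes fs.defaultFS is an HDFS URI
    try (FSDataInputStream in = fs.open(new Path("/tmp/hedged-demo"))) { // hypothetical path
      byte[] buf = new byte[1024];
      // Only positional reads (pread) are hedged, not sequential read()/readFully(buf)
      in.readFully(0, buf, 0, buf.length);
    }
  }
}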
Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class TestPersistBlocks, method testRestartDfsWithAbandonedBlock.
@Test
public void testRestartDfsWithAbandonedBlock() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  // Turn off persistent IPC, so that the DFSClient can survive NN restart
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);
  MiniDFSCluster cluster = null;
  long len = 0;
  FSDataOutputStream stream;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    FileSystem fs = cluster.getFileSystem();
    // Creating a file with 4096 blockSize to write multiple blocks
    stream = fs.create(FILE_PATH, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
    stream.write(DATA_BEFORE_RESTART);
    stream.hflush();
    // Wait for all of the blocks to get through
    while (len < BLOCK_SIZE * (NUM_BLOCKS - 1)) {
      FileStatus status = fs.getFileStatus(FILE_PATH);
      len = status.getLen();
      Thread.sleep(100);
    }
    // Abandon the last block
    DFSClient dfsclient = DFSClientAdapter.getDFSClient((DistributedFileSystem) fs);
    HdfsFileStatus fileStatus = dfsclient.getNamenode().getFileInfo(FILE_NAME);
    LocatedBlocks blocks = dfsclient.getNamenode().getBlockLocations(FILE_NAME, 0, BLOCK_SIZE * NUM_BLOCKS);
    assertEquals(NUM_BLOCKS, blocks.getLocatedBlocks().size());
    LocatedBlock b = blocks.getLastLocatedBlock();
    dfsclient.getNamenode().abandonBlock(b.getBlock(), fileStatus.getFileId(), FILE_NAME, dfsclient.clientName);
    // explicitly do NOT close the file.
    cluster.restartNameNode();
    // Check that the file has no less bytes than before the restart
    // This would mean that blocks were successfully persisted to the log
    FileStatus status = fs.getFileStatus(FILE_PATH);
    assertTrue("Length incorrect: " + status.getLen(), status.getLen() == len - BLOCK_SIZE);
    // Verify the data showed up from before restart, sans abandoned block.
    FSDataInputStream readStream = fs.open(FILE_PATH);
    try {
      byte[] verifyBuf = new byte[DATA_BEFORE_RESTART.length - BLOCK_SIZE];
      IOUtils.readFully(readStream, verifyBuf, 0, verifyBuf.length);
      byte[] expectedBuf = new byte[DATA_BEFORE_RESTART.length - BLOCK_SIZE];
      System.arraycopy(DATA_BEFORE_RESTART, 0, expectedBuf, 0, expectedBuf.length);
      assertArrayEquals(expectedBuf, verifyBuf);
    } finally {
      IOUtils.closeStream(readStream);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
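The verification block above reads sequentially through org.apache.hadoop.io.IOUtils rather than the stream's positional readFully: IOUtils.readFully(in, buf, off, len) loops over InputStream.read() from the current position and fails if the stream ends early, while IOUtils.closeStream() closes quietly in a finally block. A minimal sketch of the same pattern; the SequentialReadSketch class and /tmp/sequential-demo path are hypothetical.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class SequentialReadSketch {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    Path p = new Path("/tmp/sequential-demo"); // hypothetical path
    FSDataInputStream in = null;
    try {
      in = fs.open(p);
      byte[] buf = new byte[(int) fs.getFileStatus(p).getLen()];
      // Sequential read from the current position; throws if the stream ends early
      IOUtils.readFully(in, buf, 0, buf.length);
      System.out.println("Read " + buf.length + " bytes");
    } finally {
      // closeStream tolerates null and suppresses exceptions thrown by close()
      IOUtils.closeStream(in);
    }
  }
}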