Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.
From the class TestPersistBlocks, method testRestartDfsWithAbandonedBlock.
@Test
public void testRestartDfsWithAbandonedBlock() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  // Turn off persistent IPC, so that the DFSClient can survive NN restart
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);
  MiniDFSCluster cluster = null;
  long len = 0;
  FSDataOutputStream stream;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    FileSystem fs = cluster.getFileSystem();
    // Create a file with a 4096-byte block size so that multiple blocks are written
    stream = fs.create(FILE_PATH, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
    stream.write(DATA_BEFORE_RESTART);
    stream.hflush();
    // Wait for all of the blocks to get through
    while (len < BLOCK_SIZE * (NUM_BLOCKS - 1)) {
      FileStatus status = fs.getFileStatus(FILE_PATH);
      len = status.getLen();
      Thread.sleep(100);
    }
    // Abandon the last block
    DFSClient dfsclient = DFSClientAdapter.getDFSClient((DistributedFileSystem) fs);
    HdfsFileStatus fileStatus = dfsclient.getNamenode().getFileInfo(FILE_NAME);
    LocatedBlocks blocks = dfsclient.getNamenode().getBlockLocations(FILE_NAME, 0, BLOCK_SIZE * NUM_BLOCKS);
    assertEquals(NUM_BLOCKS, blocks.getLocatedBlocks().size());
    LocatedBlock b = blocks.getLastLocatedBlock();
    dfsclient.getNamenode().abandonBlock(b.getBlock(), fileStatus.getFileId(), FILE_NAME, dfsclient.clientName);
    // explicitly do NOT close the file.
    cluster.restartNameNode();
    // Check that the file is exactly one block shorter than before the restart:
    // the completed blocks were persisted to the edit log and the abandoned block was dropped
    FileStatus status = fs.getFileStatus(FILE_PATH);
    assertTrue("Length incorrect: " + status.getLen(), status.getLen() == len - BLOCK_SIZE);
    // Verify the data showed up from before restart, sans abandoned block.
    FSDataInputStream readStream = fs.open(FILE_PATH);
    try {
      byte[] verifyBuf = new byte[DATA_BEFORE_RESTART.length - BLOCK_SIZE];
      IOUtils.readFully(readStream, verifyBuf, 0, verifyBuf.length);
      byte[] expectedBuf = new byte[DATA_BEFORE_RESTART.length - BLOCK_SIZE];
      System.arraycopy(DATA_BEFORE_RESTART, 0, expectedBuf, 0, expectedBuf.length);
      assertArrayEquals(expectedBuf, verifyBuf);
    } finally {
      IOUtils.closeStream(readStream);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
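The test above leans on a simple idiom: poll FileStatus.getLen() until the NameNode reports the expected number of completed bytes. A minimal standalone sketch of that idiom, with a hypothetical helper name and caller-supplied path and target length, might look like this:

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class LengthPolling {
  /** Poll the NameNode until the file reports at least expectedLen bytes. */
  static long waitForLength(FileSystem fs, Path path, long expectedLen)
      throws Exception {
    long len = 0;
    while (len < expectedLen) {
      // getFileStatus() reflects the length of the blocks the NameNode knows about
      FileStatus status = fs.getFileStatus(path);
      len = status.getLen();
      Thread.sleep(100);
    }
    return len;
  }
}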
Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.
From the class TestPersistBlocks, method testRestartDfs.
/**
 * Check if DFS remains in proper condition after a restart.
 * @param useFlush - if true then flush is used instead of sync (i.e. hflush)
 */
void testRestartDfs(boolean useFlush) throws Exception {
  final Configuration conf = new HdfsConfiguration();
  // Turn off persistent IPC, so that the DFSClient can survive NN restart
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);
  MiniDFSCluster cluster = null;
  long len = 0;
  FSDataOutputStream stream;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    FileSystem fs = cluster.getFileSystem();
    // Create a file with a 4096-byte block size so that multiple blocks are written
    stream = fs.create(FILE_PATH, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
    stream.write(DATA_BEFORE_RESTART);
    if (useFlush) {
      stream.flush();
    } else {
      stream.hflush();
    }
    // Wait for at least a few blocks to get through
    while (len <= BLOCK_SIZE) {
      FileStatus status = fs.getFileStatus(FILE_PATH);
      len = status.getLen();
      Thread.sleep(100);
    }
    // explicitly do NOT close the file.
    cluster.restartNameNode();
    // Check that the file has no fewer bytes than before the restart;
    // this would mean that blocks were successfully persisted to the log
    FileStatus status = fs.getFileStatus(FILE_PATH);
    assertTrue("Length too short: " + status.getLen(), status.getLen() >= len);
    // And keep writing (ensures that leases are also persisted correctly)
    stream.write(DATA_AFTER_RESTART);
    stream.close();
    // Verify that the data showed up, both from before and after the restart.
    FSDataInputStream readStream = fs.open(FILE_PATH);
    try {
      byte[] verifyBuf = new byte[DATA_BEFORE_RESTART.length];
      IOUtils.readFully(readStream, verifyBuf, 0, verifyBuf.length);
      assertArrayEquals(DATA_BEFORE_RESTART, verifyBuf);
      IOUtils.readFully(readStream, verifyBuf, 0, verifyBuf.length);
      assertArrayEquals(DATA_AFTER_RESTART, verifyBuf);
    } finally {
      IOUtils.closeStream(readStream);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
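The useFlush flag above distinguishes flush(), which only drains client-side buffers, from hflush(), which pushes the buffered bytes to the datanode pipeline so new readers can see them before the file is closed. A minimal sketch of that visibility guarantee outside the test harness, assuming a reachable default filesystem and a hypothetical /tmp path, could be:

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HflushDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path path = new Path("/tmp/hflush-demo");  // hypothetical path
    byte[] payload = "written before close".getBytes(StandardCharsets.UTF_8);
    FSDataOutputStream out = fs.create(path, true);
    try {
      out.write(payload);
      // flush() would only drain client buffers; hflush() pushes the bytes to the
      // datanodes so a concurrent reader can see them before the file is closed.
      out.hflush();
      byte[] readBack = new byte[payload.length];
      try (FSDataInputStream in = fs.open(path)) {
        in.readFully(readBack, 0, readBack.length);
      }
      System.out.println("visible after hflush: "
          + new String(readBack, StandardCharsets.UTF_8));
    } finally {
      out.close();
    }
  }
}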
Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.
From the class TestCreateEditsLog, method testCanLoadCreatedEditsLog.
/**
 * Tests that an edits log created using CreateEditsLog is valid and can be
 * loaded successfully by a namenode.
 */
@Test(timeout = 60000)
public void testCanLoadCreatedEditsLog() throws Exception {
  // Format namenode.
  HdfsConfiguration conf = new HdfsConfiguration();
  File nameDir = new File(HDFS_DIR, "name");
  conf.set(DFS_NAMENODE_NAME_DIR_KEY, Util.fileAsURI(nameDir).toString());
  DFSTestUtil.formatNameNode(conf);
  // Call CreateEditsLog and move the resulting edits to the name dir.
  CreateEditsLog.main(new String[] { "-f", "1000", "0", "1", "-d", TEST_DIR.getAbsolutePath() });
  Path editsWildcard = new Path(TEST_DIR.getAbsolutePath(), "*");
  FileContext localFc = FileContext.getLocalFSFileContext();
  for (FileStatus edits : localFc.util().globStatus(editsWildcard)) {
    Path src = edits.getPath();
    Path dst = new Path(new File(nameDir, "current").getAbsolutePath(), src.getName());
    localFc.rename(src, dst);
  }
  // Start a namenode to try to load the edits.
  cluster = new MiniDFSCluster.Builder(conf).format(false).manageNameDfsDirs(false).waitSafeMode(false).build();
  cluster.waitClusterUp();
  // Test successful, because no exception thrown.
}
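The snippet uses FileContext's glob utility to pick up the generated edits files. For comparison, the same glob-and-iterate pattern is also available on the FileSystem API; a small sketch against the local filesystem, with a hypothetical directory, might be:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GlobEditsFiles {
  public static void main(String[] args) throws Exception {
    FileSystem localFs = FileSystem.getLocal(new Configuration());
    Path wildcard = new Path("/tmp/generated-edits", "*");  // hypothetical directory
    // globStatus returns the matching entries, or null if the pattern's parent path does not exist
    FileStatus[] matches = localFs.globStatus(wildcard);
    if (matches != null) {
      for (FileStatus stat : matches) {
        System.out.println(stat.getPath() + " (" + stat.getLen() + " bytes)");
      }
    }
  }
}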
Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.
From the class TestAuditLogs, method testAuditAllowedStat.
/** test that allowed stat puts proper entry in audit log */
@Test
public void testAuditAllowedStat() throws Exception {
  final Path file = new Path(fnames[0]);
  FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);
  setupAuditLogs();
  FileStatus st = userfs.getFileStatus(file);
  verifyAuditLogs(true);
  assertTrue("failed to stat file", st != null && st.isFile());
}
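The assertion only checks isFile(), but the FileStatus returned by getFileStatus() carries the full stat record. A minimal sketch of the commonly used getters, against a hypothetical path on the default filesystem:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PrintStat {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    FileStatus st = fs.getFileStatus(new Path("/tmp/some-file"));  // hypothetical path
    System.out.println("isFile      : " + st.isFile());
    System.out.println("length      : " + st.getLen());
    System.out.println("owner:group : " + st.getOwner() + ":" + st.getGroup());
    System.out.println("permission  : " + st.getPermission());
    System.out.println("modified at : " + st.getModificationTime());
  }
}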
Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.
From the class TestAuditLogs, method testAuditWebHdfsStat.
/** test that stat via webhdfs puts proper entry in audit log */
@Test
public void testAuditWebHdfsStat() throws Exception {
  final Path file = new Path(fnames[0]);
  fs.setPermission(file, new FsPermission((short) 0644));
  fs.setOwner(file, "root", null);
  setupAuditLogs();
  WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsConstants.WEBHDFS_SCHEME);
  FileStatus st = webfs.getFileStatus(file);
  verifyAuditLogs(true);
  assertTrue("failed to stat file", st != null && st.isFile());
}
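The test obtains its WebHdfsFileSystem through the WebHdfsTestUtil helper; outside a test, the same kind of stat can usually be done by opening a filesystem with a webhdfs:// URI. A sketch under that assumption, with a made-up NameNode HTTP address and path:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WebHdfsStat {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumed NameNode HTTP endpoint; the host, port, and path below are placeholders.
    FileSystem webfs = FileSystem.get(URI.create("webhdfs://namenode:9870"), conf);
    FileStatus st = webfs.getFileStatus(new Path("/tmp/some-file"));
    System.out.println("isFile: " + st.isFile() + ", length: " + st.getLen());
  }
}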