Example 66 with FileStatus

Use of org.apache.hadoop.fs.FileStatus in the Apache Hadoop project.

From the class TestPersistBlocks, method testRestartDfsWithAbandonedBlock.

@Test
public void testRestartDfsWithAbandonedBlock() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    // Turn off persistent IPC, so that the DFSClient can survive NN restart
    conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);
    MiniDFSCluster cluster = null;
    long len = 0;
    FSDataOutputStream stream;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        FileSystem fs = cluster.getFileSystem();
        // Create the file with a small block size (BLOCK_SIZE, 4096) so multiple blocks are written
        stream = fs.create(FILE_PATH, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
        stream.write(DATA_BEFORE_RESTART);
        stream.hflush();
        // Wait for all of the blocks to get through
        while (len < BLOCK_SIZE * (NUM_BLOCKS - 1)) {
            FileStatus status = fs.getFileStatus(FILE_PATH);
            len = status.getLen();
            Thread.sleep(100);
        }
        // Abandon the last block
        DFSClient dfsclient = DFSClientAdapter.getDFSClient((DistributedFileSystem) fs);
        HdfsFileStatus fileStatus = dfsclient.getNamenode().getFileInfo(FILE_NAME);
        LocatedBlocks blocks = dfsclient.getNamenode().getBlockLocations(FILE_NAME, 0, BLOCK_SIZE * NUM_BLOCKS);
        assertEquals(NUM_BLOCKS, blocks.getLocatedBlocks().size());
        LocatedBlock b = blocks.getLastLocatedBlock();
        dfsclient.getNamenode().abandonBlock(b.getBlock(), fileStatus.getFileId(), FILE_NAME, dfsclient.clientName);
        // explicitly do NOT close the file.
        cluster.restartNameNode();
        // Check that the file is exactly one block shorter than before the restart:
        // the abandoned block must not have been persisted to the edit log
        FileStatus status = fs.getFileStatus(FILE_PATH);
        assertTrue("Length incorrect: " + status.getLen(), status.getLen() == len - BLOCK_SIZE);
        // Verify the data showed up from before restart, sans abandoned block.
        FSDataInputStream readStream = fs.open(FILE_PATH);
        try {
            byte[] verifyBuf = new byte[DATA_BEFORE_RESTART.length - BLOCK_SIZE];
            IOUtils.readFully(readStream, verifyBuf, 0, verifyBuf.length);
            byte[] expectedBuf = new byte[DATA_BEFORE_RESTART.length - BLOCK_SIZE];
            System.arraycopy(DATA_BEFORE_RESTART, 0, expectedBuf, 0, expectedBuf.length);
            assertArrayEquals(expectedBuf, verifyBuf);
        } finally {
            IOUtils.closeStream(readStream);
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: FileStatus(org.apache.hadoop.fs.FileStatus) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) Configuration(org.apache.hadoop.conf.Configuration) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) FileSystem(org.apache.hadoop.fs.FileSystem) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
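
Both restart tests in this file rely on the same wait-loop idiom: poll FileStatus.getLen() until the NameNode reports enough bytes. The following is a minimal standalone sketch of that idiom, not part of the Hadoop test suite; the path and threshold are hypothetical, and a bounded deadline replaces the test's unbounded loop.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WaitForLengthSketch {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration()); // uses fs.defaultFS
        Path path = new Path("/tmp/in-progress-file"); // hypothetical path
        long threshold = 4096; // hypothetical: e.g. one block
        long deadline = System.currentTimeMillis() + 30_000; // bounded wait
        long len = 0;
        while (len < threshold && System.currentTimeMillis() < deadline) {
            // getFileStatus() asks the NameNode for the current metadata,
            // including the file length visible to readers.
            FileStatus status = fs.getFileStatus(path);
            len = status.getLen();
            Thread.sleep(100);
        }
        System.out.println("observed length: " + len);
    }
}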

Example 67 with FileStatus

Use of org.apache.hadoop.fs.FileStatus in the Apache Hadoop project.

From the class TestPersistBlocks, method testRestartDfs.

/** Check that DFS remains in a proper state after a restart.
   * @param useFlush if true, flush() is used instead of sync (i.e. hflush())
   */
void testRestartDfs(boolean useFlush) throws Exception {
    final Configuration conf = new HdfsConfiguration();
    // Turn off persistent IPC, so that the DFSClient can survive NN restart
    conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);
    MiniDFSCluster cluster = null;
    long len = 0;
    FSDataOutputStream stream;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        FileSystem fs = cluster.getFileSystem();
        // Create the file with a small block size (BLOCK_SIZE, 4096) so multiple blocks are written
        stream = fs.create(FILE_PATH, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
        stream.write(DATA_BEFORE_RESTART);
        if (useFlush)
            stream.flush();
        else
            stream.hflush();
        // Wait for at least a few blocks to get through
        while (len <= BLOCK_SIZE) {
            FileStatus status = fs.getFileStatus(FILE_PATH);
            len = status.getLen();
            Thread.sleep(100);
        }
        // explicitly do NOT close the file.
        cluster.restartNameNode();
        // Check that the file has no fewer bytes than before the restart
        // This would mean that blocks were successfully persisted to the log
        FileStatus status = fs.getFileStatus(FILE_PATH);
        assertTrue("Length too short: " + status.getLen(), status.getLen() >= len);
        // And keep writing (ensures that leases are also persisted correctly)
        stream.write(DATA_AFTER_RESTART);
        stream.close();
        // Verify that the data showed up, both from before and after the restart.
        FSDataInputStream readStream = fs.open(FILE_PATH);
        try {
            byte[] verifyBuf = new byte[DATA_BEFORE_RESTART.length];
            IOUtils.readFully(readStream, verifyBuf, 0, verifyBuf.length);
            assertArrayEquals(DATA_BEFORE_RESTART, verifyBuf);
            IOUtils.readFully(readStream, verifyBuf, 0, verifyBuf.length);
            assertArrayEquals(DATA_AFTER_RESTART, verifyBuf);
        } finally {
            IOUtils.closeStream(readStream);
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: FileStatus(org.apache.hadoop.fs.FileStatus) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream)
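
The useFlush parameter exercises the difference between flush() and hflush(). A minimal sketch of that distinction, assuming fs.defaultFS points at a running HDFS; the path is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HflushSketch {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path path = new Path("/tmp/hflush-demo"); // hypothetical path
        try (FSDataOutputStream out = fs.create(path, true)) {
            out.write("some bytes".getBytes("UTF-8"));
            // flush() only drains client-side buffers; a concurrent reader
            // may still see a shorter file.
            out.flush();
            // hflush() pushes the data to every datanode in the write
            // pipeline, so a reader opening the file now is guaranteed
            // to see these bytes.
            out.hflush();
        }
    }
}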

Example 68 with FileStatus

Use of org.apache.hadoop.fs.FileStatus in the Apache Hadoop project.

From the class TestCreateEditsLog, method testCanLoadCreatedEditsLog.

/**
   * Tests that an edits log created using CreateEditsLog is valid and can be
   * loaded successfully by a namenode.
   */
@Test(timeout = 60000)
public void testCanLoadCreatedEditsLog() throws Exception {
    // Format namenode.
    HdfsConfiguration conf = new HdfsConfiguration();
    File nameDir = new File(HDFS_DIR, "name");
    conf.set(DFS_NAMENODE_NAME_DIR_KEY, Util.fileAsURI(nameDir).toString());
    DFSTestUtil.formatNameNode(conf);
    // Call CreateEditsLog and move the resulting edits to the name dir.
    CreateEditsLog.main(new String[] { "-f", "1000", "0", "1", "-d", TEST_DIR.getAbsolutePath() });
    Path editsWildcard = new Path(TEST_DIR.getAbsolutePath(), "*");
    FileContext localFc = FileContext.getLocalFSFileContext();
    for (FileStatus edits : localFc.util().globStatus(editsWildcard)) {
        Path src = edits.getPath();
        Path dst = new Path(new File(nameDir, "current").getAbsolutePath(), src.getName());
        localFc.rename(src, dst);
    }
    // Start a namenode to try to load the edits.
    cluster = new MiniDFSCluster.Builder(conf).format(false).manageNameDfsDirs(false).waitSafeMode(false).build();
    cluster.waitClusterUp();
    // Test successful because no exception was thrown.
}
Also used: Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) File(java.io.File) FileContext(org.apache.hadoop.fs.FileContext) Test(org.junit.Test)
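
The reusable part of this test is the glob-and-rename loop. A minimal sketch of glob matching with FileContext, runnable against the local filesystem with no cluster; the /tmp wildcard is a hypothetical pattern.

import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;

public class GlobSketch {
    public static void main(String[] args) throws Exception {
        // The local FileContext talks to the local filesystem directly.
        FileContext fc = FileContext.getLocalFSFileContext();
        Path wildcard = new Path("/tmp", "*"); // hypothetical pattern
        FileStatus[] matches = fc.util().globStatus(wildcard);
        if (matches != null) { // defensive: guard against no matches
            for (FileStatus st : matches) {
                System.out.println(st.getPath() + " (" + st.getLen() + " bytes)");
            }
        }
    }
}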

Example 69 with FileStatus

Use of org.apache.hadoop.fs.FileStatus in the Apache Hadoop project.

From the class TestAuditLogs, method testAuditAllowedStat.

/** Test that an allowed stat puts the proper entry in the audit log. */
@Test
public void testAuditAllowedStat() throws Exception {
    final Path file = new Path(fnames[0]);
    FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);
    setupAuditLogs();
    FileStatus st = userfs.getFileStatus(file);
    verifyAuditLogs(true);
    assertTrue("failed to stat file", st != null && st.isFile());
}
Also used: Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) FileSystem(org.apache.hadoop.fs.FileSystem) WebHdfsFileSystem(org.apache.hadoop.hdfs.web.WebHdfsFileSystem) Test(org.junit.Test)
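
DFSTestUtil.getFileSystemAs is a test-tree helper; roughly the same effect, obtaining a FileSystem whose calls carry another user's identity (which is what the audit log records), can be sketched with UserGroupInformation.doAs. The user and group names here are hypothetical.

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.UserGroupInformation;

public class FileSystemAsSketch {
    public static void main(String[] args) throws Exception {
        final Configuration conf = new Configuration();
        // createUserForTesting fabricates an identity without real authentication.
        UserGroupInformation ugi =
            UserGroupInformation.createUserForTesting("alice", new String[] { "users" });
        // Every NameNode call made through this FileSystem runs as "alice".
        FileSystem userfs = ugi.doAs(
            (PrivilegedExceptionAction<FileSystem>) () -> FileSystem.get(conf));
        System.out.println(userfs.getUri());
    }
}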

Example 70 with FileStatus

Use of org.apache.hadoop.fs.FileStatus in the Apache Hadoop project.

From the class TestAuditLogs, method testAuditWebHdfsStat.

/** Test that a stat via WebHDFS puts the proper entry in the audit log. */
@Test
public void testAuditWebHdfsStat() throws Exception {
    final Path file = new Path(fnames[0]);
    fs.setPermission(file, new FsPermission((short) 0644));
    fs.setOwner(file, "root", null);
    setupAuditLogs();
    WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsConstants.WEBHDFS_SCHEME);
    FileStatus st = webfs.getFileStatus(file);
    verifyAuditLogs(true);
    assertTrue("failed to stat file", st != null && st.isFile());
}
Also used: Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) FsPermission(org.apache.hadoop.fs.permission.FsPermission) WebHdfsFileSystem(org.apache.hadoop.hdfs.web.WebHdfsFileSystem) Test(org.junit.Test)
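
WebHdfsTestUtil is likewise test-only; against a real cluster the same stat can be issued by opening a FileSystem on a webhdfs:// URI. A sketch with a hypothetical host and path; 9870 is the default NameNode HTTP port in Hadoop 3.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WebHdfsStatSketch {
    public static void main(String[] args) throws Exception {
        // The webhdfs scheme routes the stat over the NameNode's HTTP interface.
        FileSystem webfs = FileSystem.get(
            URI.create("webhdfs://namenode.example.com:9870"), new Configuration());
        FileStatus st = webfs.getFileStatus(new Path("/some/file")); // hypothetical path
        System.out.println(st.getPath() + " isFile=" + st.isFile());
    }
}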

Aggregations

FileStatus (org.apache.hadoop.fs.FileStatus): 1156 usages
Path (org.apache.hadoop.fs.Path): 910 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 417 usages
Test (org.junit.Test): 372 usages
IOException (java.io.IOException): 296 usages
Configuration (org.apache.hadoop.conf.Configuration): 187 usages
ArrayList (java.util.ArrayList): 175 usages
FileNotFoundException (java.io.FileNotFoundException): 136 usages
LocatedFileStatus (org.apache.hadoop.fs.LocatedFileStatus): 105 usages
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 86 usages
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 67 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 65 usages
HashMap (java.util.HashMap): 54 usages
File (java.io.File): 41 usages
URI (java.net.URI): 41 usages
PathFilter (org.apache.hadoop.fs.PathFilter): 38 usages
BufferedReader (java.io.BufferedReader): 30 usages
InputStreamReader (java.io.InputStreamReader): 30 usages
BlockLocation (org.apache.hadoop.fs.BlockLocation): 30 usages
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 30 usages
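
The counts above show that FileStatus almost always appears alongside Path and FileSystem. A minimal sketch of that most common combination, listing a directory; the /user path is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListStatusSketch {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration()); // uses fs.defaultFS
        // listStatus returns one FileStatus per direct child of the directory.
        for (FileStatus st : fs.listStatus(new Path("/user"))) { // hypothetical dir
            System.out.printf("%-4s %10d %s%n",
                st.isDirectory() ? "dir" : "file", st.getLen(), st.getPath().getName());
        }
    }
}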