Search in sources :

Example 31 with FileStatus

use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.

The class AbstractContractGetFileStatusTest, method verifyFileStats.

/**
 * Walk a {@code RemoteIterator} of located file statuses and, for every
 * entry, re-fetch the status of the same path directly via
 * {@code getFileStatus()}. The two views must agree on the core attributes:
 * this catches filesystems whose directory-listing metadata diverges from
 * the metadata returned by a point lookup.
 *
 * Timestamps are deliberately not compared.
 * @param results iterator to scan
 * @return the number of entries in the result set
 * @throws IOException any IO problem
 */
private int verifyFileStats(RemoteIterator<LocatedFileStatus> results) throws IOException {
    describe("verifying file statuses");
    int entryCount = 0;
    while (results.hasNext()) {
        final LocatedFileStatus listed = results.next();
        entryCount++;
        // Point lookup of the same path, to compare against the listing entry.
        final FileStatus direct = getFileSystem().getFileStatus(listed.getPath());
        assertEquals("isDirectory", direct.isDirectory(), listed.isDirectory());
        assertEquals("isFile", direct.isFile(), listed.isFile());
        assertEquals("getLen", direct.getLen(), listed.getLen());
        assertEquals("getOwner", direct.getOwner(), listed.getOwner());
    }
    return entryCount;
}
Also used : LocatedFileStatus(org.apache.hadoop.fs.LocatedFileStatus) FileStatus(org.apache.hadoop.fs.FileStatus)

Example 32 with FileStatus

use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.

the class RollingFileSystemSinkTestBase method readLogFile.

/**
 * Read the log files at the target path and return their contents as a
 * single string. Asserts that the expected number of files is found and
 * that at least one matching log directory exists.
 *
 * @param path the target path
 * @param then when the test method began. Used to find the log directory in
 * the case that the test run crosses the top of the hour.
 * @param count the number of log files to expect
 * @return the concatenated metrics read from the matching log files
 * @throws IOException on a filesystem error
 * @throws URISyntaxException if {@code path} is not a valid URI
 */
protected String readLogFile(String path, String then, int count) throws IOException, URISyntaxException {
    final String now = DATE_FORMAT.format(new Date()) + "00";
    final String logFile = getLogFilename();
    final FileSystem fs = FileSystem.get(new URI(path), new Configuration());
    final StringBuilder metrics = new StringBuilder();
    boolean matched = false;
    for (FileStatus status : fs.listStatus(new Path(path))) {
        final Path logDir = status.getPath();
        final String dirName = logDir.getName();
        // Only directories stamped with the current hour or the hour in which
        // the test started are relevant; anything else is ignored.
        if (now.equals(dirName) || then.equals(dirName)) {
            readLogData(fs, findMostRecentLogFile(fs, new Path(logDir, logFile)), metrics);
            assertFileCount(fs, logDir, count);
            matched = true;
        }
    }
    assertTrue("No valid log directories found", matched);
    return metrics.toString();
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) LocatedFileStatus(org.apache.hadoop.fs.LocatedFileStatus) Configuration(org.apache.hadoop.conf.Configuration) SubsetConfiguration(org.apache.commons.configuration2.SubsetConfiguration) FileSystem(org.apache.hadoop.fs.FileSystem) URI(java.net.URI) Date(java.util.Date)

Example 33 with FileStatus

use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.

the class StripedFileTestUtil method verifyLength.

/**
 * Assert that the file at {@code srcPath} has exactly {@code fileLength}
 * bytes according to {@code getFileStatus()}.
 */
static void verifyLength(FileSystem fs, Path srcPath, int fileLength) throws IOException {
    final long actualLength = fs.getFileStatus(srcPath).getLen();
    assertEquals("File length should be the same", fileLength, actualLength);
}
Also used : FileStatus(org.apache.hadoop.fs.FileStatus)

Example 34 with FileStatus

use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.

the class TestSafeMode method testSafeModeWhenZeroBlockLocations.

/**
 * Verify block-location behaviour around safe mode in two phases:
 * (1) with DataNodes up, getFileBlockLocations() keeps working even while
 * the NameNode is manually placed in safe mode; (2) after restarting only
 * the NameNode (no DataNodes), the same call must be rejected with a
 * SafeModeException until safe mode is left.
 */
@Test
public void testSafeModeWhenZeroBlockLocations() throws IOException {
    try {
        Path file1 = new Path("/tmp/testManualSafeMode/file1");
        Path file2 = new Path("/tmp/testManualSafeMode/file2");
        System.out.println("Created file1 and file2.");
        // create two files with one block each.
        DFSTestUtil.createFile(fs, file1, 1000, (short) 1, 0);
        DFSTestUtil.createFile(fs, file2, 2000, (short) 1, 0);
        checkGetBlockLocationsWorks(fs, file1);
        NameNode namenode = cluster.getNameNode();
        // manually set safemode.
        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        assertTrue("should still be in SafeMode", namenode.isInSafeMode());
        // getBlock locations should still work since block locations exist
        // (the DataNodes are still running at this point).
        checkGetBlockLocationsWorks(fs, file1);
        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
        assertFalse("should not be in SafeMode", namenode.isInSafeMode());
        // Phase 2: stop everything, then bring back only the NameNode so
        // that no block locations can be reported by any DataNode.
        cluster.shutdownDataNodes();
        cluster.shutdownNameNode(0);
        // now bring up just the NameNode.
        cluster.restartNameNode();
        cluster.waitActive();
        System.out.println("Restarted cluster with just the NameNode");
        namenode = cluster.getNameNode();
        assertTrue("No datanode is started. Should be in SafeMode", namenode.isInSafeMode());
        FileStatus stat = fs.getFileStatus(file1);
        try {
            fs.getFileBlockLocations(stat, 0, 1000);
            // NOTE(review): assertTrue(msg, false) is the classic fail(msg)
            // anti-pattern; consider JUnit's Assert.fail() here.
            assertTrue("Should have got safemode exception", false);
        } catch (SafeModeException e) {
        // as expected: the NameNode rejects the call while in safe mode
        } catch (RemoteException re) {
            // An RPC client may see the SafeModeException wrapped in a
            // RemoteException; accept that form too, fail on anything else.
            if (!re.getClassName().equals(SafeModeException.class.getName()))
                assertTrue("Should have got safemode exception", false);
        }
        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
        assertFalse("Should not be in safemode", namenode.isInSafeMode());
        checkGetBlockLocationsWorks(fs, file1);
    } finally {
        // Best-effort teardown of shared test fixtures.
        if (fs != null)
            fs.close();
        if (cluster != null)
            cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode) FileStatus(org.apache.hadoop.fs.FileStatus) SafeModeException(org.apache.hadoop.hdfs.server.namenode.SafeModeException) RemoteException(org.apache.hadoop.ipc.RemoteException) Test(org.junit.Test)

Example 35 with FileStatus

use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.

the class TestReplaceDatanodeOnFailure method testBestEffort.

/**
 * Exercise the ALWAYS datanode-replacement policy in best-effort mode:
 * replacement is always attempted on failure, but a failed replacement
 * must not abort the write. With a single-datanode cluster, both the
 * initial write and a subsequent append are expected to succeed.
 */
@Test
public void testBestEffort() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    //always replace a datanode but do not throw exception
    ReplaceDatanodeOnFailure.write(Policy.ALWAYS, true, conf);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
        final DistributedFileSystem fs = cluster.getFileSystem();
        final Path file = new Path(DIR, "testIgnoreReplaceFailure");
        final byte[] payload = new byte[1000];
        // First pass: create the file, then verify replication and length.
        LOG.info("write " + payload.length + " bytes to " + file);
        final FSDataOutputStream createStream = fs.create(file, REPLICATION);
        createStream.write(payload);
        createStream.close();
        final FileStatus status = fs.getFileStatus(file);
        Assert.assertEquals(REPLICATION, status.getReplication());
        Assert.assertEquals(payload.length, status.getLen());
        // Second pass: append must also succeed despite the single datanode.
        LOG.info("append another " + payload.length + " bytes to " + file);
        final FSDataOutputStream appendStream = fs.append(file);
        appendStream.write(payload);
        appendStream.close();
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) Configuration(org.apache.hadoop.conf.Configuration) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)

Aggregations

FileStatus (org.apache.hadoop.fs.FileStatus)1156 Path (org.apache.hadoop.fs.Path)910 FileSystem (org.apache.hadoop.fs.FileSystem)417 Test (org.junit.Test)372 IOException (java.io.IOException)296 Configuration (org.apache.hadoop.conf.Configuration)187 ArrayList (java.util.ArrayList)175 FileNotFoundException (java.io.FileNotFoundException)136 LocatedFileStatus (org.apache.hadoop.fs.LocatedFileStatus)105 FsPermission (org.apache.hadoop.fs.permission.FsPermission)86 FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream)67 FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)65 HashMap (java.util.HashMap)54 File (java.io.File)41 URI (java.net.URI)41 PathFilter (org.apache.hadoop.fs.PathFilter)38 BufferedReader (java.io.BufferedReader)30 InputStreamReader (java.io.InputStreamReader)30 BlockLocation (org.apache.hadoop.fs.BlockLocation)30 HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus)30