Search in sources :

Example 56 with FileStatus

use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.

The class HttpFSFileSystem, method listStatusBatch.

/**
 * Lists one batch of directory entries for {@code f} via the HttpFS
 * LISTSTATUS_BATCH operation.
 *
 * @param f the directory to list
 * @param token opaque continuation token from a previous batch, or
 *              {@code null} to start from the beginning; interpreted as the
 *              UTF-8 name of the last entry already returned
 * @return the batch of statuses, the next continuation token (the name of
 *         the last entry, or {@code null} for an empty batch), and whether
 *         more entries remain
 * @throws FileNotFoundException if {@code f} does not exist
 * @throws IOException on any other communication or parse failure
 */
@Override
public DirectoryEntries listStatusBatch(Path f, byte[] token) throws FileNotFoundException, IOException {
    Map<String, String> params = new HashMap<String, String>();
    params.put(OP_PARAM, Operation.LISTSTATUS_BATCH.toString());
    if (token != null) {
        // Resume the listing after the entry named by the token.
        params.put(START_AFTER_PARAM, new String(token, Charsets.UTF_8));
    }
    HttpURLConnection conn = getConnection(Operation.LISTSTATUS_BATCH.getMethod(), params, f, true);
    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    // Parse the FileStatus array out of the directory-listing JSON envelope.
    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
    JSONObject listing = (JSONObject) json.get(DIRECTORY_LISTING_JSON);
    FileStatus[] statuses = toFileStatuses((JSONObject) listing.get(PARTIAL_LISTING_JSON), f);
    // The new continuation token is the name of the last FileStatus entry.
    byte[] newToken = null;
    if (statuses.length > 0) {
        // getPath().getName() already returns a String; no toString() needed.
        newToken = statuses[statuses.length - 1].getPath().getName().getBytes(Charsets.UTF_8);
    }
    // Translate the remainingEntries count into a hasMore flag.
    final long remainingEntries = (Long) listing.get(REMAINING_ENTRIES_JSON);
    final boolean hasMore = remainingEntries > 0;
    return new DirectoryEntries(statuses, newToken, hasMore);
}
Also used : HttpURLConnection(java.net.HttpURLConnection) FileStatus(org.apache.hadoop.fs.FileStatus) JSONObject(org.json.simple.JSONObject) HashMap(java.util.HashMap)

Example 57 with FileStatus

use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.

The class HttpFSFileSystem, method createFileStatus.

/**
 * Builds a {@link FileStatus} from one JSON listing entry.
 *
 * @param parent the directory containing the entry; used as the entry's path
 *               when the JSON path suffix is empty (i.e. the entry is the
 *               parent itself)
 * @param json the JSON object describing a single file, directory, or symlink
 * @return the corresponding {@code FileStatus}; never {@code null} for the
 *         three known FILE_TYPE values
 */
private FileStatus createFileStatus(Path parent, JSONObject json) {
    String pathSuffix = (String) json.get(PATH_SUFFIX_JSON);
    // An empty suffix means the status describes the parent path itself.
    Path path = (pathSuffix.isEmpty()) ? parent : new Path(parent, pathSuffix);
    FILE_TYPE type = FILE_TYPE.valueOf((String) json.get(TYPE_JSON));
    long len = (Long) json.get(LENGTH_JSON);
    String owner = (String) json.get(OWNER_JSON);
    String group = (String) json.get(GROUP_JSON);
    final FsPermission permission = toFsPermission(json);
    long aTime = (Long) json.get(ACCESS_TIME_JSON);
    long mTime = (Long) json.get(MODIFICATION_TIME_JSON);
    long blockSize = (Long) json.get(BLOCK_SIZE_JSON);
    short replication = ((Long) json.get(REPLICATION_JSON)).shortValue();
    FileStatus fileStatus = null;
    switch (type) {
        case FILE:
        // Intentional fall-through: FILE and DIRECTORY differ only in the
        // isdir flag passed to the constructor.
        case DIRECTORY:
            fileStatus = new FileStatus(len, (type == FILE_TYPE.DIRECTORY), replication, blockSize, mTime, aTime, permission, owner, group, path);
            break;
        case SYMLINK:
            // NOTE(review): the symlink target is not populated from the JSON;
            // a null target is passed through — confirm this is intentional.
            Path symLink = null;
            fileStatus = new FileStatus(len, false, replication, blockSize, mTime, aTime, permission, owner, group, symLink, path);
            // break added so appending a new case later cannot fall through.
            break;
    }
    return fileStatus;
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) FsPermission(org.apache.hadoop.fs.permission.FsPermission)

Example 58 with FileStatus

use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.

The class TestFileStatus, method testGetFileStatusOnFile.

/** Verify the FileStatus returned by getFileStatus on a plain file. */
@Test
public void testGetFileStatusOnFile() throws Exception {
    checkFile(fs, file1, 1);
    // Fetch the status once and verify each attribute in turn.
    final FileStatus fileStatus = fs.getFileStatus(file1);
    assertFalse(file1 + " should be a file", fileStatus.isDirectory());
    assertEquals(blockSize, fileStatus.getBlockSize());
    assertEquals(1, fileStatus.getReplication());
    assertEquals(fileSize, fileStatus.getLen());
    // The reported path must be the fully-qualified form of file1.
    final String expectedPath = file1.makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString();
    assertEquals(expectedPath, fileStatus.getPath().toString());
}
Also used : FileStatus(org.apache.hadoop.fs.FileStatus) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) Test(org.junit.Test)

Example 59 with FileStatus

use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.

The class TestINodeFile, method testInodeIdBasedPaths.

/**
   * Tests for addressing files using /.reserved/.inodes/&lt;inodeID&gt; paths in
   * file system operations. Each FileSystem/NameNode API is exercised through
   * the inode-id path and, where a result is comparable, checked against the
   * equivalent regular path.
   */
@Test
public void testInodeIdBasedPaths() throws Exception {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
    // ACLs must be on so getAclStatus below succeeds rather than rejecting.
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        NamenodeProtocols nnRpc = cluster.getNameNodeRpc();
        // FileSystem#mkdirs "/testInodeIdBasedPaths"
        Path baseDir = getInodePath(INodeId.ROOT_INODE_ID, "testInodeIdBasedPaths");
        Path baseDirRegPath = new Path("/testInodeIdBasedPaths");
        fs.mkdirs(baseDir);
        fs.exists(baseDir);
        long baseDirFileId = nnRpc.getFileInfo(baseDir.toString()).getFileId();
        // FileSystem#create file and FileSystem#close
        Path testFileInodePath = getInodePath(baseDirFileId, "test1");
        Path testFileRegularPath = new Path(baseDir, "test1");
        final int testFileBlockSize = 1024;
        FileSystemTestHelper.createFile(fs, testFileInodePath, 1, testFileBlockSize);
        assertTrue(fs.exists(testFileInodePath));
        // FileSystem#setPermission
        FsPermission perm = new FsPermission((short) 0666);
        fs.setPermission(testFileInodePath, perm);
        // FileSystem#getFileStatus and FileSystem#getPermission
        FileStatus fileStatus = fs.getFileStatus(testFileInodePath);
        assertEquals(perm, fileStatus.getPermission());
        // FileSystem#setOwner (no-op values: re-applies current owner/group)
        fs.setOwner(testFileInodePath, fileStatus.getOwner(), fileStatus.getGroup());
        // FileSystem#setTimes — zero both timestamps, then verify via a fresh status
        fs.setTimes(testFileInodePath, 0, 0);
        fileStatus = fs.getFileStatus(testFileInodePath);
        assertEquals(0, fileStatus.getModificationTime());
        assertEquals(0, fileStatus.getAccessTime());
        // FileSystem#setReplication — raise to 3, verify, then restore to 1
        fs.setReplication(testFileInodePath, (short) 3);
        fileStatus = fs.getFileStatus(testFileInodePath);
        assertEquals(3, fileStatus.getReplication());
        fs.setReplication(testFileInodePath, (short) 1);
        // ClientProtocol#getPreferredBlockSize
        assertEquals(testFileBlockSize, nnRpc.getPreferredBlockSize(testFileInodePath.toString()));
        /*
       * HDFS-6749 added missing calls to FSDirectory.resolvePath in the
       * following four methods. The calls below ensure that
       * /.reserved/.inodes paths work properly. No need to check return
       * values as these methods are tested elsewhere.
       */
        {
            fs.isFileClosed(testFileInodePath);
            fs.getAclStatus(testFileInodePath);
            fs.getXAttrs(testFileInodePath);
            fs.listXAttrs(testFileInodePath);
            fs.access(testFileInodePath, FsAction.READ_WRITE);
        }
        // symbolic link related tests
        // Reserved path is not allowed as a target
        String invalidTarget = new Path(baseDir, "invalidTarget").toString();
        String link = new Path(baseDir, "link").toString();
        testInvalidSymlinkTarget(nnRpc, invalidTarget, link);
        // Test creating a link using reserved inode path
        String validTarget = "/validtarget";
        testValidSymlinkTarget(nnRpc, validTarget, link);
        // FileSystem#append
        // NOTE(review): the returned output stream is not closed here, which
        // leaves the file under lease — presumably deliberate so recoverLease
        // below has something to recover; confirm before "fixing".
        fs.append(testFileInodePath);
        // DistributedFileSystem#recoverLease
        fs.recoverLease(testFileInodePath);
        // Namenode#getBlockLocations — inode path and regular path must agree
        LocatedBlocks l1 = nnRpc.getBlockLocations(testFileInodePath.toString(), 0, Long.MAX_VALUE);
        LocatedBlocks l2 = nnRpc.getBlockLocations(testFileRegularPath.toString(), 0, Long.MAX_VALUE);
        checkEquals(l1, l2);
        // FileSystem#rename - both the variants
        Path renameDst = getInodePath(baseDirFileId, "test2");
        fileStatus = fs.getFileStatus(testFileInodePath);
        // Rename variant 1: rename and rename back
        fs.rename(testFileInodePath, renameDst);
        fs.rename(renameDst, testFileInodePath);
        assertEquals(fileStatus, fs.getFileStatus(testFileInodePath));
        // Rename variant 2: rename and rename back (Options.Rename overload)
        fs.rename(testFileInodePath, renameDst, Rename.OVERWRITE);
        fs.rename(renameDst, testFileInodePath, Rename.OVERWRITE);
        assertEquals(fileStatus, fs.getFileStatus(testFileInodePath));
        // FileSystem#getContentSummary
        assertEquals(fs.getContentSummary(testFileRegularPath).toString(), fs.getContentSummary(testFileInodePath).toString());
        // FileSystem#listFiles
        checkEquals(fs.listFiles(baseDirRegPath, false), fs.listFiles(baseDir, false));
        // FileSystem#delete
        fs.delete(testFileInodePath, true);
        assertFalse(fs.exists(testFileInodePath));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) FileStatus(org.apache.hadoop.fs.FileStatus) LocatedFileStatus(org.apache.hadoop.fs.LocatedFileStatus) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) Configuration(org.apache.hadoop.conf.Configuration) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) FsPermission(org.apache.hadoop.fs.permission.FsPermission) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Test(org.junit.Test)

Example 60 with FileStatus

use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.

The class TaskAttemptImpl, method createLocalResource.

/**
   * Create a {@link LocalResource} record with all the given parameters.
   * The resource's URL, size, and modification time are all derived from a
   * single stat of {@code file} on {@code fc}.
   */
private static LocalResource createLocalResource(FileSystem fc, Path file, LocalResourceType type, LocalResourceVisibility visibility) throws IOException {
    // Stat once; every field of the record comes from this status.
    final FileStatus status = fc.getFileStatus(file);
    final URL url = URL.fromPath(fc.resolvePath(status.getPath()));
    return LocalResource.newInstance(url, type, visibility, status.getLen(), status.getModificationTime());
}
Also used : FileStatus(org.apache.hadoop.fs.FileStatus) URL(org.apache.hadoop.yarn.api.records.URL)

Aggregations

FileStatus (org.apache.hadoop.fs.FileStatus)1156 Path (org.apache.hadoop.fs.Path)910 FileSystem (org.apache.hadoop.fs.FileSystem)417 Test (org.junit.Test)372 IOException (java.io.IOException)296 Configuration (org.apache.hadoop.conf.Configuration)187 ArrayList (java.util.ArrayList)175 FileNotFoundException (java.io.FileNotFoundException)136 LocatedFileStatus (org.apache.hadoop.fs.LocatedFileStatus)105 FsPermission (org.apache.hadoop.fs.permission.FsPermission)86 FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream)67 FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)65 HashMap (java.util.HashMap)54 File (java.io.File)41 URI (java.net.URI)41 PathFilter (org.apache.hadoop.fs.PathFilter)38 BufferedReader (java.io.BufferedReader)30 InputStreamReader (java.io.InputStreamReader)30 BlockLocation (org.apache.hadoop.fs.BlockLocation)30 HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus)30