Search in sources :

Example 1 with StatisticsData

Use of org.apache.hadoop.fs.FileSystem.Statistics.StatisticsData in the Apache Hadoop project.

From the class TestDistributedFileSystem, method testStatistics.

/**
 * Verifies that each DistributedFileSystem operation updates the
 * per-scheme {@link FileSystem.Statistics} counters (readOps, writeOps,
 * largeReadOps) and the per-operation counters exactly once per call,
 * and that a paged listStatus on a large directory counts one read op
 * (and one large-read op) per listing iteration.
 *
 * @throws IOException if the mini-cluster or any filesystem call fails
 */
@Test
public void testStatistics() throws IOException {
    // Start from a clean slate: reset the aggregate statistics for the HDFS
    // scheme and null out the thread-local snapshot so counters accumulated
    // by earlier tests on this thread do not bleed into the checks below.
    FileSystem.getStatistics(HdfsConstants.HDFS_URI_SCHEME, DistributedFileSystem.class).reset();
    @SuppressWarnings("unchecked") ThreadLocal<StatisticsData> data = (ThreadLocal<StatisticsData>) Whitebox.getInternalState(FileSystem.getStatistics(HdfsConstants.HDFS_URI_SCHEME, DistributedFileSystem.class), "threadData");
    data.set(null);
    // A small listing limit forces listStatus to page through results once a
    // directory holds more than lsLimit entries, exercising largeReadOps.
    int lsLimit = 2;
    final Configuration conf = getTestConfiguration();
    conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, lsLimit);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        cluster.waitActive();
        final FileSystem fs = cluster.getFileSystem();
        Path dir = new Path("/test");
        Path file = new Path(dir, "file");
        // Expected aggregate counters, incremented in lock-step with each
        // filesystem call so checkStatistics() can assert exact values.
        int readOps = 0;
        int writeOps = 0;
        int largeReadOps = 0;
        // mkdirs: one write op.
        long opCount = getOpStatistics(OpType.MKDIRS);
        fs.mkdirs(dir);
        checkStatistics(fs, readOps, ++writeOps, largeReadOps);
        checkOpStatistics(OpType.MKDIRS, opCount + 1);
        // create: one write op.
        opCount = getOpStatistics(OpType.CREATE);
        FSDataOutputStream out = fs.create(file, (short) 1);
        out.close();
        checkStatistics(fs, readOps, ++writeOps, largeReadOps);
        checkOpStatistics(OpType.CREATE, opCount + 1);
        // getFileStatus: one read op.
        opCount = getOpStatistics(OpType.GET_FILE_STATUS);
        FileStatus status = fs.getFileStatus(file);
        checkStatistics(fs, ++readOps, writeOps, largeReadOps);
        checkOpStatistics(OpType.GET_FILE_STATUS, opCount + 1);
        // getFileBlockLocations: one read op per overload invoked.
        opCount = getOpStatistics(OpType.GET_FILE_BLOCK_LOCATIONS);
        fs.getFileBlockLocations(file, 0, 0);
        checkStatistics(fs, ++readOps, writeOps, largeReadOps);
        checkOpStatistics(OpType.GET_FILE_BLOCK_LOCATIONS, opCount + 1);
        fs.getFileBlockLocations(status, 0, 0);
        checkStatistics(fs, ++readOps, writeOps, largeReadOps);
        checkOpStatistics(OpType.GET_FILE_BLOCK_LOCATIONS, opCount + 2);
        // open: one read op.
        opCount = getOpStatistics(OpType.OPEN);
        FSDataInputStream in = fs.open(file);
        in.close();
        checkStatistics(fs, ++readOps, writeOps, largeReadOps);
        checkOpStatistics(OpType.OPEN, opCount + 1);
        // setReplication: one write op.
        opCount = getOpStatistics(OpType.SET_REPLICATION);
        fs.setReplication(file, (short) 2);
        checkStatistics(fs, readOps, ++writeOps, largeReadOps);
        checkOpStatistics(OpType.SET_REPLICATION, opCount + 1);
        // rename: one write op.
        opCount = getOpStatistics(OpType.RENAME);
        Path file1 = new Path(dir, "file1");
        fs.rename(file, file1);
        checkStatistics(fs, readOps, ++writeOps, largeReadOps);
        checkOpStatistics(OpType.RENAME, opCount + 1);
        // getContentSummary: one read op.
        opCount = getOpStatistics(OpType.GET_CONTENT_SUMMARY);
        fs.getContentSummary(file1);
        checkStatistics(fs, ++readOps, writeOps, largeReadOps);
        checkOpStatistics(OpType.GET_CONTENT_SUMMARY, opCount + 1);
        // Iterative ls test: grow the directory one entry at a time and
        // verify the paged-listing accounting against DFS_LIST_LIMIT.
        long mkdirOp = getOpStatistics(OpType.MKDIRS);
        long listStatusOp = getOpStatistics(OpType.LIST_STATUS);
        for (int i = 0; i < 10; i++) {
            Path p = new Path(dir, Integer.toString(i));
            fs.mkdirs(p);
            mkdirOp++;
            FileStatus[] list = fs.listStatus(dir);
            if (list.length > lsLimit) {
                // if large directory, then count readOps and largeReadOps by
                // the number of times listStatus iterates
                int iterations = (int) Math.ceil((double) list.length / lsLimit);
                largeReadOps += iterations;
                readOps += iterations;
                listStatusOp += iterations;
            } else {
                // Single iteration in listStatus - no large read operation done
                readOps++;
                listStatusOp++;
            }
            // writeOps incremented by 1 for mkdirs
            // readOps and largeReadOps incremented by 1 or more
            checkStatistics(fs, readOps, ++writeOps, largeReadOps);
            checkOpStatistics(OpType.MKDIRS, mkdirOp);
            checkOpStatistics(OpType.LIST_STATUS, listStatusOp);
        }
        // getStatus: one read op.
        opCount = getOpStatistics(OpType.GET_STATUS);
        fs.getStatus(file1);
        checkStatistics(fs, ++readOps, writeOps, largeReadOps);
        checkOpStatistics(OpType.GET_STATUS, opCount + 1);
        // getFileChecksum: one read op.
        opCount = getOpStatistics(OpType.GET_FILE_CHECKSUM);
        fs.getFileChecksum(file1);
        checkStatistics(fs, ++readOps, writeOps, largeReadOps);
        checkOpStatistics(OpType.GET_FILE_CHECKSUM, opCount + 1);
        // setPermission: one write op.
        opCount = getOpStatistics(OpType.SET_PERMISSION);
        fs.setPermission(file1, new FsPermission((short) 0777));
        checkStatistics(fs, readOps, ++writeOps, largeReadOps);
        checkOpStatistics(OpType.SET_PERMISSION, opCount + 1);
        // setTimes: one write op.
        opCount = getOpStatistics(OpType.SET_TIMES);
        fs.setTimes(file1, 0L, 0L);
        checkStatistics(fs, readOps, ++writeOps, largeReadOps);
        checkOpStatistics(OpType.SET_TIMES, opCount + 1);
        // setOwner: one write op. Check order made consistent with the other
        // operations (aggregate stats first, then the per-op counter); the two
        // assertions are independent so this does not change what is verified.
        opCount = getOpStatistics(OpType.SET_OWNER);
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        fs.setOwner(file1, ugi.getUserName(), ugi.getGroupNames()[0]);
        checkStatistics(fs, readOps, ++writeOps, largeReadOps);
        checkOpStatistics(OpType.SET_OWNER, opCount + 1);
        // delete: one write op.
        opCount = getOpStatistics(OpType.DELETE);
        fs.delete(dir, true);
        checkStatistics(fs, readOps, ++writeOps, largeReadOps);
        checkOpStatistics(OpType.DELETE, opCount + 1);
    } finally {
        // cluster is assigned before the try block, so it can never be null
        // here; the former "if (cluster != null)" guard was dead code.
        cluster.shutdown();
    }
}
Also used: Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) LocatedFileStatus(org.apache.hadoop.fs.LocatedFileStatus) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) FsPermission(org.apache.hadoop.fs.permission.FsPermission) StatisticsData(org.apache.hadoop.fs.FileSystem.Statistics.StatisticsData) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Test(org.junit.Test)

Aggregations

Configuration (org.apache.hadoop.conf.Configuration)1 FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream)1 FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)1 FileStatus (org.apache.hadoop.fs.FileStatus)1 FileSystem (org.apache.hadoop.fs.FileSystem)1 StatisticsData (org.apache.hadoop.fs.FileSystem.Statistics.StatisticsData)1 LocatedFileStatus (org.apache.hadoop.fs.LocatedFileStatus)1 Path (org.apache.hadoop.fs.Path)1 FsPermission (org.apache.hadoop.fs.permission.FsPermission)1 UserGroupInformation (org.apache.hadoop.security.UserGroupInformation)1 Test (org.junit.Test)1