
Example 96 with DFSClient

Use of org.apache.hadoop.hdfs.DFSClient in project akela by mozilla-metrics.

The ClusterHealth class, main method:

public static void main(String[] args) {
    // Exit code: 0 when everything is healthy, 2 when any check fails.
    int retCode = 0;
    Configuration conf = new Configuration();
    System.out.println("HDFS NameNode: " + conf.get("fs.default.name"));
    DFSClient dfsClient = null;
    try {
        dfsClient = new DFSClient(conf);
        // Report each live DataNode together with its capacity usage.
        DatanodeInfo[] liveNodes = dfsClient.datanodeReport(DatanodeReportType.LIVE);
        for (DatanodeInfo dni : liveNodes) {
            long dfsUsed = dni.getDfsUsed();
            long nonDfsUsed = dni.getNonDfsUsed();
            long capacity = dni.getCapacity();
            float capacityPercentage = ((float) (dfsUsed + nonDfsUsed) / (float) capacity) * 100.0f;
            System.out.println(String.format("%s DataNode - [ ALIVE ] - DFS Capacity: (%d + %d / %d) %.2f%%; xceivers: %d", new Object[] { dni.getHostName(), dfsUsed, nonDfsUsed, capacity, capacityPercentage, dni.getXceiverCount() }));
        }
        // Any dead DataNode marks the whole check as failed.
        DatanodeInfo[] deadNodes = dfsClient.datanodeReport(DatanodeReportType.DEAD);
        if (deadNodes.length > 0) {
            retCode = 2;
            for (DatanodeInfo dni : deadNodes) {
                System.out.println(dni.getHostName() + " DataNode - [ DEAD ]");
            }
        }
    } catch (IOException e) {
        retCode = 2;
        System.out.println("IOException occurred while checking HDFS cluster status!");
        e.printStackTrace(System.err);
    } finally {
        if (dfsClient != null) {
            try {
                dfsClient.close();
            } catch (IOException e) {
                System.out.println("IOException occurred while closing DFS client!");
                e.printStackTrace(System.err);
            }
        }
    }
    // Check HBase cluster status through HBaseAdmin.
    Configuration hbaseConf = HBaseConfiguration.create(conf);
    HBaseAdmin hbaseAdmin;
    try {
        System.out.println("HBase Rootdir: " + hbaseConf.get("hbase.rootdir"));
        hbaseAdmin = new HBaseAdmin(hbaseConf);
        ClusterStatus hcs = hbaseAdmin.getClusterStatus();
        int regionsCount = hcs.getRegionsCount();
        int requestsCount = hcs.getRequestsCount();
        for (ServerName server : hcs.getServers()) {
            HServerLoad hsl = hcs.getLoad(server);
            float heapPercentage = ((float) hsl.getUsedHeapMB() / (float) hsl.getMaxHeapMB()) * 100.0f;
            float regionsPercentage = regionsCount == 0 ? 0.0f : ((float) hsl.getNumberOfRegions() / (float) regionsCount) * 100.0f;
            float requestsPercentage = requestsCount == 0 ? 0.0f : ((float) hsl.getNumberOfRequests() / (float) requestsCount) * 100.0f;
            System.out.println(String.format("%s RegionServer - [ ALIVE ] - Memory Heap: (%d / %d MB) %.2f%%, Regions: (%d / %d) %.2f%%, Requests: (%d / %d) %.2f%%", new Object[] { server.getHostname(), hsl.getUsedHeapMB(), hsl.getMaxHeapMB(), heapPercentage, hsl.getNumberOfRegions(), regionsCount, regionsPercentage, hsl.getNumberOfRequests(), requestsCount, requestsPercentage }));
        }
        if (hcs.getDeadServers() > 0) {
            retCode = 2;
            for (ServerName server : hcs.getDeadServerNames()) {
                System.out.println(server.getHostname() + " RegionServer - [ DEAD ]");
            }
        }
    } catch (MasterNotRunningException e) {
        System.out.println("HBase Master is not running!");
        retCode = 2;
    } catch (IOException e) {
        System.out.println("IOException occurred while checking HBase cluster status!");
        retCode = 2;
    }
    int failures = 0;
    for (String host : args) {
        if (!ClusterHealth.testThrift(host)) {
            failures++;
        }
    }
    if (failures > 0) {
        retCode = 2;
    }
    System.exit(retCode);
}
Also used: DFSClient (org.apache.hadoop.hdfs.DFSClient) DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo) Configuration (org.apache.hadoop.conf.Configuration) IOException (java.io.IOException) HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin)
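
The example finishes by calling ClusterHealth.testThrift(host) for every host passed on the command line, but that helper is not part of this excerpt. A minimal sketch of such a check, assuming it only verifies that the host's HBase Thrift server port (9090 by default) accepts a TCP connection within a timeout, could look like this; the class name, port, timeout, and output format are illustrative, not taken from akela:

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;

public class ThriftCheck {

    // Hypothetical stand-in for ClusterHealth.testThrift(host); the real akela
    // implementation is not shown above and may use the Thrift client API instead.
    public static boolean testThrift(String host) {
        // Assumes the default HBase Thrift server port and a 5-second connect timeout.
        final int thriftPort = 9090;
        try (Socket socket = new Socket()) {
            socket.connect(new InetSocketAddress(host, thriftPort), 5000);
            System.out.println(host + " ThriftServer - [ ALIVE ]");
            return true;
        } catch (IOException e) {
            System.out.println(host + " ThriftServer - [ DEAD ]");
            return false;
        }
    }
}

Note that main sets retCode to 2 whenever any HDFS, HBase, or Thrift check fails, so the process exit status can be consumed directly by an external monitoring system.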

Example 97 with DFSClient

Use of org.apache.hadoop.hdfs.DFSClient in project SSM by Intel-bigdata.

The TestInotifyFetcher class, testFetcher method:

@Test(timeout = 60000)
public void testFetcher() throws IOException, InterruptedException {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
    // so that we can get an atime change
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 1);
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    builder.numDataNodes(2);
    MiniDFSCluster cluster = builder.build();
    try {
        cluster.waitActive();
        DFSClient client = new DFSClient(cluster.getNameNode(0).getNameNodeAddress(), conf);
        FileSystem fs = cluster.getFileSystem(0);
        DFSTestUtil.createFile(fs, new Path("/file"), BLOCK_SIZE, (short) 1, 0L);
        DFSTestUtil.createFile(fs, new Path("/file3"), BLOCK_SIZE, (short) 1, 0L);
        DFSTestUtil.createFile(fs, new Path("/file5"), BLOCK_SIZE, (short) 1, 0L);
        DFSTestUtil.createFile(fs, new Path("/truncate_file"), BLOCK_SIZE * 2, (short) 1, 0L);
        fs.mkdirs(new Path("/tmp"), new FsPermission("777"));
        DBAdapter adapter = mock(DBAdapter.class);
        EventApplierForTest applierForTest = new EventApplierForTest(adapter, client);
        final InotifyEventFetcher fetcher = new InotifyEventFetcher(client, adapter, Executors.newScheduledThreadPool(2), applierForTest);
        Thread thread = new Thread() {

            public void run() {
                try {
                    fetcher.start();
                } catch (IOException | InterruptedException e) {
                    e.printStackTrace();
                }
            }
        };
        thread.start();
        Thread.sleep(2000);
        /**
         * Code copied from {@link org.apache.hadoop.hdfs.TestDFSInotifyEventInputStream}
         */
        // RenameOp -> RenameEvent
        client.rename("/file", "/file4", null);
        // RenameOldOp -> RenameEvent
        client.rename("/file4", "/file2");
        // DeleteOp, AddOp -> UnlinkEvent, CreateEvent
        OutputStream os = client.create("/file2", true, (short) 2, BLOCK_SIZE);
        os.write(new byte[BLOCK_SIZE]);
        // CloseOp -> CloseEvent
        os.close();
        // AddOp -> AppendEvent
        os = client.append("/file2", BLOCK_SIZE, EnumSet.of(CreateFlag.APPEND), null, null);
        os.write(new byte[BLOCK_SIZE]);
        // CloseOp -> CloseEvent
        os.close();
        // so that the atime will get updated on the next line
        Thread.sleep(10);
        // TimesOp -> MetadataUpdateEvent
        client.open("/file2").read(new byte[1]);
        // SetReplicationOp -> MetadataUpdateEvent
        client.setReplication("/file2", (short) 1);
        // ConcatDeleteOp -> AppendEvent, UnlinkEvent, CloseEvent
        client.concat("/file2", new String[] { "/file3" });
        // DeleteOp -> UnlinkEvent
        client.delete("/file2", false);
        // MkdirOp -> CreateEvent
        client.mkdirs("/dir", null, false);
        // SetPermissionsOp -> MetadataUpdateEvent
        client.setPermission("/dir", FsPermission.valueOf("-rw-rw-rw-"));
        // SetOwnerOp -> MetadataUpdateEvent
        Thread.sleep(2000);
        client.setOwner("/dir", "username", "groupname");
        // SymlinkOp -> CreateEvent
        client.createSymlink("/dir", "/dir2", false);
        // SetXAttrOp -> MetadataUpdateEvent
        client.setXAttr("/file5", "user.field", "value".getBytes(), EnumSet.of(XAttrSetFlag.CREATE));
        // RemoveXAttrOp -> MetadataUpdateEvent
        client.removeXAttr("/file5", "user.field");
        // SetAclOp -> MetadataUpdateEvent
        client.setAcl("/file5", AclEntry.parseAclSpec("user::rwx,user:foo:rw-,group::r--,other::---", true));
        // SetAclOp -> MetadataUpdateEvent
        client.removeAcl("/file5");
        // RenameOldOp -> RenameEvent
        client.rename("/file5", "/dir");
        //TruncateOp -> TruncateEvent
        client.truncate("/truncate_file", BLOCK_SIZE);
        while (applierForTest.getEvents().size() != 21) {
            Thread.sleep(100);
        }
        /**
         * Refer to {@link org.apache.hadoop.hdfs.TestDFSInotifyEventInputStream} for more detail
         */
        List<Event> events = applierForTest.getEvents();
        Assert.assertTrue(events.get(0).getEventType() == Event.EventType.RENAME);
        Assert.assertTrue(events.get(1).getEventType() == Event.EventType.RENAME);
        Assert.assertTrue(events.get(2).getEventType() == Event.EventType.CREATE);
        Assert.assertTrue(events.get(3).getEventType() == Event.EventType.CLOSE);
        Assert.assertTrue(events.get(4).getEventType() == Event.EventType.APPEND);
        Assert.assertTrue(events.get(5).getEventType() == Event.EventType.CLOSE);
        Assert.assertTrue(events.get(6).getEventType() == Event.EventType.METADATA);
        Assert.assertTrue(events.get(7).getEventType() == Event.EventType.METADATA);
        Assert.assertTrue(events.get(8).getEventType() == Event.EventType.APPEND);
        Assert.assertTrue(events.get(9).getEventType() == Event.EventType.UNLINK);
        Assert.assertTrue(events.get(10).getEventType() == Event.EventType.CLOSE);
        Assert.assertTrue(events.get(11).getEventType() == Event.EventType.UNLINK);
        Assert.assertTrue(events.get(12).getEventType() == Event.EventType.CREATE);
        Assert.assertTrue(events.get(13).getEventType() == Event.EventType.METADATA);
        Assert.assertTrue(events.get(14).getEventType() == Event.EventType.METADATA);
        Assert.assertTrue(events.get(15).getEventType() == Event.EventType.CREATE);
        Assert.assertTrue(events.get(16).getEventType() == Event.EventType.METADATA);
        Assert.assertTrue(events.get(17).getEventType() == Event.EventType.METADATA);
        Assert.assertTrue(events.get(18).getEventType() == Event.EventType.METADATA);
        Assert.assertTrue(events.get(19).getEventType() == Event.EventType.METADATA);
        Assert.assertTrue(events.get(20).getEventType() == Event.EventType.RENAME);
        //      Assert.assertTrue(events.get(21).getEventType() == Event.EventType.TRUNCATE);
        fetcher.stop();
    } finally {
        cluster.shutdown();
    }
}
Also used: DFSClient (org.apache.hadoop.hdfs.DFSClient) Path (org.apache.hadoop.fs.Path) MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster) HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration) Configuration (org.apache.hadoop.conf.Configuration) OutputStream (java.io.OutputStream) IOException (java.io.IOException) DBAdapter (org.smartdata.server.metastore.DBAdapter) FileSystem (org.apache.hadoop.fs.FileSystem) Event (org.apache.hadoop.hdfs.inotify.Event) FsPermission (org.apache.hadoop.fs.permission.FsPermission) Test (org.junit.Test)
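
The InotifyEventFetcher under test is built on the HDFS inotify stream that DFSClient exposes. For orientation, a minimal sketch of reading that stream directly via DFSClient.getInotifyEventStream(), assuming Hadoop 2.7+ where poll() returns an EventBatch, might look like this (the class and method names are illustrative, not part of SSM):

import java.io.IOException;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.inotify.Event;
import org.apache.hadoop.hdfs.inotify.EventBatch;
import org.apache.hadoop.hdfs.inotify.MissingEventsException;

public class InotifyPollSketch {

    // Drains all currently available inotify events from the NameNode and
    // prints their types; poll() returns null once no further batches are ready.
    public static void pollOnce(DFSClient client) throws IOException {
        DFSInotifyEventInputStream stream = client.getInotifyEventStream();
        try {
            EventBatch batch;
            while ((batch = stream.poll()) != null) {
                for (Event event : batch.getEvents()) {
                    System.out.println(event.getEventType() + " @ txid " + batch.getTxid());
                }
            }
        } catch (MissingEventsException e) {
            // Raised when requested transactions have already been purged from the edit log.
            throw new IOException(e);
        }
    }
}

The test above drives the same mechanism through InotifyEventFetcher, which is wired to the mocked DBAdapter and to the EventApplierForTest used for the assertions.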

Aggregations

DFSClient (org.apache.hadoop.hdfs.DFSClient): 97 usages
Test (org.junit.Test): 53 usages
IOException (java.io.IOException): 35 usages
Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes): 27 usages
FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle): 26 usages
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 18 usages
Path (org.apache.hadoop.fs.Path): 18 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 17 usages
InetSocketAddress (java.net.InetSocketAddress): 13 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 13 usages
Configuration (org.apache.hadoop.conf.Configuration): 12 usages
NfsConfiguration (org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration): 12 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 11 usages
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 11 usages
HdfsDataOutputStream (org.apache.hadoop.hdfs.client.HdfsDataOutputStream): 9 usages
WccData (org.apache.hadoop.nfs.nfs3.response.WccData): 9 usages
ShellBasedIdMapping (org.apache.hadoop.security.ShellBasedIdMapping): 8 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 7 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 7 usages
ArrayList (java.util.ArrayList): 6 usages