
Example 66 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

From the class DFSAdmin, method report().

/**
   * Gives a report on how the FileSystem is doing.
   * @exception IOException if the filesystem does not exist.
   */
public void report(String[] argv, int i) throws IOException {
    DistributedFileSystem dfs = getDFS();
    FsStatus ds = dfs.getStatus();
    long capacity = ds.getCapacity();
    long used = ds.getUsed();
    long remaining = ds.getRemaining();
    long bytesInFuture = dfs.getBytesWithFutureGenerationStamps();
    long presentCapacity = used + remaining;
    boolean mode = dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET);
    if (mode) {
        System.out.println("Safe mode is ON");
        if (bytesInFuture > 0) {
            System.out.println("\nWARNING: ");
            System.out.println("Name node has detected blocks with generation " + "stamps in future.");
            System.out.println("Forcing exit from safemode will cause " + bytesInFuture + " byte(s) to be deleted.");
            System.out.println("If you are sure that the NameNode was started with" + " the correct metadata files then you may proceed with " + "'-safemode forceExit'\n");
        }
    }
    System.out.println("Configured Capacity: " + capacity + " (" + StringUtils.byteDesc(capacity) + ")");
    System.out.println("Present Capacity: " + presentCapacity + " (" + StringUtils.byteDesc(presentCapacity) + ")");
    System.out.println("DFS Remaining: " + remaining + " (" + StringUtils.byteDesc(remaining) + ")");
    System.out.println("DFS Used: " + used + " (" + StringUtils.byteDesc(used) + ")");
    System.out.println("DFS Used%: " + StringUtils.formatPercent(used / (double) presentCapacity, 2));
    /* These counts are not always up to date. They are updated after an
     * iteration over an internal list, so they should converge within a few
     * seconds to minutes. Use "-metaSave" to list all such blocks and get
     * accurate counts.
     */
    System.out.println("Under replicated blocks: " + dfs.getUnderReplicatedBlocksCount());
    System.out.println("Blocks with corrupt replicas: " + dfs.getCorruptBlocksCount());
    System.out.println("Missing blocks: " + dfs.getMissingBlocksCount());
    System.out.println("Missing blocks (with replication factor 1): " + dfs.getMissingReplOneBlocksCount());
    System.out.println("Pending deletion blocks: " + dfs.getPendingDeletionBlocksCount());
    System.out.println();
    System.out.println("-------------------------------------------------");
    // Parse arguments for filtering the node list
    List<String> args = Arrays.asList(argv);
    // Truncate already handled arguments before parsing report()-specific ones
    args = new ArrayList<String>(args.subList(i, args.size()));
    final boolean listLive = StringUtils.popOption("-live", args);
    final boolean listDead = StringUtils.popOption("-dead", args);
    final boolean listDecommissioning = StringUtils.popOption("-decommissioning", args);
    // If no filter flags are found, then list all DN types
    boolean listAll = (!listLive && !listDead && !listDecommissioning);
    if (listAll || listLive) {
        DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
        if (live.length > 0 || listLive) {
            System.out.println("Live datanodes (" + live.length + "):\n");
        }
        if (live.length > 0) {
            for (DatanodeInfo dn : live) {
                System.out.println(dn.getDatanodeReport());
                System.out.println();
            }
        }
    }
    if (listAll || listDead) {
        DatanodeInfo[] dead = dfs.getDataNodeStats(DatanodeReportType.DEAD);
        if (dead.length > 0 || listDead) {
            System.out.println("Dead datanodes (" + dead.length + "):\n");
        }
        if (dead.length > 0) {
            for (DatanodeInfo dn : dead) {
                System.out.println(dn.getDatanodeReport());
                System.out.println();
            }
        }
    }
    if (listAll || listDecommissioning) {
        DatanodeInfo[] decom = dfs.getDataNodeStats(DatanodeReportType.DECOMMISSIONING);
        if (decom.length > 0 || listDecommissioning) {
            System.out.println("Decommissioning datanodes (" + decom.length + "):\n");
        }
        if (decom.length > 0) {
            for (DatanodeInfo dn : decom) {
                System.out.println(dn.getDatanodeReport());
                System.out.println();
            }
        }
    }
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) FsStatus(org.apache.hadoop.fs.FsStatus)
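The report() method above is wired to DFSAdmin internals (getDFS(), argument parsing), but the statistics themselves come from public DistributedFileSystem calls. Below is a minimal standalone sketch of reading the same totals from client code; the class name and main() wrapper are ours, and it assumes fs.defaultFS points at an HDFS cluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.util.StringUtils;

public class ClusterStatusSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // FileSystem.get(conf) yields a DistributedFileSystem only when
        // fs.defaultFS is an hdfs:// URI; the cast fails otherwise.
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
        FsStatus status = dfs.getStatus();
        long capacity = status.getCapacity();
        long used = status.getUsed();
        long remaining = status.getRemaining();
        System.out.println("Configured Capacity: " + StringUtils.byteDesc(capacity));
        System.out.println("DFS Used: " + StringUtils.byteDesc(used));
        System.out.println("DFS Remaining: " + StringUtils.byteDesc(remaining));
        System.out.println("Under replicated blocks: "
                + dfs.getUnderReplicatedBlocksCount());
    }
}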

Example 67 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

From the class DFSAdmin, method refreshNodes().

/**
   * Command to ask the namenode to re-read the hosts and excluded-hosts
   * files.
   * Usage: hdfs dfsadmin -refreshNodes
   * @exception IOException 
   */
public int refreshNodes() throws IOException {
    int exitCode = -1;
    DistributedFileSystem dfs = getDFS();
    Configuration dfsConf = dfs.getConf();
    URI dfsUri = dfs.getUri();
    boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
    if (isHaEnabled) {
        String nsId = dfsUri.getHost();
        List<ProxyAndInfo<ClientProtocol>> proxies = HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId, ClientProtocol.class);
        for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
            proxy.getProxy().refreshNodes();
            System.out.println("Refresh nodes successful for " + proxy.getAddress());
        }
    } else {
        dfs.refreshNodes();
        System.out.println("Refresh nodes successful");
    }
    exitCode = 0;
    return exitCode;
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) ProxyAndInfo(org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) ClientProtocol(org.apache.hadoop.hdfs.protocol.ClientProtocol) URI(java.net.URI)
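For callers outside DFSAdmin, the non-HA branch reduces to a single refreshNodes() call on the DistributedFileSystem. A minimal sketch, assuming a non-HA cluster and superuser privileges; the namenode URI and class name are placeholders, not part of the example.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class RefreshNodesSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder address; substitute your namenode's RPC endpoint.
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(
                URI.create("hdfs://namenode.example.com:8020"), conf);
        // Mirrors the non-HA branch above. For an HA nameservice, dfsadmin
        // instead fans the call out to every namenode via ClientProtocol proxies.
        dfs.refreshNodes();
        System.out.println("Refresh nodes requested");
    }
}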

Example 68 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

From the class DFSAdmin, method refreshUserToGroupsMappings().

/**
   * Refresh the user-to-groups mappings on the {@link NameNode}.
   * @return exit code 0 on success, non-zero on failure
   * @throws IOException
   */
public int refreshUserToGroupsMappings() throws IOException {
    // Get the current configuration
    Configuration conf = getConf();
    // For security authorization, the server principal for this call
    // should be the NameNode's.
    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));
    DistributedFileSystem dfs = getDFS();
    URI dfsUri = dfs.getUri();
    boolean isHaEnabled = HAUtilClient.isLogicalUri(conf, dfsUri);
    if (isHaEnabled) {
        // Run refreshUserToGroupsMapings for all NNs if HA is enabled
        String nsId = dfsUri.getHost();
        List<ProxyAndInfo<RefreshUserMappingsProtocol>> proxies = HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId, RefreshUserMappingsProtocol.class);
        for (ProxyAndInfo<RefreshUserMappingsProtocol> proxy : proxies) {
            proxy.getProxy().refreshUserToGroupsMappings();
            System.out.println("Refresh user to groups mapping successful for " + proxy.getAddress());
        }
    } else {
        // Create the client
        RefreshUserMappingsProtocol refreshProtocol = NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf), RefreshUserMappingsProtocol.class).getProxy();
        // Refresh the user-to-groups mappings
        refreshProtocol.refreshUserToGroupsMappings();
        System.out.println("Refresh user to groups mapping successful");
    }
    return 0;
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) ProxyAndInfo(org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo) RefreshUserMappingsProtocol(org.apache.hadoop.security.RefreshUserMappingsProtocol) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) URI(java.net.URI)
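In the non-HA case the method boils down to building a RefreshUserMappingsProtocol proxy against the default filesystem URI and invoking it. The sketch below restates that branch as a self-contained program; the class name and main() wrapper are ours, and it assumes a non-HA setup with sufficient (superuser) privileges.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.security.RefreshUserMappingsProtocol;

public class RefreshGroupsSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // As in the example above: when security is enabled, the service
        // principal for this RPC must be the NameNode's principal.
        conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
                conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));
        RefreshUserMappingsProtocol refreshProtocol = NameNodeProxies
                .createProxy(conf, FileSystem.getDefaultUri(conf),
                        RefreshUserMappingsProtocol.class)
                .getProxy();
        refreshProtocol.refreshUserToGroupsMappings();
        System.out.println("Refresh user to groups mapping requested");
    }
}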

Example 69 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

From the class DFSAdmin, method allowSnapshot().

/**
   * Allow snapshot on a directory.
   * Usage: hdfs dfsadmin -allowSnapshot snapshotDir
   * @param argv List of command line parameters.
   * @exception IOException
   */
public void allowSnapshot(String[] argv) throws IOException {
    Path p = new Path(argv[1]);
    final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), getConf());
    try {
        dfs.allowSnapshot(p);
    } catch (SnapshotException e) {
        throw new RemoteException(e.getClass().getName(), e.getMessage());
    }
    System.out.println("Allowing snaphot on " + argv[1] + " succeeded");
}
Also used : Path(org.apache.hadoop.fs.Path) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) RemoteException(org.apache.hadoop.ipc.RemoteException) SnapshotException(org.apache.hadoop.hdfs.protocol.SnapshotException)

Example 70 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

From the class DFSAdmin, method disallowSnapshot().

/**
   * Disallow snapshot on a directory.
   * Usage: hdfs dfsadmin -disallowSnapshot snapshotDir
   * @param argv List of command line parameters.
   * @exception IOException
   */
public void disallowSnapshot(String[] argv) throws IOException {
    Path p = new Path(argv[1]);
    final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), getConf());
    try {
        dfs.disallowSnapshot(p);
    } catch (SnapshotException e) {
        throw new RemoteException(e.getClass().getName(), e.getMessage());
    }
    System.out.println("Disallowing snaphot on " + argv[1] + " succeeded");
}
Also used : Path(org.apache.hadoop.fs.Path) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) RemoteException(org.apache.hadoop.ipc.RemoteException) SnapshotException(org.apache.hadoop.hdfs.protocol.SnapshotException)
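The two snapshot examples above wrap DistributedFileSystem.allowSnapshot() and disallowSnapshot() with dfsadmin-style error reporting. The sketch below shows the same calls end to end from client code; the directory path and snapshot name are hypothetical, and the operations assume superuser privileges.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class SnapshotAdminSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
        // Hypothetical directory; use a path that exists on your cluster.
        Path dir = new Path("/data/reports");
        dfs.allowSnapshot(dir);
        dfs.createSnapshot(dir, "s0");
        // Every snapshot under the directory must be deleted before
        // disallowSnapshot() will succeed.
        dfs.deleteSnapshot(dir, "s0");
        dfs.disallowSnapshot(dir);
    }
}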

Aggregations

DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 252
Test (org.junit.Test): 175
Path (org.apache.hadoop.fs.Path): 169
Configuration (org.apache.hadoop.conf.Configuration): 126
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 126
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 86
IOException (java.io.IOException): 63
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 36
FileSystem (org.apache.hadoop.fs.FileSystem): 31
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 31
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 26
URI (java.net.URI): 24
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 22
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 19
AccessControlException (org.apache.hadoop.security.AccessControlException): 19
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 18
Matchers.anyString (org.mockito.Matchers.anyString): 18
FileStatus (org.apache.hadoop.fs.FileStatus): 16
ArrayList (java.util.ArrayList): 14
CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo): 14