Example 6 with FSNamesystem

use of org.apache.hadoop.hdfs.server.namenode.FSNamesystem in project hadoop by apache.

the class TestDataNodeUGIProvider method getWebHdfsFileSystem.

private WebHdfsFileSystem getWebHdfsFileSystem(UserGroupInformation ugi, Configuration conf, List<Token<DelegationTokenIdentifier>> tokens) throws IOException {
    if (UserGroupInformation.isSecurityEnabled()) {
        // Build a delegation token identifier for the caller, backed by a
        // secret manager wired to a mocked FSNamesystem (the four 86400000 ms
        // arguments are 24-hour intervals for key update, token max lifetime,
        // renewal, and the expired-token remover scan).
        DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(new Text(ugi.getUserName()), null, null);
        FSNamesystem namesystem = mock(FSNamesystem.class);
        DelegationTokenSecretManager dtSecretManager = new DelegationTokenSecretManager(86400000, 86400000, 86400000, 86400000, namesystem);
        dtSecretManager.startThreads();
        // Mint two WebHDFS delegation tokens, point their service at the
        // WebHDFS endpoint, and attach them to both the token list and the UGI.
        Token<DelegationTokenIdentifier> token1 = new Token<DelegationTokenIdentifier>(dtId, dtSecretManager);
        Token<DelegationTokenIdentifier> token2 = new Token<DelegationTokenIdentifier>(dtId, dtSecretManager);
        SecurityUtil.setTokenService(token1, NetUtils.createSocketAddr(uri.getAuthority()));
        SecurityUtil.setTokenService(token2, NetUtils.createSocketAddr(uri.getAuthority()));
        token1.setKind(WebHdfsConstants.WEBHDFS_TOKEN_KIND);
        token2.setKind(WebHdfsConstants.WEBHDFS_TOKEN_KIND);
        tokens.add(token1);
        tokens.add(token2);
        ugi.addToken(token1);
        ugi.addToken(token2);
    }
    return (WebHdfsFileSystem) FileSystem.get(uri, conf);
}
Also used: DelegationTokenSecretManager (org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager), DelegationTokenIdentifier (org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier), Text (org.apache.hadoop.io.Text), Token (org.apache.hadoop.security.token.Token), WebHdfsFileSystem (org.apache.hadoop.hdfs.web.WebHdfsFileSystem), FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem)
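
For context, a minimal call-site sketch. It is hypothetical: the uri and conf fields and the running cluster come from the surrounding test class, and "testuser" is an invented name.

    // Hypothetical usage, assuming a running MiniDFSCluster whose WebHDFS
    // endpoint backs the test's uri/conf fields:
    List<Token<DelegationTokenIdentifier>> tokens = new ArrayList<>();
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("testuser");
    WebHdfsFileSystem webhdfs = getWebHdfsFileSystem(ugi, conf, tokens);
    // With Kerberos enabled, tokens now holds the two WebHDFS delegation
    // tokens that were also attached to the UGI; otherwise it stays empty.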

Example 7 with FSNamesystem

use of org.apache.hadoop.hdfs.server.namenode.FSNamesystem in project hadoop by apache.

the class DFSTestUtil method createStripedFile.

/**
   * Creates the metadata of a file in striped layout. This method only
   * manipulates the NameNode state without injecting data into DataNodes.
   * Periodic heartbeats should be disabled before using this method.
   * @param file Path of the file to create
   * @param dir Parent path of the file
   * @param numBlocks Number of striped block groups to add to the file
   * @param numStripesPerBlk Number of striped cells in each block
   * @param toMkdir whether to create {@code dir} and set the erasure coding
   *                policy on it before creating the file
   * @param ecPolicy erasure coding policy to apply to the created file. A null
   *                 value means the default erasure coding policy is used.
   */
public static void createStripedFile(MiniDFSCluster cluster, Path file, Path dir, int numBlocks, int numStripesPerBlk, boolean toMkdir, ErasureCodingPolicy ecPolicy) throws Exception {
    DistributedFileSystem dfs = cluster.getFileSystem();
    // If the outer test has already set an EC policy, pass toMkdir == false
    // and leave dir null.
    if (toMkdir) {
        assert dir != null;
        dfs.mkdirs(dir);
        try {
            dfs.getClient().setErasureCodingPolicy(dir.toString(), ecPolicy.getName());
        } catch (IOException e) {
            if (!e.getMessage().contains("non-empty directory")) {
                throw e;
            }
        }
    }
    // Create the file entry in the NameNode namespace only; no DataNode
    // write pipeline is set up (replication 1, 128 MB block size).
    cluster.getNameNodeRpc().create(file.toString(), new FsPermission((short) 0755), dfs.getClient().getClientName(), new EnumSetWritable<>(EnumSet.of(CreateFlag.CREATE)), false, (short) 1, 128 * 1024 * 1024L, null);
    FSNamesystem ns = cluster.getNamesystem();
    FSDirectory fsdir = ns.getFSDirectory();
    INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
    ExtendedBlock previous = null;
    for (int i = 0; i < numBlocks; i++) {
        Block newBlock = addBlockToFile(true, cluster.getDataNodes(), dfs, ns, file.toString(), fileNode, dfs.getClient().getClientName(), previous, numStripesPerBlk, 0);
        previous = new ExtendedBlock(ns.getBlockPoolId(), newBlock);
    }
    dfs.getClient().namenode.complete(file.toString(), dfs.getClient().getClientName(), previous, fileNode.getId());
}
Also used: ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Block (org.apache.hadoop.hdfs.protocol.Block), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), FSDirectory (org.apache.hadoop.hdfs.server.namenode.FSDirectory), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), FsPermission (org.apache.hadoop.fs.permission.FsPermission), FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem), INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile)
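
A hedged invocation sketch; the cluster variable, the paths, and the RS-6-3 policy lookup via SystemErasureCodingPolicies are assumptions layered on top of the snippet, not part of it. Note that the toMkdir branch dereferences ecPolicy, so passing null only works when the directory's policy was set beforehand and toMkdir is false.

    // Hypothetical test fragment: create /ec/striped.dat with two striped
    // block groups of four stripes each (paths and policy are assumptions).
    Path dir = new Path("/ec");
    Path file = new Path(dir, "striped.dat");
    ErasureCodingPolicy ecPolicy = SystemErasureCodingPolicies.getByID(
        SystemErasureCodingPolicies.RS_6_3_POLICY_ID);
    DFSTestUtil.createStripedFile(cluster, file, dir,
        /* numBlocks */ 2, /* numStripesPerBlk */ 4, /* toMkdir */ true, ecPolicy);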

Example 8 with FSNamesystem

use of org.apache.hadoop.hdfs.server.namenode.FSNamesystem in project hadoop by apache.

the class TestMaintenanceState method testWithNNAndDNRestart.

/**
   * Verify the following scenario.
   * a. Put a live node to maintenance => 1 maintenance, 2 live.
   * b. The maintenance node becomes dead => block map still has 1 maintenance,
   *    2 live.
   * c. Restart nn => block map only has 2 live => re-replication restores
   *    3 live.
   * d. Restart the maintenance dn => 1 maintenance, 3 live.
   * e. Take the node out of maintenance => over replication => 3 live.
   */
@Test(timeout = 360000)
public void testWithNNAndDNRestart() throws Exception {
    LOG.info("Starting testWithNNAndDNRestart");
    final int numNamenodes = 1;
    final int numDatanodes = 4;
    startCluster(numNamenodes, numDatanodes);
    final Path file = new Path("/testWithNNAndDNRestart.dat");
    final int replicas = 3;
    final FileSystem fileSys = getCluster().getFileSystem(0);
    FSNamesystem ns = getCluster().getNamesystem(0);
    writeFile(fileSys, file, replicas, 1);
    DatanodeInfo nodeOutofService = takeNodeOutofService(0, getFirstBlockFirstReplicaUuid(fileSys, file), Long.MAX_VALUE, null, AdminStates.IN_MAINTENANCE);
    assertNull(checkWithRetry(ns, fileSys, file, replicas - 1, nodeOutofService));
    DFSClient client = getDfsClient(0);
    assertEquals("All datanodes must be alive", numDatanodes, client.datanodeReport(DatanodeReportType.LIVE).length);
    MiniDFSCluster.DataNodeProperties dnProp = getCluster().stopDataNode(nodeOutofService.getXferAddr());
    DFSTestUtil.waitForDatanodeState(getCluster(), nodeOutofService.getDatanodeUuid(), false, 20000);
    assertEquals("maintenance node shouldn't be alive", numDatanodes - 1, client.datanodeReport(DatanodeReportType.LIVE).length);
    // Dead maintenance node's blocks should remain in block map.
    assertNull(checkWithRetry(ns, fileSys, file, replicas - 1, nodeOutofService));
    // Restart the NN; it will restore 3 live replicas because it doesn't
    // know the dead maintenance node still holds one.
    getCluster().restartNameNode(0);
    ns = getCluster().getNamesystem(0);
    assertNull(checkWithRetry(ns, fileSys, file, replicas, null));
    // Restart the DN; the NN then tracks 1 maintenance replica and 3 live replicas.
    getCluster().restartDataNode(dnProp, true);
    getCluster().waitActive();
    assertNull(checkWithRetry(ns, fileSys, file, replicas, nodeOutofService));
    // Put the node in service, a redundant replica should be removed.
    putNodeInService(0, nodeOutofService.getDatanodeUuid());
    assertNull(checkWithRetry(ns, fileSys, file, replicas, null));
    cleanupFile(fileSys, file);
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), FileSystem (org.apache.hadoop.fs.FileSystem), FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem), Test (org.junit.Test)
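
The scenario leans on FSNamesystem's datanode statistics; here is a hedged sketch of the spot-checks each stage implies. The maintenance counter getters are assumed from FSNamesystem's metrics and are not part of the original test.

    // Hypothetical spot-checks between the steps above:
    assertEquals(1, ns.getNumInMaintenanceLiveDataNodes());  // after step a
    // ... maintenance node stopped ...
    assertEquals(1, ns.getNumInMaintenanceDeadDataNodes());  // after step b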

Example 9 with FSNamesystem

use of org.apache.hadoop.hdfs.server.namenode.FSNamesystem in project hadoop by apache.

the class TestMaintenanceState method testFileCloseAfterEnteringMaintenance.

@Test(timeout = 120000)
public void testFileCloseAfterEnteringMaintenance() throws Exception {
    LOG.info("Starting testFileCloseAfterEnteringMaintenance");
    int expirationInMs = 30 * 1000;
    int numDataNodes = 3;
    int numNameNodes = 1;
    getConf().setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, 2);
    startCluster(numNameNodes, numDataNodes);
    getCluster().waitActive();
    FSNamesystem fsn = getCluster().getNameNode().getNamesystem();
    List<String> hosts = new ArrayList<>();
    for (DataNode dn : getCluster().getDataNodes()) {
        hosts.add(dn.getDisplayName());
        putNodeInService(0, dn.getDatanodeUuid());
    }
    assertEquals(numDataNodes, fsn.getNumLiveDataNodes());
    Path openFile = new Path("/testClosingFileInMaintenance.dat");
    // Write 2 blocks of data to openFile.
    writeFile(getCluster().getFileSystem(), openFile, (short) 3);
    // Append some more data and keep the file open.
    FSDataOutputStream fsDataOutputStream = getCluster().getFileSystem().append(openFile);
    byte[] bytes = new byte[1024];
    fsDataOutputStream.write(bytes);
    fsDataOutputStream.hsync();
    LocatedBlocks lbs = NameNodeAdapter.getBlockLocations(getCluster().getNameNode(0), openFile.toString(), 0, 3 * blockSize);
    DatanodeInfo[] dnInfos4LastBlock = lbs.getLastLocatedBlock().getLocations();
    // Request maintenance for the first two DataNodes hosting the last block.
    takeNodeOutofService(0, Lists.newArrayList(dnInfos4LastBlock[0].getDatanodeUuid(), dnInfos4LastBlock[1].getDatanodeUuid()), Time.now() + expirationInMs, null, null, AdminStates.ENTERING_MAINTENANCE);
    // Closing the file should succeed even while the last block's
    // nodes are entering maintenance.
    fsDataOutputStream.close();
    cleanupFile(getCluster().getFileSystem(), openFile);
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), ArrayList (java.util.ArrayList), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem), Test (org.junit.Test)
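
A hedged assertion that could sit just before the close() call, confirming both replica hosts really are transitioning; getNumEnteringMaintenanceDataNodes is assumed from FSNamesystem's metrics, not taken from the test.

    // Hypothetical pre-close check: both targeted nodes should be
    // ENTERING_MAINTENANCE at this point.
    assertEquals(2, fsn.getNumEnteringMaintenanceDataNodes());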

Example 10 with FSNamesystem

use of org.apache.hadoop.hdfs.server.namenode.FSNamesystem in project hadoop by apache.

the class TestMaintenanceState method testExpectedReplication.

private void testExpectedReplication(int replicationFactor, int expectedReplicasInRead) throws IOException {
    setup();
    startCluster(1, 5);
    final Path file = new Path("/testExpectedReplication.dat");
    final FileSystem fileSys = getCluster().getFileSystem(0);
    final FSNamesystem ns = getCluster().getNamesystem(0);
    writeFile(fileSys, file, replicationFactor, 1);
    DatanodeInfo nodeOutofService = takeNodeOutofService(0, getFirstBlockFirstReplicaUuid(fileSys, file), Long.MAX_VALUE, null, AdminStates.IN_MAINTENANCE);
    // The block should be replicated to another datanode to meet the
    // expected replication count.
    assertNull(checkWithRetry(ns, fileSys, file, expectedReplicasInRead, nodeOutofService));
    cleanupFile(fileSys, file);
    teardown();
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), FileSystem (org.apache.hadoop.fs.FileSystem), FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem)
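
Being private, this helper needs a driver; the following is a plausible sketch, not the project's actual caller. With 5 datanodes and one taken into maintenance, re-replication should restore the full factor, so reads are expected to see replicationFactor replicas.

    @Test(timeout = 360000)
    public void testExpectedReplications() throws IOException {
        // Hypothetical driver pairing each replication factor with the replica
        // count a reader should still observe after re-replication.
        testExpectedReplication(2, 2);
        testExpectedReplication(3, 3);
        testExpectedReplication(4, 4);
    }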

Aggregations

FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 77 usages
Test (org.junit.Test): 59 usages
Path (org.apache.hadoop.fs.Path): 51 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 41 usages
Configuration (org.apache.hadoop.conf.Configuration): 37 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 27 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 25 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 23 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 19 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 14 usages
ArrayList (java.util.ArrayList): 12 usages
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 12 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 9 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 7 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 7 usages
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 6 usages
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 6 usages
File (java.io.File): 5 usages
IOException (java.io.IOException): 5 usages
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 5 usages