Example 11 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in the Apache Hadoop project.

From class TestPipelinesFailover, method testFailoverRightBeforeCommitSynchronization.

/**
   * Test the scenario where the NN fails over after issuing a block
   * synchronization request, but before it is committed. The
   * DN running the recovery should then fail to commit the synchronization
   * and a later retry will succeed.
   */
@Test(timeout = 30000)
public void testFailoverRightBeforeCommitSynchronization() throws Exception {
    final Configuration conf = new Configuration();
    // Disable permissions so that another user can recover the lease.
    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    FSDataOutputStream stm = null;
    final MiniDFSCluster cluster = newMiniCluster(conf, 3);
    try {
        cluster.waitActive();
        cluster.transitionToActive(0);
        Thread.sleep(500);
        LOG.info("Starting with NN 0 active");
        FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
        stm = fs.create(TEST_PATH);
        // write a half block
        AppendTestUtil.write(stm, 0, BLOCK_SIZE / 2);
        stm.hflush();
        // Look into the block manager on the active node for the block
        // under construction.
        NameNode nn0 = cluster.getNameNode(0);
        ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, TEST_PATH);
        DatanodeDescriptor expectedPrimary = DFSTestUtil.getExpectedPrimaryNode(nn0, blk);
        LOG.info("Expecting block recovery to be triggered on DN " + expectedPrimary);
        // Find the corresponding DN daemon, and spy on its connection to the
        // active.
        DataNode primaryDN = cluster.getDataNode(expectedPrimary.getIpcPort());
        DatanodeProtocolClientSideTranslatorPB nnSpy = InternalDataNodeTestUtils.spyOnBposToNN(primaryDN, nn0);
        // Delay the commitBlockSynchronization call
        DelayAnswer delayer = new DelayAnswer(LOG);
        Mockito.doAnswer(delayer).when(nnSpy).commitBlockSynchronization(
                Mockito.eq(blk),
                Mockito.anyInt(), // new genstamp
                Mockito.anyLong(), // new length
                Mockito.eq(true), // close file
                Mockito.eq(false), // delete block
                (DatanodeID[]) Mockito.anyObject(), // new targets
                (String[]) Mockito.anyObject()); // new target storages
        DistributedFileSystem fsOtherUser = createFsAsOtherUser(cluster, conf);
        assertFalse(fsOtherUser.recoverLease(TEST_PATH));
        LOG.info("Waiting for commitBlockSynchronization call from primary");
        delayer.waitForCall();
        LOG.info("Failing over to NN 1");
        cluster.transitionToStandby(0);
        cluster.transitionToActive(1);
        // Let the commitBlockSynchronization call go through, and check that
        // it failed with the correct exception.
        delayer.proceed();
        delayer.waitForResult();
        Throwable t = delayer.getThrown();
        if (t == null) {
            fail("commitBlockSynchronization call did not fail on standby");
        }
        GenericTestUtils.assertExceptionContains("Operation category WRITE is not supported", t);
        // Now, if we try again to recover the block, it should succeed on the new
        // active.
        loopRecoverLease(fsOtherUser, TEST_PATH);
        AppendTestUtil.check(fs, TEST_PATH, BLOCK_SIZE / 2);
    } finally {
        IOUtils.closeStream(stm);
        cluster.shutdown();
    }
}
Also used: NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode), MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster), Configuration(org.apache.hadoop.conf.Configuration), ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock), DelayAnswer(org.apache.hadoop.test.GenericTestUtils.DelayAnswer), DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem), DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor), DatanodeProtocolClientSideTranslatorPB(org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB), DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID), DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode), FileSystem(org.apache.hadoop.fs.FileSystem), FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream), Test(org.junit.Test)
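
The pivotal piece of Example 11 is the DelayAnswer: it holds the spied commitBlockSynchronization call until the test has failed over to NN 1, then lets the call run and records whether it threw. To make the mechanism concrete, here is a simplified, home-grown equivalent built on Mockito's Answer interface. This is an illustrative sketch only, not Hadoop's actual GenericTestUtils.DelayAnswer implementation; the class and field names are made up.

// Simplified stand-in for GenericTestUtils.DelayAnswer, for illustration only.
import java.util.concurrent.CountDownLatch;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

class SimpleDelayAnswer implements Answer<Object> {
    private final CountDownLatch called = new CountDownLatch(1);
    private final CountDownLatch allowed = new CountDownLatch(1);
    private final CountDownLatch done = new CountDownLatch(1);
    private volatile Throwable thrown;

    @Override
    public Object answer(InvocationOnMock invocation) throws Throwable {
        called.countDown();                      // signal: the spied method was invoked
        allowed.await();                         // block until the test calls proceed()
        try {
            return invocation.callRealMethod();  // run the real call
        } catch (Throwable t) {
            thrown = t;                          // remember the failure for the test to inspect
            throw t;
        } finally {
            done.countDown();
        }
    }

    void waitForCall() throws InterruptedException { called.await(); }
    void proceed() { allowed.countDown(); }
    void waitForResult() throws InterruptedException { done.await(); }
    Throwable getThrown() { return thrown; }
}

It would be wired up exactly as in the test: Mockito.doAnswer(delayer).when(spy).someMethod(...), then waitForCall(), change cluster state, proceed(), and finally waitForResult()/getThrown().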

Example 12 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in the Apache Hadoop project.

From class TestBootstrapStandbyWithQJM, method setup.

@Before
public void setup() throws Exception {
    Configuration conf = new Configuration();
    // Turn off IPC client caching, so that the suite can handle
    // the restart of the daemons between test cases.
    conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);
    MiniQJMHACluster miniQjmHaCluster = new MiniQJMHACluster.Builder(conf).setNumNameNodes(nnCount).build();
    cluster = miniQjmHaCluster.getDfsCluster();
    jCluster = miniQjmHaCluster.getJournalCluster();
    // make nn0 active
    cluster.transitionToActive(0);
    // do something to generate in-progress edit log data
    DistributedFileSystem dfs = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
    dfs.mkdirs(new Path("/test2"));
    dfs.close();
}
Also used: Path(org.apache.hadoop.fs.Path), Configuration(org.apache.hadoop.conf.Configuration), MiniQJMHACluster(org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster), DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem), Before(org.junit.Before)

Example 13 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in the Apache Hadoop project.

From class TestHAMetrics, method testHAInodeCount.

@Test
public void testHAInodeCount() throws Exception {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, Integer.MAX_VALUE);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).build();
    FileSystem fs = null;
    try {
        cluster.waitActive();
        FSNamesystem nn0 = cluster.getNamesystem(0);
        FSNamesystem nn1 = cluster.getNamesystem(1);
        cluster.transitionToActive(0);
        fs = HATestUtil.configureFailoverFs(cluster, conf);
        DFSTestUtil.createFile(fs, new Path("/testHAInodeCount1"), 10, (short) 1, 1L);
        DFSTestUtil.createFile(fs, new Path("/testHAInodeCount2"), 10, (short) 1, 1L);
        DFSTestUtil.createFile(fs, new Path("/testHAInodeCount3"), 10, (short) 1, 1L);
        DFSTestUtil.createFile(fs, new Path("/testHAInodeCount4"), 10, (short) 1, 1L);
        // 1 dir and 4 files
        assertEquals(5, nn0.getFilesTotal());
        // The SBN still has one dir, which is "/".
        assertEquals(1, nn1.getFilesTotal());
        // Save fsimage so that nn does not build up namesystem by replaying
        // edits, but load from the image.
        ((DistributedFileSystem) fs).setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        ((DistributedFileSystem) fs).saveNamespace();
        // Flip the two namenodes and restart the standby, which will load
        // the fsimage.
        cluster.transitionToStandby(0);
        cluster.transitionToActive(1);
        cluster.restartNameNode(0);
        assertEquals("standby", nn0.getHAState());
        // The restarted standby should report the correct count
        nn0 = cluster.getNamesystem(0);
        assertEquals(5, nn0.getFilesTotal());
    } finally {
        IOUtils.cleanup(LOG, fs);
        cluster.shutdown();
    }
}
Also used: Path(org.apache.hadoop.fs.Path), MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster), Configuration(org.apache.hadoop.conf.Configuration), DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem), FileSystem(org.apache.hadoop.fs.FileSystem), FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem), Test(org.junit.Test)
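
The SAFEMODE_ENTER/saveNamespace step in this example is the standard way to force a fresh fsimage so that a restarted NameNode loads from the image instead of replaying edits (Example 15 below uses the same calls before locating its fsimage). A minimal sketch of that sequence, assuming dfs is a live DistributedFileSystem handle from a test cluster:

// Force a checkpoint: enter safe mode, save the namespace, leave safe mode.
// Sketch only; assumes a running cluster and a DistributedFileSystem handle.
import java.io.IOException;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;

class SaveNamespaceSketch {
    static void checkpoint(DistributedFileSystem dfs) throws IOException {
        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);  // block further namespace changes
        dfs.saveNamespace();                              // write a new fsimage
        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);  // resume normal operation
    }
}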

Example 14 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in the Apache Hadoop project.

From class TestHASafeMode, method testOpenFileWhenNNAndClientCrashAfterAddBlock.

/** Test NN crash and client crash/stuck immediately after block allocation */
@Test(timeout = 100000)
public void testOpenFileWhenNNAndClientCrashAfterAddBlock() throws Exception {
    cluster.getConfiguration(0).set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "1.0f");
    String testData = "testData";
    // Make sure we write the full block before creating the dummy block at the NN.
    cluster.getConfiguration(0).setInt("io.bytes.per.checksum", testData.length());
    cluster.restartNameNode(0);
    try {
        cluster.waitActive();
        cluster.transitionToActive(0);
        cluster.transitionToStandby(1);
        DistributedFileSystem dfs = cluster.getFileSystem(0);
        String pathString = "/tmp1.txt";
        Path filePath = new Path(pathString);
        FSDataOutputStream create = dfs.create(filePath, FsPermission.getDefault(), true, 1024, (short) 3, testData.length(), null);
        create.write(testData.getBytes());
        create.hflush();
        long fileId = ((DFSOutputStream) create.getWrappedStream()).getFileId();
        FileStatus fileStatus = dfs.getFileStatus(filePath);
        DFSClient client = DFSClientAdapter.getClient(dfs);
        // add one dummy block at NN, but not write to DataNode
        ExtendedBlock previousBlock = DFSClientAdapter.getPreviousBlock(client, fileId);
        DFSClientAdapter.getNamenode(client).addBlock(pathString, client.getClientName(), new ExtendedBlock(previousBlock), new DatanodeInfo[0], DFSClientAdapter.getFileId((DFSOutputStream) create.getWrappedStream()), null, null);
        cluster.restartNameNode(0, true);
        cluster.restartDataNode(0);
        cluster.transitionToActive(0);
        // let the block reports be processed.
        Thread.sleep(2000);
        FSDataInputStream is = dfs.open(filePath);
        is.close();
        // initiate recovery
        dfs.recoverLease(filePath);
        assertTrue("Recovery also should be success", dfs.recoverLease(filePath));
    } finally {
        cluster.shutdown();
    }
}
Also used: Path(org.apache.hadoop.fs.Path), DFSClient(org.apache.hadoop.hdfs.DFSClient), FileStatus(org.apache.hadoop.fs.FileStatus), ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock), FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream), FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream), DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem), DFSOutputStream(org.apache.hadoop.hdfs.DFSOutputStream), Test(org.junit.Test)
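
One detail worth calling out: recoverLease() returns false while lease recovery is still in progress, which is why Example 11 polls it through loopRecoverLease and why this test invokes it more than once. Below is a hedged sketch of such a polling helper; the 30-second timeout and 1-second retry interval are arbitrary choices, not values taken from the Hadoop tests.

// Poll recoverLease() until the file's lease is recovered, or give up.
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

class RecoverLeaseSketch {
    static boolean recoverLease(DistributedFileSystem dfs, Path path)
            throws IOException, InterruptedException {
        long deadline = System.currentTimeMillis() + 30_000;
        while (System.currentTimeMillis() < deadline) {
            if (dfs.recoverLease(path)) {
                return true;      // lease recovered and file closed
            }
            Thread.sleep(1000);   // recovery still in progress; retry
        }
        return false;             // timed out
    }
}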

Example 15 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in the Apache Hadoop project.

From class TestOfflineImageViewer, method createOriginalFSImage.

// Create a populated namespace for later testing. Save its contents to a
// data structure and store its fsimage location.
// We only want to generate the fsimage file once and use it for
// multiple tests.
@BeforeClass
public static void createOriginalFSImage() throws IOException {
    tempDir = Files.createTempDir();
    MiniDFSCluster cluster = null;
    try {
        final ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager.getPolicyByID(HdfsConstants.XOR_2_1_POLICY_ID);
        Configuration conf = new Configuration();
        conf.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
        conf.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
        conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL, "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
        conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, ecPolicy.getName());
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        cluster.waitActive();
        DistributedFileSystem hdfs = cluster.getFileSystem();
        // Create a reasonable namespace
        for (int i = 0; i < NUM_DIRS; i++, dirCount++) {
            Path dir = new Path("/dir" + i);
            hdfs.mkdirs(dir);
            writtenFiles.put(dir.toString(), pathToFileEntry(hdfs, dir.toString()));
            for (int j = 0; j < FILES_PER_DIR; j++) {
                Path file = new Path(dir, "file" + j);
                FSDataOutputStream o = hdfs.create(file);
                o.write(23);
                o.close();
                writtenFiles.put(file.toString(), pathToFileEntry(hdfs, file.toString()));
            }
        }
        // Create an empty directory
        Path emptydir = new Path("/emptydir");
        hdfs.mkdirs(emptydir);
        dirCount++;
        writtenFiles.put(emptydir.toString(), hdfs.getFileStatus(emptydir));
        //Create a directory whose name should be escaped in XML
        Path invalidXMLDir = new Path("/dirContainingInvalidXMLCharhere");
        hdfs.mkdirs(invalidXMLDir);
        dirCount++;
        //Create a directory with sticky bits
        Path stickyBitDir = new Path("/stickyBit");
        hdfs.mkdirs(stickyBitDir);
        hdfs.setPermission(stickyBitDir, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, true));
        dirCount++;
        writtenFiles.put(stickyBitDir.toString(), hdfs.getFileStatus(stickyBitDir));
        // Get delegation tokens so we log the delegation token op
        Token<?>[] delegationTokens = hdfs.addDelegationTokens(TEST_RENEWER, null);
        for (Token<?> t : delegationTokens) {
            LOG.debug("got token " + t);
        }
        // Create INodeReference
        final Path src = new Path("/src");
        hdfs.mkdirs(src);
        dirCount++;
        writtenFiles.put(src.toString(), hdfs.getFileStatus(src));
        // Create snapshot and snapshotDiff.
        final Path orig = new Path("/src/orig");
        hdfs.mkdirs(orig);
        final Path file1 = new Path("/src/file");
        FSDataOutputStream o = hdfs.create(file1);
        o.write(23);
        o.write(45);
        o.close();
        hdfs.allowSnapshot(src);
        hdfs.createSnapshot(src, "snapshot");
        final Path dst = new Path("/dst");
        // Rename a directory in the snapshot directory to add snapshotCopy
        // field to the dirDiff entry.
        hdfs.rename(orig, dst);
        dirCount++;
        writtenFiles.put(dst.toString(), hdfs.getFileStatus(dst));
        // Truncate a file in the snapshot directory to add snapshotCopy and
        // blocks fields to the fileDiff entry.
        hdfs.truncate(file1, 1);
        writtenFiles.put(file1.toString(), hdfs.getFileStatus(file1));
        // Set XAttrs so the fsimage contains XAttr ops
        final Path xattr = new Path("/xattr");
        hdfs.mkdirs(xattr);
        dirCount++;
        hdfs.setXAttr(xattr, "user.a1", new byte[] { 0x31, 0x32, 0x33 });
        hdfs.setXAttr(xattr, "user.a2", new byte[] { 0x37, 0x38, 0x39 });
        // OIV should be able to handle empty value XAttrs
        hdfs.setXAttr(xattr, "user.a3", null);
        // OIV should be able to handle XAttr values that can't be expressed
        // as UTF8
        hdfs.setXAttr(xattr, "user.a4", new byte[] { -0x3d, 0x28 });
        writtenFiles.put(xattr.toString(), hdfs.getFileStatus(xattr));
        // Set ACLs
        hdfs.setAcl(xattr, Lists.newArrayList(aclEntry(ACCESS, USER, ALL), aclEntry(ACCESS, USER, "foo", ALL), aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(ACCESS, GROUP, "bar", READ_EXECUTE), aclEntry(ACCESS, OTHER, EXECUTE)));
        // Create an Erasure Coded dir
        Path ecDir = new Path("/ec");
        hdfs.mkdirs(ecDir);
        dirCount++;
        hdfs.getClient().setErasureCodingPolicy(ecDir.toString(), ecPolicy.getName());
        writtenFiles.put(ecDir.toString(), hdfs.getFileStatus(ecDir));
        // Create an empty Erasure Coded file
        Path emptyECFile = new Path(ecDir, "EmptyECFile.txt");
        hdfs.create(emptyECFile).close();
        writtenFiles.put(emptyECFile.toString(), pathToFileEntry(hdfs, emptyECFile.toString()));
        filesECCount++;
        // Create a small Erasure Coded file
        Path smallECFile = new Path(ecDir, "SmallECFile.txt");
        FSDataOutputStream out = hdfs.create(smallECFile);
        Random r = new Random();
        byte[] bytes = new byte[1024 * 10];
        r.nextBytes(bytes);
        out.write(bytes);
        writtenFiles.put(smallECFile.toString(), pathToFileEntry(hdfs, smallECFile.toString()));
        filesECCount++;
        // Write results to the fsimage file
        hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
        hdfs.saveNamespace();
        hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
        // Determine location of fsimage file
        originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil.getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
        if (originalFsimage == null) {
            throw new RuntimeException("Didn't generate or can't find fsimage");
        }
        LOG.debug("original FS image file is " + originalFsimage);
    } finally {
        if (cluster != null)
            cluster.shutdown();
    }
}
Also used: Path(org.apache.hadoop.fs.Path), MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster), Configuration(org.apache.hadoop.conf.Configuration), ErasureCodingPolicy(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy), Token(org.apache.hadoop.security.token.Token), DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem), Random(java.util.Random), FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream), FsPermission(org.apache.hadoop.fs.permission.FsPermission), BeforeClass(org.junit.BeforeClass)

Aggregations

DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 252
Test (org.junit.Test): 175
Path (org.apache.hadoop.fs.Path): 169
Configuration (org.apache.hadoop.conf.Configuration): 126
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 126
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 86
IOException (java.io.IOException): 63
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 36
FileSystem (org.apache.hadoop.fs.FileSystem): 31
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 31
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 26
URI (java.net.URI): 24
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 22
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 19
AccessControlException (org.apache.hadoop.security.AccessControlException): 19
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 18
Matchers.anyString (org.mockito.Matchers.anyString): 18
FileStatus (org.apache.hadoop.fs.FileStatus): 16
ArrayList (java.util.ArrayList): 14
CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo): 14
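
As the counts above suggest, the dominant pattern across these examples is a JUnit test that starts a MiniDFSCluster, obtains its DistributedFileSystem, performs a few namespace operations, and shuts the cluster down. The skeleton below is a minimal, self-contained sketch of that pattern; the class name and path are illustrative and do not come from the Hadoop sources.

// Skeleton of the MiniDFSCluster + DistributedFileSystem pattern used above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Test;
import static org.junit.Assert.assertTrue;

public class TestDistributedFileSystemSketch {

    @Test
    public void testMkdirsOnMiniCluster() throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        try {
            cluster.waitActive();                        // wait for the NN and DN to come up
            DistributedFileSystem dfs = cluster.getFileSystem();
            Path dir = new Path("/sketchDir");
            assertTrue(dfs.mkdirs(dir));                 // simple namespace operation
            assertTrue(dfs.exists(dir));
        } finally {
            cluster.shutdown();                          // always tear the cluster down
        }
    }
}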