
Example 76 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

From the class TestEditLog, method testReadActivelyUpdatedLog.

/**
   * Tests that reading an in-progress edit log never goes past the last
   * synced transaction, even after the bytes beyond that point have been
   * corrupted.
   *
   * @throws Exception
   */
@Test
public void testReadActivelyUpdatedLog() throws Exception {
    final TestAppender appender = new TestAppender();
    LogManager.getRootLogger().addAppender(appender);
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
    // Set single handler thread, so all transactions hit same thread-local ops.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_KEY, 1);
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        cluster.waitActive();
        FSImage fsimage = cluster.getNamesystem().getFSImage();
        StorageDirectory sd = fsimage.getStorage().getStorageDir(0);
        final DistributedFileSystem fileSys = cluster.getFileSystem();
        DFSInotifyEventInputStream events = fileSys.getInotifyEventStream();
        fileSys.mkdirs(new Path("/test"));
        fileSys.mkdirs(new Path("/test/dir1"));
        fileSys.delete(new Path("/test/dir1"), true);
        fsimage.getEditLog().logSync();
        fileSys.mkdirs(new Path("/test/dir2"));
        final File inProgressEdit = NNStorage.getInProgressEditsFile(sd, 1);
        assertTrue(inProgressEdit.exists());
        EditLogFileInputStream elis = new EditLogFileInputStream(inProgressEdit);
        FSEditLogOp op;
        long pos = 0;
        while (true) {
            op = elis.readOp();
            if (op != null && op.opCode != FSEditLogOpCodes.OP_INVALID) {
                pos = elis.getPosition();
            } else {
                break;
            }
        }
        elis.close();
        assertTrue(pos > 0);
        RandomAccessFile rwf = new RandomAccessFile(inProgressEdit, "rw");
        rwf.seek(pos);
        assertEquals((byte) -1, rwf.readByte());
        rwf.seek(pos + 1);
        rwf.writeByte(2);
        rwf.close();
        events.poll();
        String pattern = "Caught exception after reading (.*) ops";
        Pattern r = Pattern.compile(pattern);
        final List<LoggingEvent> log = appender.getLog();
        for (LoggingEvent event : log) {
            Matcher m = r.matcher(event.getRenderedMessage());
            if (m.find()) {
                fail("Should not try to read past latest syned edit log op");
            }
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
        LogManager.getRootLogger().removeAppender(appender);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Pattern (java.util.regex.Pattern), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), Matcher (java.util.regex.Matcher), StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), LoggingEvent (org.apache.log4j.spi.LoggingEvent), RandomAccessFile (java.io.RandomAccessFile), DFSInotifyEventInputStream (org.apache.hadoop.hdfs.DFSInotifyEventInputStream), File (java.io.File), Test (org.junit.Test)
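Example 76 opens a DFSInotifyEventInputStream but calls poll() only once, just to drive the read path. For context, here is a minimal sketch of how the inotify stream is normally consumed; the drainEvents helper name is hypothetical, and the sketch assumes the non-blocking no-argument poll(), which returns null once no further batch is ready.

import java.io.IOException;
import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.inotify.Event;
import org.apache.hadoop.hdfs.inotify.EventBatch;
import org.apache.hadoop.hdfs.inotify.MissingEventsException;

// Hypothetical helper: drain every inotify batch that is currently
// available and print the type of each event with its transaction id.
static void drainEvents(DistributedFileSystem fileSys) throws IOException, MissingEventsException {
    DFSInotifyEventInputStream events = fileSys.getInotifyEventStream();
    EventBatch batch;
    while ((batch = events.poll()) != null) {
        for (Event event : batch.getEvents()) {
            System.out.println(event.getEventType() + " at txid " + batch.getTxid());
        }
    }
}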

Example 77 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

From the class TestFSEditLogLoader, method testAddNewStripedBlock.

@Test
public void testAddNewStripedBlock() throws IOException {
    // start a cluster
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, testECPolicy.getName());
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9).build();
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        FSNamesystem fns = cluster.getNamesystem();
        String testDir = "/ec";
        String testFile = "testfile_001";
        String testFilePath = testDir + "/" + testFile;
        String clientName = "testUser1";
        String clientMachine = "testMachine1";
        long blkId = 1;
        long blkNumBytes = 1024;
        long timestamp = 1426222918;
        short blockNum = (short) testECPolicy.getNumDataUnits();
        short parityNum = (short) testECPolicy.getNumParityUnits();
        // set the erasure coding policy of the directory
        fs.mkdir(new Path(testDir), new FsPermission("755"));
        fs.getClient().getNamenode().setErasureCodingPolicy(testDir, testECPolicy.getName());
        // Create a file with a striped block
        Path p = new Path(testFilePath);
        DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
        fns.enterSafeMode(false);
        fns.saveNamespace(0, 0);
        fns.leaveSafeMode(false);
        // Add a striped block to the file
        BlockInfoStriped stripedBlk = new BlockInfoStriped(new Block(blkId, blkNumBytes, timestamp), testECPolicy);
        INodeFile file = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
        file.toUnderConstruction(clientName, clientMachine);
        file.addBlock(stripedBlk);
        fns.getEditLog().logAddBlock(testFilePath, file);
        TestINodeFile.toCompleteFile(file);
        // If the block loaded after the restart matches the one added above,
        // the edit log was applied to the fsimage successfully.
        cluster.restartNameNodes();
        cluster.waitActive();
        fns = cluster.getNamesystem();
        INodeFile inodeLoaded = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
        assertTrue(inodeLoaded.isStriped());
        BlockInfo[] blks = inodeLoaded.getBlocks();
        assertEquals(1, blks.length);
        assertEquals(blkId, blks[0].getBlockId());
        assertEquals(blkNumBytes, blks[0].getNumBytes());
        assertEquals(timestamp, blks[0].getGenerationStamp());
        assertEquals(blockNum, ((BlockInfoStriped) blks[0]).getDataBlockNum());
        assertEquals(parityNum, ((BlockInfoStriped) blks[0]).getParityBlockNum());
        cluster.shutdown();
        cluster = null;
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), Block (org.apache.hadoop.hdfs.protocol.Block), FsPermission (org.apache.hadoop.fs.permission.FsPermission), Test (org.junit.Test)
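Example 77 applies the erasure coding policy through NameNode internals (fs.getClient().getNamenode()). On the Hadoop 3.x client API the same pair of operations is exposed directly on DistributedFileSystem; the sketch below is a version-dependent illustration of that public route, with the "/ec" directory taken from the example and the policy name "RS-6-3-1024k" assumed as a placeholder.

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

// Sketch (Hadoop 3.x API): set an EC policy on a directory via the public
// DistributedFileSystem methods and read it back for verification.
static void applyAndVerifyEcPolicy(DistributedFileSystem fs) throws IOException {
    Path dir = new Path("/ec");
    fs.mkdirs(dir);
    fs.setErasureCodingPolicy(dir, "RS-6-3-1024k"); // assumed policy name
    ErasureCodingPolicy applied = fs.getErasureCodingPolicy(dir);
    System.out.println("data units: " + applied.getNumDataUnits()
        + ", parity units: " + applied.getNumParityUnits());
}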

Example 78 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

From the class TestFSImage, method testDigest.

/**
   * Ensure that the digest written by the saver equals the digest of the
   * file.
   */
@Test
public void testDigest() throws IOException {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        DistributedFileSystem fs = cluster.getFileSystem();
        fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        fs.saveNamespace();
        fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
        File currentDir = FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0).get(0);
        File fsimage = FSImageTestUtil.findNewestImageFile(currentDir.getAbsolutePath());
        assertEquals(MD5FileUtils.readStoredMd5ForFile(fsimage), MD5FileUtils.computeMd5ForFile(fsimage));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), File (java.io.File), Test (org.junit.Test)
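The comparison at the heart of testDigest also works as a standalone integrity check for any fsimage file, since each image is written with an .md5 sidecar. A minimal sketch; imageDigestMatches is a hypothetical helper, and imageFile is assumed to point at an fsimage under a NameNode current/ directory.

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.hdfs.util.MD5FileUtils;
import org.apache.hadoop.io.MD5Hash;

// Hypothetical helper: compare the digest recorded in the image's .md5
// sidecar against a digest computed over the image file itself.
static boolean imageDigestMatches(File imageFile) throws IOException {
    MD5Hash stored = MD5FileUtils.readStoredMd5ForFile(imageFile);
    MD5Hash computed = MD5FileUtils.computeMd5ForFile(imageFile);
    // readStoredMd5ForFile returns null when no .md5 sidecar exists
    return stored != null && stored.equals(computed);
}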

Example 79 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

From the class TestFSImage, method testHasNonEcBlockUsingStripedIDForLoadUCFile.

@Test
public void testHasNonEcBlockUsingStripedIDForLoadUCFile() throws IOException {
    // start a cluster
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9).build();
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        FSNamesystem fns = cluster.getNamesystem();
        String testDir = "/test_block_manager";
        String testFile = "testfile_loaducfile";
        String testFilePath = testDir + "/" + testFile;
        String clientName = "testUser_loaducfile";
        String clientMachine = "testMachine_loaducfile";
        long blkId = -1;
        long blkNumBytes = 1024;
        long timestamp = 1426222918;
        fs.mkdir(new Path(testDir), new FsPermission("755"));
        Path p = new Path(testFilePath);
        DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
        BlockInfoContiguous cBlk = new BlockInfoContiguous(new Block(blkId, blkNumBytes, timestamp), (short) 3);
        INodeFile file = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
        file.toUnderConstruction(clientName, clientMachine);
        file.addBlock(cBlk);
        fns.enterSafeMode(false);
        fns.saveNamespace(0, 0);
        cluster.restartNameNodes();
        cluster.waitActive();
        fns = cluster.getNamesystem();
        assertTrue(fns.getBlockManager().hasNonEcBlockUsingStripedID());
        cluster.shutdown();
        cluster = null;
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), Block (org.apache.hadoop.hdfs.protocol.Block), FsPermission (org.apache.hadoop.fs.permission.FsPermission), Test (org.junit.Test)
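The blkId = -1 in Example 79 is what makes the assertion fire: HDFS reserves the negative half of the block-ID space for striped (erasure-coded) block groups, so a contiguous block carrying a negative ID is exactly the legacy condition hasNonEcBlockUsingStripedID() reports. A sketch of that ID test, assuming the static BlockIdManager.isStripedBlockID helper from the blockmanagement package is available in this version:

import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager;

// Striped block groups live in the negative ID space, so a negative ID on
// a contiguous (non-EC) block collides with the striped range.
long blkId = -1;
boolean looksStriped = BlockIdManager.isStripedBlockID(blkId); // true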

Example 80 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

From the class TestFSImage, method testPersistHelper.

private void testPersistHelper(Configuration conf) throws IOException {
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).build();
        cluster.waitActive();
        FSNamesystem fsn = cluster.getNamesystem();
        DistributedFileSystem fs = cluster.getFileSystem();
        final Path dir = new Path("/abc/def");
        final Path file1 = new Path(dir, "f1");
        final Path file2 = new Path(dir, "f2");
        // create an empty file f1
        fs.create(file1).close();
        // create an under-construction file f2
        FSDataOutputStream out = fs.create(file2);
        out.writeBytes("hello");
        ((DFSOutputStream) out.getWrappedStream()).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
        // checkpoint
        fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        fs.saveNamespace();
        fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
        cluster.restartNameNode();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        assertTrue(fs.isDirectory(dir));
        assertTrue(fs.exists(file1));
        assertTrue(fs.exists(file2));
        // check internals of file2
        INodeFile file2Node = fsn.dir.getINode4Write(file2.toString()).asFile();
        assertEquals("hello".length(), file2Node.computeFileSize());
        assertTrue(file2Node.isUnderConstruction());
        BlockInfo[] blks = file2Node.getBlocks();
        assertEquals(1, blks.length);
        assertEquals(BlockUCState.UNDER_CONSTRUCTION, blks[0].getBlockUCState());
        // check lease manager
        Lease lease = fsn.leaseManager.getLease(file2Node);
        Assert.assertNotNull(lease);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Lease (org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), DFSOutputStream (org.apache.hadoop.hdfs.DFSOutputStream)

Aggregations

DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 252 uses
Test (org.junit.Test): 175 uses
Path (org.apache.hadoop.fs.Path): 169 uses
Configuration (org.apache.hadoop.conf.Configuration): 126 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 126 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 86 uses
IOException (java.io.IOException): 63 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 36 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 31 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 31 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 26 uses
URI (java.net.URI): 24 uses
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 22 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 19 uses
AccessControlException (org.apache.hadoop.security.AccessControlException): 19 uses
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 18 uses
Matchers.anyString (org.mockito.Matchers.anyString): 18 uses
FileStatus (org.apache.hadoop.fs.FileStatus): 16 uses
ArrayList (java.util.ArrayList): 14 uses
CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo): 14 uses
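Nearly every snippet above shares one scaffold: build a MiniDFSCluster, take its DistributedFileSystem, and shut the cluster down in a finally block. Isolated as a runnable sketch (the class name and the /demo path are placeholders, not taken from any example):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniClusterScaffold {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = null;
        try {
            // In-process single-DataNode cluster, as used throughout the examples
            cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
            cluster.waitActive();
            DistributedFileSystem fs = cluster.getFileSystem();
            fs.mkdirs(new Path("/demo"));
            System.out.println("created /demo: " + fs.exists(new Path("/demo")));
        } finally {
            // Always release the cluster's ports and storage directories
            if (cluster != null) {
                cluster.shutdown();
            }
        }
    }
}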