Example 46 with FSDataOutputStream

Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

From the class TestHASafeMode, method testOpenFileWhenNNAndClientCrashAfterAddBlock.

/** Test an NN crash with the client crashing/hanging immediately after block allocation. */
@Test(timeout = 100000)
public void testOpenFileWhenNNAndClientCrashAfterAddBlock() throws Exception {
    cluster.getConfiguration(0).set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "1.0f");
    String testData = "testData";
    // Ensure the full block is written before the dummy block is added at the NN.
    cluster.getConfiguration(0).setInt("io.bytes.per.checksum", testData.length());
    cluster.restartNameNode(0);
    try {
        cluster.waitActive();
        cluster.transitionToActive(0);
        cluster.transitionToStandby(1);
        DistributedFileSystem dfs = cluster.getFileSystem(0);
        String pathString = "/tmp1.txt";
        Path filePath = new Path(pathString);
        FSDataOutputStream create = dfs.create(filePath, FsPermission.getDefault(), true, 1024, (short) 3, testData.length(), null);
        create.write(testData.getBytes());
        create.hflush();
        long fileId = ((DFSOutputStream) create.getWrappedStream()).getFileId();
        FileStatus fileStatus = dfs.getFileStatus(filePath);
        DFSClient client = DFSClientAdapter.getClient(dfs);
        // Add one dummy block at the NN, but do not write it to any DataNode.
        ExtendedBlock previousBlock = DFSClientAdapter.getPreviousBlock(client, fileId);
        DFSClientAdapter.getNamenode(client).addBlock(pathString, client.getClientName(), new ExtendedBlock(previousBlock), new DatanodeInfo[0], DFSClientAdapter.getFileId((DFSOutputStream) create.getWrappedStream()), null, null);
        cluster.restartNameNode(0, true);
        cluster.restartDataNode(0);
        cluster.transitionToActive(0);
        // Give the block reports time to be processed.
        Thread.sleep(2000);
        FSDataInputStream is = dfs.open(filePath);
        is.close();
        // Initiate lease recovery; a second call reports whether recovery succeeded.
        dfs.recoverLease(filePath);
        assertTrue("Recovery should also succeed", dfs.recoverLease(filePath));
    } finally {
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DFSClient (org.apache.hadoop.hdfs.DFSClient), FileStatus (org.apache.hadoop.fs.FileStatus), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), DFSOutputStream (org.apache.hadoop.hdfs.DFSOutputStream), Test (org.junit.Test)
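A note on the pattern above: hflush() publishes the written bytes to new readers, but the file remains under construction until close(), and recoverLease() is how another client forces the NameNode to finalize such a file. Below is a minimal, self-contained sketch of that create/hflush/recoverLease sequence; the class name, path, and payload are illustrative, not taken from the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class HflushRecoverLeaseSketch {
    public static void main(String[] args) throws Exception {
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration()).build();
        try {
            cluster.waitActive();
            DistributedFileSystem dfs = cluster.getFileSystem();
            Path p = new Path("/demo.txt"); // illustrative path
            FSDataOutputStream out = dfs.create(p);
            out.write("demo data".getBytes());
            // hflush() makes the bytes visible to new readers, but the file
            // stays under construction until the stream is closed.
            out.hflush();
            // If the writer dies without close(), any client can ask the
            // NameNode to recover the lease; the call may return false until
            // recovery completes, so production code retries with a delay.
            boolean closed = dfs.recoverLease(p);
            System.out.println("lease recovered immediately: " + closed);
        } finally {
            cluster.shutdown();
        }
    }
}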

Example 47 with FSDataOutputStream

Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

From the class TestOfflineImageViewer, method createOriginalFSImage.

// Create a populated namespace for later testing. Save its contents to a
// data structure and store its fsimage location.
// We only want to generate the fsimage file once and use it for
// multiple tests.
@BeforeClass
public static void createOriginalFSImage() throws IOException {
    tempDir = Files.createTempDir();
    MiniDFSCluster cluster = null;
    try {
        final ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager.getPolicyByID(HdfsConstants.XOR_2_1_POLICY_ID);
        Configuration conf = new Configuration();
        conf.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
        conf.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
        conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL, "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
        conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, ecPolicy.getName());
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        cluster.waitActive();
        DistributedFileSystem hdfs = cluster.getFileSystem();
        // Create a reasonable namespace
        for (int i = 0; i < NUM_DIRS; i++, dirCount++) {
            Path dir = new Path("/dir" + i);
            hdfs.mkdirs(dir);
            writtenFiles.put(dir.toString(), pathToFileEntry(hdfs, dir.toString()));
            for (int j = 0; j < FILES_PER_DIR; j++) {
                Path file = new Path(dir, "file" + j);
                FSDataOutputStream o = hdfs.create(file);
                o.write(23);
                o.close();
                writtenFiles.put(file.toString(), pathToFileEntry(hdfs, file.toString()));
            }
        }
        // Create an empty directory
        Path emptydir = new Path("/emptydir");
        hdfs.mkdirs(emptydir);
        dirCount++;
        writtenFiles.put(emptydir.toString(), hdfs.getFileStatus(emptydir));
        // Create a directory whose name should be escaped in XML
        Path invalidXMLDir = new Path("/dirContainingInvalidXMLCharhere");
        hdfs.mkdirs(invalidXMLDir);
        dirCount++;
        // Create a directory with the sticky bit set
        Path stickyBitDir = new Path("/stickyBit");
        hdfs.mkdirs(stickyBitDir);
        hdfs.setPermission(stickyBitDir, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, true));
        dirCount++;
        writtenFiles.put(stickyBitDir.toString(), hdfs.getFileStatus(stickyBitDir));
        // Get delegation tokens so we log the delegation token op
        Token<?>[] delegationTokens = hdfs.addDelegationTokens(TEST_RENEWER, null);
        for (Token<?> t : delegationTokens) {
            LOG.debug("got token " + t);
        }
        // Create INodeReference
        final Path src = new Path("/src");
        hdfs.mkdirs(src);
        dirCount++;
        writtenFiles.put(src.toString(), hdfs.getFileStatus(src));
        // Create snapshot and snapshotDiff.
        final Path orig = new Path("/src/orig");
        hdfs.mkdirs(orig);
        final Path file1 = new Path("/src/file");
        FSDataOutputStream o = hdfs.create(file1);
        o.write(23);
        o.write(45);
        o.close();
        hdfs.allowSnapshot(src);
        hdfs.createSnapshot(src, "snapshot");
        final Path dst = new Path("/dst");
        // Rename a directory in the snapshot directory to add snapshotCopy
        // field to the dirDiff entry.
        hdfs.rename(orig, dst);
        dirCount++;
        writtenFiles.put(dst.toString(), hdfs.getFileStatus(dst));
        // Truncate a file in the snapshot directory to add snapshotCopy and
        // blocks fields to the fileDiff entry.
        hdfs.truncate(file1, 1);
        writtenFiles.put(file1.toString(), hdfs.getFileStatus(file1));
        // Set XAttrs so the fsimage contains XAttr ops
        final Path xattr = new Path("/xattr");
        hdfs.mkdirs(xattr);
        dirCount++;
        hdfs.setXAttr(xattr, "user.a1", new byte[] { 0x31, 0x32, 0x33 });
        hdfs.setXAttr(xattr, "user.a2", new byte[] { 0x37, 0x38, 0x39 });
        // OIV should be able to handle empty value XAttrs
        hdfs.setXAttr(xattr, "user.a3", null);
        // OIV should be able to handle XAttr values that can't be expressed
        // as UTF8
        hdfs.setXAttr(xattr, "user.a4", new byte[] { -0x3d, 0x28 });
        writtenFiles.put(xattr.toString(), hdfs.getFileStatus(xattr));
        // Set ACLs
        hdfs.setAcl(xattr, Lists.newArrayList(aclEntry(ACCESS, USER, ALL), aclEntry(ACCESS, USER, "foo", ALL), aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(ACCESS, GROUP, "bar", READ_EXECUTE), aclEntry(ACCESS, OTHER, EXECUTE)));
        // Create an Erasure Coded dir
        Path ecDir = new Path("/ec");
        hdfs.mkdirs(ecDir);
        dirCount++;
        hdfs.getClient().setErasureCodingPolicy(ecDir.toString(), ecPolicy.getName());
        writtenFiles.put(ecDir.toString(), hdfs.getFileStatus(ecDir));
        // Create an empty Erasure Coded file
        Path emptyECFile = new Path(ecDir, "EmptyECFile.txt");
        hdfs.create(emptyECFile).close();
        writtenFiles.put(emptyECFile.toString(), pathToFileEntry(hdfs, emptyECFile.toString()));
        filesECCount++;
        // Create a small Erasure Coded file
        Path smallECFile = new Path(ecDir, "SmallECFile.txt");
        FSDataOutputStream out = hdfs.create(smallECFile);
        Random r = new Random();
        byte[] bytes = new byte[1024 * 10];
        r.nextBytes(bytes);
        out.write(bytes);
        // Close the stream so the file is finalized before the fsimage is saved.
        out.close();
        writtenFiles.put(smallECFile.toString(), pathToFileEntry(hdfs, smallECFile.toString()));
        filesECCount++;
        // Write results to the fsimage file
        hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
        hdfs.saveNamespace();
        hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
        // Determine location of fsimage file
        originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil.getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
        if (originalFsimage == null) {
            throw new RuntimeException("Didn't generate or can't find fsimage");
        }
        LOG.debug("original FS image file is " + originalFsimage);
    } finally {
        if (cluster != null)
            cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy), Token (org.apache.hadoop.security.token.Token), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), Random (java.util.Random), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), FsPermission (org.apache.hadoop.fs.permission.FsPermission), BeforeClass (org.junit.BeforeClass)
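Once originalFsimage is captured, the tests in this class feed it to the offline image viewer. Below is a hedged sketch of that step, assuming the OfflineImageViewerPB.run(String[]) entry point that the Hadoop test suite itself invokes; both file paths are illustrative.

import org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewerPB;

public class OivXmlDumpSketch {
    public static void main(String[] args) throws Exception {
        // Convert a saved fsimage to XML with the protobuf-based offline
        // image viewer. "-p XML" selects the processor; -i/-o are the
        // input and output paths (both illustrative here).
        int status = OfflineImageViewerPB.run(new String[] {
            "-p", "XML",
            "-i", "/tmp/fsimage_0000000000000000024",
            "-o", "/tmp/fsimage.xml"
        });
        System.exit(status);
    }
}

The same conversion is available from the shell as hdfs oiv -p XML -i <fsimage> -o <output>.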

Example 48 with FSDataOutputStream

Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

From the class TestOfflineImageViewerForAcl, method createOriginalFSImage.

/**
   * Create a populated namespace for later testing. Save its contents to a
   * data structure and store its fsimage location.
   * We only want to generate the fsimage file once and use it for
   * multiple tests.
   */
@BeforeClass
public static void createOriginalFSImage() throws IOException {
    MiniDFSCluster cluster = null;
    try {
        Configuration conf = new Configuration();
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
        cluster = new MiniDFSCluster.Builder(conf).build();
        cluster.waitActive();
        DistributedFileSystem hdfs = cluster.getFileSystem();
        // Create a reasonable namespace with ACLs
        Path dir = new Path("/dirWithNoAcl");
        hdfs.mkdirs(dir);
        writtenAcls.put(dir.toString(), hdfs.getAclStatus(dir));
        dir = new Path("/dirWithDefaultAcl");
        hdfs.mkdirs(dir);
        hdfs.setAcl(dir, Lists.newArrayList(aclEntry(DEFAULT, USER, ALL), aclEntry(DEFAULT, USER, "foo", ALL), aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE)));
        writtenAcls.put(dir.toString(), hdfs.getAclStatus(dir));
        Path file = new Path("/noAcl");
        FSDataOutputStream o = hdfs.create(file);
        o.write(23);
        o.close();
        writtenAcls.put(file.toString(), hdfs.getAclStatus(file));
        file = new Path("/withAcl");
        o = hdfs.create(file);
        o.write(23);
        o.close();
        hdfs.setAcl(file, Lists.newArrayList(aclEntry(ACCESS, USER, READ_WRITE), aclEntry(ACCESS, USER, "foo", READ), aclEntry(ACCESS, GROUP, READ), aclEntry(ACCESS, OTHER, NONE)));
        writtenAcls.put(file.toString(), hdfs.getAclStatus(file));
        file = new Path("/withSeveralAcls");
        o = hdfs.create(file);
        o.write(23);
        o.close();
        hdfs.setAcl(file, Lists.newArrayList(aclEntry(ACCESS, USER, READ_WRITE), aclEntry(ACCESS, USER, "foo", READ_WRITE), aclEntry(ACCESS, USER, "bar", READ), aclEntry(ACCESS, GROUP, READ), aclEntry(ACCESS, GROUP, "group", READ), aclEntry(ACCESS, OTHER, NONE)));
        writtenAcls.put(file.toString(), hdfs.getAclStatus(file));
        // Write results to the fsimage file
        hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);
        hdfs.saveNamespace();
        // Determine the location of the fsimage file
        originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil.getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
        if (originalFsimage == null) {
            throw new RuntimeException("Didn't generate or can't find fsimage");
        }
        LOG.debug("original FS image file is " + originalFsimage);
    } finally {
        if (cluster != null)
            cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), BeforeClass (org.junit.BeforeClass)
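The aclEntry(...) calls above come from a static test helper; outside the test tree the same entries are assembled with AclEntry.Builder. A minimal sketch of what aclEntry(ACCESS, USER, "foo", ALL) expands to (the name "foo" is the test's example principal):

import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;

public class AclEntrySketch {
    public static void main(String[] args) {
        AclEntry entry = new AclEntry.Builder()
            .setScope(AclEntryScope.ACCESS) // ACCESS, or DEFAULT for entries inherited by children
            .setType(AclEntryType.USER)     // USER, GROUP, MASK, or OTHER
            .setName("foo")                 // named user; omit for the owning user
            .setPermission(FsAction.ALL)
            .build();
        System.out.println(entry);          // prints "user:foo:rwx"
    }
}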

Example 49 with FSDataOutputStream

Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

From the class TestOfflineImageViewerForContentSummary, method createOriginalFSImage.

/**
   * Create a populated namespace for later testing. Save its contents to a
   * data structure and store its fsimage location. We only want to generate
   * the fsimage file once and use it for multiple tests.
   */
@BeforeClass
public static void createOriginalFSImage() throws IOException {
    MiniDFSCluster cluster = null;
    Configuration conf = new Configuration();
    try {
        cluster = new MiniDFSCluster.Builder(conf).build();
        cluster.waitActive();
        DistributedFileSystem hdfs = cluster.getFileSystem();
        Path parentDir = new Path("/parentDir");
        Path childDir1 = new Path(parentDir, "childDir1");
        Path childDir2 = new Path(parentDir, "childDir2");
        Path dirForLinks = new Path("/dirForLinks");
        hdfs.mkdirs(parentDir);
        hdfs.mkdirs(childDir1);
        hdfs.mkdirs(childDir2);
        hdfs.mkdirs(dirForLinks);
        hdfs.setQuota(parentDir, 10, 1024 * 1024 * 1024);
        Path file1OnParentDir = new Path(parentDir, "file1");
        try (FSDataOutputStream o = hdfs.create(file1OnParentDir)) {
            o.write("123".getBytes());
        }
        try (FSDataOutputStream o = hdfs.create(new Path(parentDir, "file2"))) {
            o.write("1234".getBytes());
        }
        try (FSDataOutputStream o = hdfs.create(new Path(childDir1, "file3"))) {
            o.write("123".getBytes());
        }
        try (FSDataOutputStream o = hdfs.create(new Path(parentDir, "file4"))) {
            o.write("123".getBytes());
        }
        Path link1 = new Path("/link1");
        Path link2 = new Path("/dirForLinks/linkfordir1");
        hdfs.createSymlink(new Path("/parentDir/file4"), link1, true);
        summaryFromDFS = hdfs.getContentSummary(parentDir);
        emptyDirSummaryFromDFS = hdfs.getContentSummary(childDir2);
        fileSummaryFromDFS = hdfs.getContentSummary(file1OnParentDir);
        symLinkSummaryFromDFS = hdfs.getContentSummary(link1);
        hdfs.createSymlink(childDir1, link2, true);
        symLinkSummaryForDirContainsFromDFS = hdfs.getContentSummary(new Path("/dirForLinks"));
        // Write results to the fsimage file
        hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);
        hdfs.saveNamespace();
        // Determine the location of the fsimage file
        originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil.getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
        if (originalFsimage == null) {
            throw new RuntimeException("Didn't generate or can't find fsimage");
        }
        LOG.debug("original FS image file is " + originalFsimage);
    } finally {
        if (cluster != null)
            cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), BeforeClass (org.junit.BeforeClass)
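The summaries captured above are org.apache.hadoop.fs.ContentSummary objects, which the OIV tests later compare field by field against the viewer's output. A minimal sketch of the fields involved (the path is illustrative and assumes a reachable default filesystem):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ContentSummarySketch {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        ContentSummary cs = fs.getContentSummary(new Path("/parentDir"));
        System.out.println("length (bytes): " + cs.getLength());
        System.out.println("files:          " + cs.getFileCount());
        System.out.println("directories:    " + cs.getDirectoryCount());
        System.out.println("name quota:     " + cs.getQuota());      // -1 when unset
        System.out.println("space quota:    " + cs.getSpaceQuota()); // -1 when unset
    }
}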

Example 50 with FSDataOutputStream

Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

From the class TestWebHdfsFileSystemContract, method testRootDir.

public void testRootDir() throws IOException {
    final Path root = new Path("/");
    final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem) fs;
    final URL url = webhdfs.toUrl(GetOpParam.Op.NULL, root);
    WebHdfsFileSystem.LOG.info("null url=" + url);
    Assert.assertTrue(url.toString().contains("v1"));
    // Test root permission
    final FileStatus status = fs.getFileStatus(root);
    assertTrue(status != null);
    assertEquals(0777, status.getPermission().toShort());
    // Deleting root must fail
    assertFalse(fs.delete(root, true));
    // Creating a file at the root path must fail
    try {
        final FSDataOutputStream out = fs.create(root);
        out.write(1);
        out.close();
        fail();
    } catch (IOException e) {
        WebHdfsFileSystem.LOG.info("This is expected.", e);
    }
    // Opening the root path as a file must fail
    try {
        final FSDataInputStream in = fs.open(root);
        in.read();
        fail();
    } catch (IOException e) {
        WebHdfsFileSystem.LOG.info("This is expected.", e);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), FileStatus (org.apache.hadoop.fs.FileStatus), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), IOException (java.io.IOException), URL (java.net.URL)
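The contract test above runs against a WebHdfsFileSystem wired up by its test harness. A standalone client reaches the same REST API through a webhdfs:// URI; a minimal sketch follows, where the host and port are placeholders (the NameNode HTTP port defaults to 9870 in Hadoop 3.x and 50070 in 2.x):

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WebHdfsClientSketch {
    public static void main(String[] args) throws Exception {
        // Connect over the WebHDFS REST API rather than the HDFS RPC client.
        FileSystem fs = FileSystem.get(
            URI.create("webhdfs://namenode.example.com:9870/"), new Configuration());
        FileStatus root = fs.getFileStatus(new Path("/"));
        System.out.println("root permission: " + root.getPermission());
    }
}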

Aggregations

FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 789
Path (org.apache.hadoop.fs.Path): 618
Test (org.junit.Test): 345
FileSystem (org.apache.hadoop.fs.FileSystem): 248
Configuration (org.apache.hadoop.conf.Configuration): 190
IOException (java.io.IOException): 163
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 94
IgfsPath (org.apache.ignite.igfs.IgfsPath): 78
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 66
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 65
FileStatus (org.apache.hadoop.fs.FileStatus): 57
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 45
CreateFlag (org.apache.hadoop.fs.CreateFlag): 43
FileNotFoundException (java.io.FileNotFoundException): 40
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 40
ArrayList (java.util.ArrayList): 38
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 33
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 31
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 30
Random (java.util.Random): 28