Example 41 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

The class TestViewFsDefaultValue, method testGetQuotaUsageWithStorageTypes.

/**
   * Test that getQuotaUsage can be retrieved on the client side if
   * storage types are defined.
   */
@Test
public void testGetQuotaUsageWithStorageTypes() throws IOException {
    FileSystem hFs = cluster.getFileSystem(0);
    final DistributedFileSystem dfs = (DistributedFileSystem) hFs;
    dfs.setQuotaByStorageType(testFileDirPath, StorageType.SSD, 500);
    dfs.setQuotaByStorageType(testFileDirPath, StorageType.DISK, 600);
    QuotaUsage qu = vfs.getQuotaUsage(testFileDirPath);
    assertEquals(500, qu.getTypeQuota(StorageType.SSD));
    assertEquals(600, qu.getTypeQuota(StorageType.DISK));
}
Also used : FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) QuotaUsage(org.apache.hadoop.fs.QuotaUsage) Test(org.junit.Test)
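
For reference, the same two calls work against any DistributedFileSystem handle outside the MiniDFSCluster test harness. A minimal sketch, assuming an already-connected DistributedFileSystem dfs and an existing directory /data (both names are illustrative, not taken from the test above):

void printSsdQuota(DistributedFileSystem dfs) throws IOException {
    Path dir = new Path("/data");
    // Cap SSD usage under the directory at 1 GiB; other storage types remain unlimited.
    dfs.setQuotaByStorageType(dir, StorageType.SSD, 1024L * 1024 * 1024);
    QuotaUsage qu = dfs.getQuotaUsage(dir);
    System.out.println("SSD quota    = " + qu.getTypeQuota(StorageType.SSD));
    System.out.println("SSD consumed = " + qu.getTypeConsumed(StorageType.SSD));
}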

Example 42 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

The class TestHDFSFileContextMainOperations, method testEditsLogRename.

/**
   * Perform operations such as setting a quota, deleting files, and renaming,
   * and ensure the system can apply the edits log during startup.
   */
@Test
public void testEditsLogRename() throws Exception {
    DistributedFileSystem fs = cluster.getFileSystem();
    Path src1 = getTestRootPath(fc, "testEditsLogRename/srcdir/src1");
    Path dst1 = getTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
    createFile(src1);
    fs.mkdirs(dst1.getParent());
    createFile(dst1);
    // Set a quota so that dst1's parent cannot accept any new files or directories under it
    fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
    // Free up quota for a subsequent rename
    fs.delete(dst1, true);
    rename(src1, dst1, true, true, false, Rename.OVERWRITE);
    // Restart the cluster and ensure the above operations can be
    // loaded from the edits log
    restartCluster();
    fs = cluster.getFileSystem();
    src1 = getTestRootPath(fc, "testEditsLogRename/srcdir/src1");
    dst1 = getTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
    // ensure src1 is already renamed
    Assert.assertFalse(fs.exists(src1));
    // ensure rename dst exists
    Assert.assertTrue(fs.exists(dst1));
}
Also used : DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Test(org.junit.Test)
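
The quota manipulation that drives the test can be isolated from the rename logic. A minimal sketch, assuming an existing DistributedFileSystem fs and a directory path dir (illustrative names, not from the test):

void constrainDirectory(DistributedFileSystem fs, Path dir) throws IOException {
    // Allow at most two namespace entries under the quota (the directory itself plus one child);
    // HdfsConstants.QUOTA_DONT_SET leaves the storage-space quota unchanged.
    fs.setQuota(dir, 2, HdfsConstants.QUOTA_DONT_SET);
    // Later, clear the namespace quota again while still leaving the space quota untouched.
    fs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_DONT_SET);
}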

Example 43 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

The class TestHDFSFileContextMainOperations, method oldRename.

private void oldRename(Path src, Path dst, boolean renameSucceeds, boolean exception) throws Exception {
    DistributedFileSystem fs = cluster.getFileSystem();
    try {
        Assert.assertEquals(renameSucceeds, fs.rename(src, dst));
    } catch (Exception ex) {
        Assert.assertTrue(exception);
    }
    Assert.assertEquals(renameSucceeds, !exists(fc, src));
    Assert.assertEquals(renameSucceeds, exists(fc, dst));
}
Also used : DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) LoginException(javax.security.auth.login.LoginException) URISyntaxException(java.net.URISyntaxException) IOException(java.io.IOException) RemoteException(org.apache.hadoop.ipc.RemoteException)
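
The helper exercises the legacy boolean-returning FileSystem#rename, in contrast to the FileContext rename used by testEditsLogRename above. A minimal sketch contrasting the two styles, assuming existing fs, fc, src and dst handles (illustrative only):

static void renameBothWays(FileSystem fs, FileContext fc, Path src, Path dst) throws IOException {
    // Legacy API: reports most failures by returning false rather than throwing.
    if (!fs.rename(src, dst)) {
        throw new IOException("rename " + src + " -> " + dst + " failed");
    }
    // FileContext API: throws on failure and accepts options such as Rename.OVERWRITE;
    // here it simply moves the file back to its original location.
    fc.rename(dst, src, Options.Rename.OVERWRITE);
}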

Example 44 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

The class TestEnhancedByteBufferAccess, method testFallbackRead.

/**
   * Test the {@link ByteBufferUtil#fallbackRead} function directly.
   */
@Test
public void testFallbackRead() throws Exception {
    HdfsConfiguration conf = initZeroCopyTest();
    MiniDFSCluster cluster = null;
    final Path TEST_PATH = new Path("/a");
    final int TEST_FILE_LENGTH = 16385;
    final int RANDOM_SEED = 23453;
    FSDataInputStream fsIn = null;
    DistributedFileSystem fs = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        DFSTestUtil.createFile(fs, TEST_PATH, TEST_FILE_LENGTH, (short) 1, RANDOM_SEED);
        try {
            DFSTestUtil.waitReplication(fs, TEST_PATH, (short) 1);
        } catch (InterruptedException e) {
            Assert.fail("unexpected InterruptedException during " + "waitReplication: " + e);
        } catch (TimeoutException e) {
            Assert.fail("unexpected TimeoutException during " + "waitReplication: " + e);
        }
        fsIn = fs.open(TEST_PATH);
        byte[] original = new byte[TEST_FILE_LENGTH];
        IOUtils.readFully(fsIn, original, 0, TEST_FILE_LENGTH);
        fsIn.close();
        fsIn = fs.open(TEST_PATH);
        testFallbackImpl(fsIn, original);
    } finally {
        if (fsIn != null)
            fsIn.close();
        if (fs != null)
            fs.close();
        if (cluster != null)
            cluster.shutdown();
    }
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) TimeoutException(java.util.concurrent.TimeoutException) Test(org.junit.Test)
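
The fallback path under test backs the enhanced byte-buffer read API: when a zero-copy mmap read is not possible, the client falls back to filling a pooled buffer. A minimal sketch of that client-side API, assuming an open FSDataInputStream in over an HDFS file (illustrative, not taken from the test):

void readWithZeroCopyOrFallback(FSDataInputStream in) throws IOException {
    ByteBufferPool pool = new ElasticByteBufferPool();
    // Returns either a memory-mapped buffer (zero copy) or a pooled buffer (fallback),
    // or null at end of stream.
    ByteBuffer buf = in.read(pool, 4096, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    if (buf != null) {
        try {
            System.out.println("read " + buf.remaining() + " bytes");
        } finally {
            // Buffers obtained this way must be handed back to the stream.
            in.releaseBuffer(buf);
        }
    }
}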

Example 45 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

The class TestBalancer, method testMinBlockSizeAndSourceNodes.

/** Balancer should not move blocks with size < minBlockSize. */
@Test(timeout = 60000)
public void testMinBlockSizeAndSourceNodes() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    initConf(conf);
    final short replication = 3;
    final long[] lengths = { 10, 10, 10, 10 };
    final long[] capacities = new long[replication];
    final long totalUsed = capacities.length * sum(lengths);
    Arrays.fill(capacities, 1000);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(capacities.length).simulatedCapacities(capacities).build();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf, dfs.getUri(), ClientProtocol.class).getProxy();
    // fill up the cluster to be 80% full
    for (int i = 0; i < lengths.length; i++) {
        final long size = lengths[i];
        final Path p = new Path("/file" + i + "_size" + size);
        try (final OutputStream out = dfs.create(p)) {
            for (int j = 0; j < size; j++) {
                out.write(j);
            }
        }
    }
    // start up an empty node with the same capacity
    cluster.startDataNodes(conf, capacities.length, true, null, null, capacities);
    LOG.info("capacities    = " + Arrays.toString(capacities));
    LOG.info("totalUsedSpace= " + totalUsed);
    LOG.info("lengths       = " + Arrays.toString(lengths) + ", #=" + lengths.length);
    waitForHeartBeat(totalUsed, 2 * capacities[0] * capacities.length, client, cluster);
    final Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
    {
        // run Balancer with min-block-size=50
        final BalancerParameters p = Balancer.Cli.parse(new String[] { "-policy", BalancingPolicy.Node.INSTANCE.getName(), "-threshold", "1" });
        assertEquals(p.getBalancingPolicy(), BalancingPolicy.Node.INSTANCE);
        assertEquals(p.getThreshold(), 1.0, 0.001);
        conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 50);
        final int r = Balancer.run(namenodes, p, conf);
        assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r);
    }
    conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1);
    {
        // run Balancer with empty nodes as source nodes
        final Set<String> sourceNodes = new HashSet<>();
        final List<DataNode> datanodes = cluster.getDataNodes();
        for (int i = capacities.length; i < datanodes.size(); i++) {
            sourceNodes.add(datanodes.get(i).getDisplayName());
        }
        final BalancerParameters p = Balancer.Cli.parse(new String[] { "-policy", BalancingPolicy.Node.INSTANCE.getName(), "-threshold", "1", "-source", StringUtils.join(sourceNodes, ',') });
        assertEquals(p.getBalancingPolicy(), BalancingPolicy.Node.INSTANCE);
        assertEquals(p.getThreshold(), 1.0, 0.001);
        assertEquals(p.getSourceNodes(), sourceNodes);
        conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 50);
        final int r = Balancer.run(namenodes, p, conf);
        assertEquals(ExitStatus.NO_MOVE_BLOCK.getExitCode(), r);
    }
    {
        // run Balancer with a filled node as a source node
        final Set<String> sourceNodes = new HashSet<>();
        final List<DataNode> datanodes = cluster.getDataNodes();
        sourceNodes.add(datanodes.get(0).getDisplayName());
        final BalancerParameters p = Balancer.Cli.parse(new String[] { "-policy", BalancingPolicy.Node.INSTANCE.getName(), "-threshold", "1", "-source", StringUtils.join(sourceNodes, ',') });
        assertEquals(p.getBalancingPolicy(), BalancingPolicy.Node.INSTANCE);
        assertEquals(p.getThreshold(), 1.0, 0.001);
        assertEquals(p.getSourceNodes(), sourceNodes);
        conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1);
        final int r = Balancer.run(namenodes, p, conf);
        assertEquals(ExitStatus.NO_MOVE_BLOCK.getExitCode(), r);
    }
    {
        // run Balancer with all filled node as source nodes
        final Set<String> sourceNodes = new HashSet<>();
        final List<DataNode> datanodes = cluster.getDataNodes();
        for (int i = 0; i < capacities.length; i++) {
            sourceNodes.add(datanodes.get(i).getDisplayName());
        }
        final BalancerParameters p = Balancer.Cli.parse(new String[] { "-policy", BalancingPolicy.Node.INSTANCE.getName(), "-threshold", "1", "-source", StringUtils.join(sourceNodes, ',') });
        assertEquals(p.getBalancingPolicy(), BalancingPolicy.Node.INSTANCE);
        assertEquals(p.getThreshold(), 1.0, 0.001);
        assertEquals(p.getSourceNodes(), sourceNodes);
        conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1);
        final int r = Balancer.run(namenodes, p, conf);
        assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Set(java.util.Set) HashSet(java.util.HashSet) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) OutputStream(java.io.OutputStream) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) URI(java.net.URI) ArrayList(java.util.ArrayList) List(java.util.List) Test(org.junit.Test)
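
Each block above follows the same pattern: parse CLI-style arguments into BalancerParameters, run the Balancer, and compare the result against an ExitStatus code. A minimal standalone sketch of that pattern, assuming a Configuration that already points at the target cluster (the threshold value is illustrative):

static int runBalancerOnce(Configuration conf) throws Exception {
    final Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
    final BalancerParameters params = Balancer.Cli.parse(new String[] {
        "-policy", BalancingPolicy.Node.INSTANCE.getName(), "-threshold", "5" });
    final int exitCode = Balancer.run(namenodes, params, conf);
    if (exitCode != ExitStatus.SUCCESS.getExitCode()) {
        System.err.println("Balancer finished with exit code " + exitCode);
    }
    return exitCode;
}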

Aggregations

DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 252
Test (org.junit.Test): 175
Path (org.apache.hadoop.fs.Path): 169
Configuration (org.apache.hadoop.conf.Configuration): 126
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 126
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 86
IOException (java.io.IOException): 63
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 36
FileSystem (org.apache.hadoop.fs.FileSystem): 31
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 31
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 26
URI (java.net.URI): 24
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 22
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 19
AccessControlException (org.apache.hadoop.security.AccessControlException): 19
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 18
Matchers.anyString (org.mockito.Matchers.anyString): 18
FileStatus (org.apache.hadoop.fs.FileStatus): 16
ArrayList (java.util.ArrayList): 14
CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo): 14