Example 86 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

From the class TestNameNodeMetrics, the method testNumActiveClientsAndFilesUnderConstructionMetrics.
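
This test opens output streams from two separate DistributedFileSystem instances and verifies that the NumActiveClients and NumFilesUnderConstruction gauges rise and fall as the streams are opened and closed.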

/**
 * Test metrics indicating the number of active clients and the files under
 * construction.
 */
@Test(timeout = 60000)
public void testNumActiveClientsAndFilesUnderConstructionMetrics() throws Exception {
    final Path file1 = getTestPath("testFileAdd1");
    createFile(file1, 100, (short) 3);
    assertGauge("NumActiveClients", 0L, getMetrics(NS_METRICS));
    assertGauge("NumFilesUnderConstruction", 0L, getMetrics(NS_METRICS));
    Path file2 = new Path("/testFileAdd2");
    FSDataOutputStream output2 = fs.create(file2);
    output2.writeBytes("Some test data");
    assertGauge("NumActiveClients", 1L, getMetrics(NS_METRICS));
    assertGauge("NumFilesUnderConstruction", 1L, getMetrics(NS_METRICS));
    Path file3 = new Path("/testFileAdd3");
    FSDataOutputStream output3 = fs.create(file3);
    output3.writeBytes("Some test data");
    assertGauge("NumActiveClients", 1L, getMetrics(NS_METRICS));
    assertGauge("NumFilesUnderConstruction", 2L, getMetrics(NS_METRICS));
    // create another DistributedFileSystem client
    DistributedFileSystem fs1 = (DistributedFileSystem) cluster.getNewFileSystemInstance(0);
    try {
        Path file4 = new Path("/testFileAdd4");
        FSDataOutputStream output4 = fs1.create(file4);
        output4.writeBytes("Some test data");
        assertGauge("NumActiveClients", 2L, getMetrics(NS_METRICS));
        assertGauge("NumFilesUnderConstruction", 3L, getMetrics(NS_METRICS));
        Path file5 = new Path("/testFileAdd35");
        FSDataOutputStream output5 = fs1.create(file5);
        output5.writeBytes("Some test data");
        assertGauge("NumActiveClients", 2L, getMetrics(NS_METRICS));
        assertGauge("NumFilesUnderConstruction", 4L, getMetrics(NS_METRICS));
        output2.close();
        output3.close();
        assertGauge("NumActiveClients", 1L, getMetrics(NS_METRICS));
        assertGauge("NumFilesUnderConstruction", 2L, getMetrics(NS_METRICS));
        output4.close();
        output5.close();
        assertGauge("NumActiveClients", 0L, getMetrics(NS_METRICS));
        assertGauge("NumFilesUnderConstruction", 0L, getMetrics(NS_METRICS));
    } finally {
        fs1.close();
    }
}
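
Note that NumActiveClients counts distinct client instances (lease holders) rather than open streams: the two streams opened from fs count as a single active client, and the gauge only reaches 2 once the second DistributedFileSystem instance opens a file.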
Also used: Path (org.apache.hadoop.fs.Path), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), Test (org.junit.Test)

Example 87 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

From the class TestNameNodeMetrics, the method testTransactionSinceLastCheckpointMetrics.
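
This test brings up a two-NameNode HA mini-cluster with aggressive checkpoint settings and verifies that the standby's TransactionsSinceLastCheckpoint count tracks uncheckpointed edits correctly, both before the first checkpoint and after a checkpoint resets it to zero.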

/**
 * Tests TransactionsSinceLastCheckpoint. Needs a new cluster, as the
 * other tests here don't use HA. See HDFS-7501.
 */
@Test(timeout = 300000)
public void testTransactionSinceLastCheckpointMetrics() throws Exception {
    Random random = new Random();
    int retryCount = 0;
    while (retryCount < 5) {
        try {
            int basePort = 10060 + random.nextInt(100) * 2;
            MiniDFSNNTopology topology = new MiniDFSNNTopology()
                .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
                    .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
                    .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1)));
            HdfsConfiguration conf2 = new HdfsConfiguration();
            // Lower the checkpoint condition for purpose of testing.
            conf2.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 100);
            // Check for checkpoint condition very often, for purpose of testing.
            conf2.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
            // Poll and follow ANN txns very often, for purpose of testing.
            conf2.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
            MiniDFSCluster cluster2 = new MiniDFSCluster.Builder(conf2)
                .nnTopology(topology).numDataNodes(1).build();
            cluster2.waitActive();
            DistributedFileSystem fs2 = cluster2.getFileSystem(0);
            NameNode nn0 = cluster2.getNameNode(0);
            NameNode nn1 = cluster2.getNameNode(1);
            cluster2.transitionToActive(0);
            fs2.mkdirs(new Path("/tmp-t1"));
            fs2.mkdirs(new Path("/tmp-t2"));
            HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
            // Test to ensure tracking works before the first-ever checkpoint;
            // 2 more txns are added when the catch-up call is made.
            assertEquals("SBN failed to track 2 transactions pre-checkpoint.",
                4L, cluster2.getNameNode(1).getNamesystem()
                    .getTransactionsSinceLastCheckpoint());
            // Rounds out at 100, as 4 + 94 + 2 (catch-up call) = 100.
            for (int i = 1; i <= 94; i++) {
                fs2.mkdirs(new Path("/tmp-" + i));
            }
            HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
            // Assert 100 transactions in checkpoint.
            HATestUtil.waitForCheckpoint(cluster2, 1, ImmutableList.of(100));
            // Test to ensure number tracks the right state of
            // uncheckpointed edits, and does not go negative
            // (as fixed in HDFS-7501).
            assertEquals("Should be zero right after the checkpoint.", 0L, cluster2.getNameNode(1).getNamesystem().getTransactionsSinceLastCheckpoint());
            fs2.mkdirs(new Path("/tmp-t3"));
            fs2.mkdirs(new Path("/tmp-t4"));
            HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
            // Test to ensure we track the right numbers after
            // the checkpoint resets it to zero again.
            assertEquals("SBN failed to track 2 added txns after the ckpt.", 4L, cluster2.getNameNode(1).getNamesystem().getTransactionsSinceLastCheckpoint());
            cluster2.shutdown();
            break;
        } catch (Exception e) {
            LOG.warn("Unable to set up HA cluster, exception thrown: " + e);
            retryCount++;
        }
    }
}
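
The retry loop guards against port collisions: the HTTP ports are chosen at random, so if cluster startup fails (for example, because a port is already in use), the test logs a warning and retries with a new base port, up to five times.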
Also used: Path (org.apache.hadoop.fs.Path), NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), NoSuchAlgorithmException (java.security.NoSuchAlgorithmException), IOException (java.io.IOException), Random (java.util.Random), MiniDFSNNTopology (org.apache.hadoop.hdfs.MiniDFSNNTopology), Test (org.junit.Test)

Example 88 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

From the class TestNameNodeMetrics, the method testGenerateEDEKTime.
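
This test creates an encryption zone backed by a Java keystore provider, writes several files into it, and asserts that the GenerateEDEKTime quantile gauges are populated.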

@Test
public void testGenerateEDEKTime() throws IOException, NoSuchAlgorithmException {
    // Create a new MiniDFSCluster with encryption zone configuration.
    Configuration conf = new HdfsConfiguration();
    FileSystemTestHelper fsHelper = new FileSystemTestHelper();
    // Set up java key store
    String testRoot = fsHelper.getTestRootDir();
    File testRootDir = new File(testRoot).getAbsoluteFile();
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
        JavaKeyStoreProvider.SCHEME_NAME + "://file"
            + new Path(testRootDir.toString(), "test.jks").toUri());
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES, 2);
    try (MiniDFSCluster clusterEDEK = new MiniDFSCluster.Builder(conf).numDataNodes(1).build()) {
        DistributedFileSystem fsEDEK = clusterEDEK.getFileSystem();
        FileSystemTestWrapper fsWrapper = new FileSystemTestWrapper(fsEDEK);
        HdfsAdmin dfsAdmin = new HdfsAdmin(clusterEDEK.getURI(), conf);
        fsEDEK.getClient().setKeyProvider(clusterEDEK.getNameNode().getNamesystem().getProvider());
        String testKey = "test_key";
        DFSTestUtil.createKey(testKey, clusterEDEK, conf);
        final Path zoneParent = new Path("/zones");
        final Path zone1 = new Path(zoneParent, "zone1");
        fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
        dfsAdmin.createEncryptionZone(zone1, testKey, EnumSet.of(CreateEncryptionZoneFlag.NO_TRASH));
        MetricsRecordBuilder rb = getMetrics(NN_METRICS);
        for (int i = 0; i < 3; i++) {
            Path filePath = new Path("/zones/zone1/testfile-" + i);
            DFSTestUtil.createFile(fsEDEK, filePath, 1024, (short) 3, 1L);
            assertQuantileGauges("GenerateEDEKTime1s", rb);
        }
    }
}
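
Each file created inside the encryption zone requires the NameNode to generate an encrypted data encryption key (EDEK) through the configured key provider; the time spent doing so is what populates the GenerateEDEKTime quantile gauges asserted in the loop.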
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), FileSystemTestHelper (org.apache.hadoop.fs.FileSystemTestHelper), HdfsAdmin (org.apache.hadoop.hdfs.client.HdfsAdmin), FileSystemTestWrapper (org.apache.hadoop.fs.FileSystemTestWrapper), File (java.io.File), Test (org.junit.Test)

Example 89 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

From the class HDFSConcat, the method main.
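
A small command-line tool that resolves the default file system, casts it to DistributedFileSystem, and calls concat() to merge a list of source files into a target file.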

public static void main(String... args) throws IOException {
    if (args.length < 2) {
        System.err.println("Usage: HDFSConcat target srcs...");
        // Exit with a non-zero status on a usage error.
        System.exit(1);
    }
    Configuration conf = new Configuration();
    String uri = conf.get("fs.default.name", def_uri);
    Path path = new Path(uri);
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(path.toUri(), conf);
    Path[] srcs = new Path[args.length - 1];
    for (int i = 1; i < args.length; i++) {
        srcs[i - 1] = new Path(args[i]);
    }
    dfs.concat(new Path(args[0]), srcs);
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)
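
For context, here is a minimal, self-contained sketch of the same concat call made directly against a running cluster. The class name ConcatSketch, the cluster URI, and the file paths are illustrative assumptions, not values taken from the original tool:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class ConcatSketch {

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumption: an HDFS NameNode is reachable at this URI.
        Path target = new Path("hdfs://localhost:8020/data/part-all");
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(target.toUri(), conf);
        // concat() splices the blocks of each source file onto the end of
        // the target and removes the sources in one namespace operation.
        dfs.concat(target, new Path[] {
            new Path("/data/part-00000"), new Path("/data/part-00001") });
    }
}

Note that concat() imposes restrictions on its inputs (for example, the target must exist and the sources must be non-empty), so the sketch assumes the files were written beforehand.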

Example 90 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

From the class TestCryptoAdminCLI, the method setUp.
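
This setup method configures a Java keystore key provider and a trash interval, starts a single-DataNode MiniDFSCluster, creates a key, and confirms that the resulting FileSystem is in fact a DistributedFileSystem before the CLI tests run.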

@Before
@Override
public void setUp() throws Exception {
    super.setUp();
    conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG, HDFSPolicyProvider.class, PolicyProvider.class);
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
    conf.setLong(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY, 10);
    tmpDir = GenericTestUtils.getTestDir(UUID.randomUUID().toString());
    final Path jksPath = new Path(tmpDir.toString(), "test.jks");
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
        JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());
    dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    dfsCluster.waitClusterUp();
    createAKey("mykey", conf);
    namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
    username = System.getProperty("user.name");
    fs = dfsCluster.getFileSystem();
    assertTrue("Not an HDFS: " + fs.getUri(), fs instanceof DistributedFileSystem);
}
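
Since encryption zones are an HDFS-specific feature, the final assertion confirms up front that the cluster's FileSystem is a DistributedFileSystem before any of the crypto admin CLI tests run.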
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), Before (org.junit.Before)

Aggregations

DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 252
Test (org.junit.Test): 175
Path (org.apache.hadoop.fs.Path): 169
Configuration (org.apache.hadoop.conf.Configuration): 126
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 126
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 86
IOException (java.io.IOException): 63
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 36
FileSystem (org.apache.hadoop.fs.FileSystem): 31
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 31
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 26
URI (java.net.URI): 24
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 22
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 19
AccessControlException (org.apache.hadoop.security.AccessControlException): 19
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 18
Matchers.anyString (org.mockito.Matchers.anyString): 18
FileStatus (org.apache.hadoop.fs.FileStatus): 16
ArrayList (java.util.ArrayList): 14
CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo): 14