Example 11 with FileSystemTestHelper

Use of org.apache.hadoop.fs.FileSystemTestHelper in the Apache Hadoop project.

From the class TestChRootedFileSystem, method setUp:

@Before
public void setUp() throws Exception {
    // create the test root on local_fs
    Configuration conf = new Configuration();
    fSysTarget = FileSystem.getLocal(conf);
    fileSystemTestHelper = new FileSystemTestHelper();
    chrootedTo = fileSystemTestHelper.getAbsoluteTestRootPath(fSysTarget);
    // In case previous test was killed before cleanup
    fSysTarget.delete(chrootedTo, true);
    fSysTarget.mkdirs(chrootedTo);
    // ChRoot to the root of the test directory
    fSys = new ChRootedFileSystem(chrootedTo.toUri(), conf);
}
Also used: FileSystemTestHelper (org.apache.hadoop.fs.FileSystemTestHelper), Configuration (org.apache.hadoop.conf.Configuration), ChRootedFileSystem (org.apache.hadoop.fs.viewfs.ChRootedFileSystem), Before (org.junit.Before)
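
For symmetry, the matching tearDown usually deletes the chrooted test root so reruns start clean. A minimal sketch, not taken from this listing, assuming the fSysTarget and chrootedTo fields initialized in setUp above:

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.After;

public class ChRootedCleanupSketch {
    private FileSystem fSysTarget; // assumed: the local FS from setUp
    private Path chrootedTo;       // assumed: the test root from setUp

    @After
    public void tearDown() throws Exception {
        // Recursively remove the test root so the next run starts from scratch.
        fSysTarget.delete(chrootedTo, true);
    }
}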

Example 12 with FileSystemTestHelper

Use of org.apache.hadoop.fs.FileSystemTestHelper in the Apache Hadoop project.

From the class TestRpcProgramNfs3, method setup:

@BeforeClass
public static void setup() throws Exception {
    String currentUser = System.getProperty("user.name");
    config.set("fs.permissions.umask-mode", "u=rwx,g=,o=");
    config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(currentUser), "*");
    config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserIpConfKey(currentUser), "*");
    fsHelper = new FileSystemTestHelper();
    // Set up java key store
    String testRoot = fsHelper.getTestRootDir();
    testRootDir = new File(testRoot).getAbsoluteFile();
    final Path jksPath = new Path(testRootDir.toString(), "test.jks");
    config.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());
    ProxyUsers.refreshSuperUserGroupsConfiguration(config);
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();
    hdfs = cluster.getFileSystem();
    nn = cluster.getNameNode();
    dfsAdmin = new HdfsAdmin(cluster.getURI(), config);
    // Use ephemeral ports in case tests are running in parallel
    config.setInt("nfs3.mountd.port", 0);
    config.setInt("nfs3.server.port", 0);
    // Start NFS with allowed.hosts set to "* rw"
    config.set("dfs.nfs.exports.allowed.hosts", "* rw");
    nfs = new Nfs3(config);
    nfs.startServiceInternal(false);
    nfsd = (RpcProgramNfs3) nfs.getRpcProgram();
    hdfs.getClient().setKeyProvider(nn.getNamesystem().getProvider());
    DFSTestUtil.createKey(TEST_KEY, cluster, config);
    // Mock SecurityHandler which returns system user.name
    securityHandler = Mockito.mock(SecurityHandler.class);
    Mockito.when(securityHandler.getUser()).thenReturn(currentUser);
    // Mock SecurityHandler which returns a dummy username "harry"
    securityHandlerUnpriviledged = Mockito.mock(SecurityHandler.class);
    Mockito.when(securityHandlerUnpriviledged.getUser()).thenReturn("harry");
}
Also used: FileSystemTestHelper (org.apache.hadoop.fs.FileSystemTestHelper), Path (org.apache.hadoop.fs.Path), SecurityHandler (org.apache.hadoop.oncrpc.security.SecurityHandler), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), HdfsAdmin (org.apache.hadoop.hdfs.client.HdfsAdmin), File (java.io.File), BeforeClass (org.junit.BeforeClass)
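
A companion @AfterClass sketch, assumed rather than shown in this listing, that releases the mini cluster. It relies only on MiniDFSCluster.shutdown() and guards against a setup that failed partway:

import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.AfterClass;

public class Nfs3ShutdownSketch {
    private static MiniDFSCluster cluster; // assumed: the field built in setup()

    @AfterClass
    public static void shutdown() {
        // Free the cluster's ports and storage directories after all tests run.
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}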

Example 13 with FileSystemTestHelper

Use of org.apache.hadoop.fs.FileSystemTestHelper in the Apache Hadoop project.

From the class TestCacheDirectives, method testWaitForCachedReplicas:

@Test(timeout = 120000)
public void testWaitForCachedReplicas() throws Exception {
    FileSystemTestHelper helper = new FileSystemTestHelper();
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            return ((namenode.getNamesystem().getCacheCapacity() == (NUM_DATANODES * CACHE_CAPACITY)) && (namenode.getNamesystem().getCacheUsed() == 0));
        }
    }, 500, 60000);
    // Send a cache report referring to a bogus block.  It is important that
    // the NameNode be robust against this.
    NamenodeProtocols nnRpc = namenode.getRpcServer();
    DataNode dn0 = cluster.getDataNodes().get(0);
    String bpid = cluster.getNamesystem().getBlockPoolId();
    LinkedList<Long> bogusBlockIds = new LinkedList<Long>();
    bogusBlockIds.add(999999L);
    nnRpc.cacheReport(dn0.getDNRegistrationForBP(bpid), bpid, bogusBlockIds);
    Path rootDir = helper.getDefaultWorkingDirectory(dfs);
    // Create the pool
    final String pool = "friendlyPool";
    nnRpc.addCachePool(new CachePoolInfo(pool));
    // Create some test files
    final int numFiles = 2;
    final int numBlocksPerFile = 2;
    final List<String> paths = new ArrayList<String>(numFiles);
    for (int i = 0; i < numFiles; i++) {
        Path p = new Path(rootDir, "testCachePaths-" + i);
        FileSystemTestHelper.createFile(dfs, p, numBlocksPerFile, (int) BLOCK_SIZE);
        paths.add(p.toUri().getPath());
    }
    // Check the initial statistics at the namenode
    waitForCachedBlocks(namenode, 0, 0, "testWaitForCachedReplicas:0");
    // Cache and check each path in sequence
    int expected = 0;
    for (int i = 0; i < numFiles; i++) {
        CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder().setPath(new Path(paths.get(i))).setPool(pool).build();
        nnRpc.addCacheDirective(directive, EnumSet.noneOf(CacheFlag.class));
        expected += numBlocksPerFile;
        waitForCachedBlocks(namenode, expected, expected, "testWaitForCachedReplicas:1");
    }
    // Check that the datanodes have the right cache values
    DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
    assertEquals("Unexpected number of live nodes", NUM_DATANODES, live.length);
    long totalUsed = 0;
    for (DatanodeInfo dn : live) {
        final long cacheCapacity = dn.getCacheCapacity();
        final long cacheUsed = dn.getCacheUsed();
        final long cacheRemaining = dn.getCacheRemaining();
        assertEquals("Unexpected cache capacity", CACHE_CAPACITY, cacheCapacity);
        assertEquals("Capacity not equal to used + remaining", cacheCapacity, cacheUsed + cacheRemaining);
        assertEquals("Remaining not equal to capacity - used", cacheCapacity - cacheUsed, cacheRemaining);
        totalUsed += cacheUsed;
    }
    assertEquals(expected * BLOCK_SIZE, totalUsed);
    // Uncache and check each path in sequence
    RemoteIterator<CacheDirectiveEntry> entries = new CacheDirectiveIterator(nnRpc, null, FsTracer.get(conf));
    for (int i = 0; i < numFiles; i++) {
        CacheDirectiveEntry entry = entries.next();
        nnRpc.removeCacheDirective(entry.getInfo().getId());
        expected -= numBlocksPerFile;
        waitForCachedBlocks(namenode, expected, expected, "testWaitForCachedReplicas:2");
    }
}
Also used: Path (org.apache.hadoop.fs.Path), NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols), CacheFlag (org.apache.hadoop.fs.CacheFlag), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), ArrayList (java.util.ArrayList), LinkedList (java.util.LinkedList), FileSystemTestHelper (org.apache.hadoop.fs.FileSystemTestHelper), CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), CacheDirectiveEntry (org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry), CacheDirectiveIterator (org.apache.hadoop.hdfs.protocol.CacheDirectiveIterator), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), Test (org.junit.Test)
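
The anonymous Supplier in this test predates Java 8 lambdas; on a Java 8 toolchain the same wait reads more compactly as a lambda. A sketch under that assumption, with stand-in values for the test's NUM_DATANODES and CACHE_CAPACITY constants:

import java.util.concurrent.TimeoutException;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.test.GenericTestUtils;

class CacheWaitSketch {
    static final int NUM_DATANODES = 4;           // stand-in value, not from the test
    static final long CACHE_CAPACITY = 64 * 1024; // stand-in value, not from the test

    static void waitForFullCacheCapacity(final NameNode namenode)
            throws TimeoutException, InterruptedException {
        // Poll every 500 ms and time out after 60 s, as the anonymous Supplier above does.
        GenericTestUtils.waitFor(
            () -> namenode.getNamesystem().getCacheCapacity() == NUM_DATANODES * CACHE_CAPACITY
                && namenode.getNamesystem().getCacheUsed() == 0,
            500, 60000);
    }
}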

Example 14 with FileSystemTestHelper

Use of org.apache.hadoop.fs.FileSystemTestHelper in the Apache Hadoop project.

From the class TestViewFileSystemHdfs, method clusterSetupAtBegining:

@BeforeClass
public static void clusterSetupAtBegining() throws IOException, LoginException, URISyntaxException {
    // Encryption Zone settings
    FileSystemTestHelper fsHelper = new FileSystemTestHelper();
    String testRoot = fsHelper.getTestRootDir();
    CONF.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, JavaKeyStoreProvider.SCHEME_NAME + "://file" + new Path(new File(testRoot).getAbsoluteFile().toString(), "test.jks").toUri());
    CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
    CONF.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES, 2);
    SupportsBlocks = true;
    cluster = new MiniDFSCluster.Builder(CONF).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2)).numDataNodes(2).build();
    cluster.waitClusterUp();
    fHdfs = cluster.getFileSystem(0);
    fHdfs2 = cluster.getFileSystem(1);
    fHdfs.getConf().set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, FsConstants.VIEWFS_URI.toString());
    fHdfs2.getConf().set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, FsConstants.VIEWFS_URI.toString());
    defaultWorkingDirectory = fHdfs.makeQualified(new Path("/user/" + UserGroupInformation.getCurrentUser().getShortUserName()));
    defaultWorkingDirectory2 = fHdfs2.makeQualified(new Path("/user/" + UserGroupInformation.getCurrentUser().getShortUserName()));
    fHdfs.mkdirs(defaultWorkingDirectory);
    fHdfs2.mkdirs(defaultWorkingDirectory2);
}
Also used: FileSystemTestHelper (org.apache.hadoop.fs.FileSystemTestHelper), Path (org.apache.hadoop.fs.Path), File (java.io.File), BeforeClass (org.junit.BeforeClass)
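
This setup points fs.defaultFS at viewfs:// for both clients, but the mount table itself is wired up elsewhere in the test hierarchy. A hedged sketch of that linking step using org.apache.hadoop.fs.viewfs.ConfigUtil; the /user and /data mount points here are illustrative, not taken from the test:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.viewfs.ConfigUtil;

class ViewFsMountSketch {
    static void linkClusters(Configuration conf, URI nn0Home, URI nn1Home) {
        // Map client-visible viewfs paths onto the two federated namespaces.
        ConfigUtil.addLink(conf, "/user", nn0Home); // illustrative mount point
        ConfigUtil.addLink(conf, "/data", nn1Home); // illustrative mount point
    }
}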

Example 15 with FileSystemTestHelper

Use of org.apache.hadoop.fs.FileSystemTestHelper in the Apache Hadoop project.

From the class TestAclsEndToEnd, method setup:

/**
   * Set up a miniDFS and miniKMS. The resetKms and resetDfs parameters control
   * whether the services will start fresh or reuse the existing data.
   *
   * @param conf the configuration to use for both the miniKMS and miniDFS
   * @param resetKms whether to start a fresh miniKMS
   * @param resetDfs whether to start a fresh miniDFS
   * @throws Exception thrown if setup fails
   */
private void setup(Configuration conf, boolean resetKms, boolean resetDfs) throws Exception {
    if (resetKms) {
        FileSystemTestHelper fsHelper = new FileSystemTestHelper();
        kmsDir = new File(fsHelper.getTestRootDir()).getAbsoluteFile();
        Assert.assertTrue(kmsDir.mkdirs());
    }
    writeConf(kmsDir, conf);
    MiniKMS.Builder miniKMSBuilder = new MiniKMS.Builder();
    miniKMS = miniKMSBuilder.setKmsConfDir(kmsDir).build();
    miniKMS.start();
    conf = new HdfsConfiguration();
    // Allow the real user to proxy as the test users
    conf.set(ProxyUsers.CONF_HADOOP_PROXYUSER + "." + realUser + ".users", "keyadmin,hdfs,user");
    conf.set(ProxyUsers.CONF_HADOOP_PROXYUSER + "." + realUser + ".hosts", "*");
    // Point the cluster at the miniKMS key provider
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, getKeyProviderURI());
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
    MiniDFSCluster.Builder clusterBuilder = new MiniDFSCluster.Builder(conf);
    cluster = clusterBuilder.numDataNodes(1).format(resetDfs).build();
    fs = cluster.getFileSystem();
}
Also used: FileSystemTestHelper (org.apache.hadoop.fs.FileSystemTestHelper), MiniKMS (org.apache.hadoop.crypto.key.kms.server.MiniKMS), File (java.io.File)
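
Since setup() can be called repeatedly with resetKms and resetDfs, a matching teardown has to stop both services. A sketch, assumed rather than shown here, using only MiniKMS.stop() and MiniDFSCluster.shutdown():

import org.apache.hadoop.crypto.key.kms.server.MiniKMS;
import org.apache.hadoop.hdfs.MiniDFSCluster;

class AclsTeardownSketch {
    private MiniDFSCluster cluster; // assumed: the field built in setup()
    private MiniKMS miniKMS;        // assumed: the field started in setup()

    void teardown() {
        // Stop in reverse start order; null checks guard against a failed setup.
        if (cluster != null) {
            cluster.shutdown();
        }
        if (miniKMS != null) {
            miniKMS.stop();
        }
    }
}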

Aggregations

FileSystemTestHelper (org.apache.hadoop.fs.FileSystemTestHelper): 18 usages
File (java.io.File): 12 usages
Configuration (org.apache.hadoop.conf.Configuration): 9 usages
Path (org.apache.hadoop.fs.Path): 9 usages
Before (org.junit.Before): 9 usages
HdfsAdmin (org.apache.hadoop.hdfs.client.HdfsAdmin): 6 usages
FileSystemTestWrapper (org.apache.hadoop.fs.FileSystemTestWrapper): 4 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 4 usages
Test (org.junit.Test): 4 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 3 usages
EncryptionZoneManager (org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager): 3 usages
FileContextTestWrapper (org.apache.hadoop.fs.FileContextTestWrapper): 2 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 2 usages
BeforeClass (org.junit.BeforeClass): 2 usages
FileOutputStream (java.io.FileOutputStream): 1 usage
RandomAccessFile (java.io.RandomAccessFile): 1 usage
SocketTimeoutException (java.net.SocketTimeoutException): 1 usage
URL (java.net.URL): 1 usage
ArrayList (java.util.ArrayList): 1 usage
LinkedList (java.util.LinkedList): 1 usage