Use of org.apache.hadoop.fs.FileSystemTestHelper in project hadoop by apache.
From the class TestChRootedFileSystem, method setUp:
@Before
public void setUp() throws Exception {
  // create the test root on local_fs
  Configuration conf = new Configuration();
  fSysTarget = FileSystem.getLocal(conf);
  fileSystemTestHelper = new FileSystemTestHelper();
  chrootedTo = fileSystemTestHelper.getAbsoluteTestRootPath(fSysTarget);
  // In case previous test was killed before cleanup
  fSysTarget.delete(chrootedTo, true);
  fSysTarget.mkdirs(chrootedTo);
  // ChRoot to the root of the testDirectory
  fSys = new ChRootedFileSystem(chrootedTo.toUri(), conf);
}
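A setUp like this is usually paired with an @After method that removes the test root again. A minimal sketch under that assumption (the method name is illustrative; the delete call mirrors the one in setUp):

@After
public void tearDown() throws Exception {
  // Remove the chroot directory so the next run starts from a clean root.
  fSysTarget.delete(chrootedTo, true);
}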
Use of org.apache.hadoop.fs.FileSystemTestHelper in project hadoop by apache.
From the class TestRpcProgramNfs3, method setup:
@BeforeClass
public static void setup() throws Exception {
  String currentUser = System.getProperty("user.name");
  config.set("fs.permissions.umask-mode", "u=rwx,g=,o=");
  config.set(DefaultImpersonationProvider.getTestProvider()
      .getProxySuperuserGroupConfKey(currentUser), "*");
  config.set(DefaultImpersonationProvider.getTestProvider()
      .getProxySuperuserIpConfKey(currentUser), "*");
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  final Path jksPath = new Path(testRootDir.toString(), "test.jks");
  config.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
  cluster.waitActive();
  hdfs = cluster.getFileSystem();
  nn = cluster.getNameNode();
  dfsAdmin = new HdfsAdmin(cluster.getURI(), config);
  // Use ephemeral ports in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);
  // Start NFS with allowed.hosts set to "* rw"
  config.set("dfs.nfs.exports.allowed.hosts", "* rw");
  nfs = new Nfs3(config);
  nfs.startServiceInternal(false);
  nfsd = (RpcProgramNfs3) nfs.getRpcProgram();
  hdfs.getClient().setKeyProvider(nn.getNamesystem().getProvider());
  DFSTestUtil.createKey(TEST_KEY, cluster, config);
  // Mock SecurityHandler which returns system user.name
  securityHandler = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandler.getUser()).thenReturn(currentUser);
  // Mock SecurityHandler which returns a dummy username "harry"
  securityHandlerUnpriviledged = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandlerUnpriviledged.getUser()).thenReturn("harry");
}
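The snippet does not show the matching cleanup, so here is a hedged sketch of an @AfterClass teardown that stops the mini cluster. The method name is an assumption; shutting down the NFS gateway itself is omitted because its stop API is not shown above.

@AfterClass
public static void shutdown() throws Exception {
  if (cluster != null) {
    cluster.shutdown();
  }
}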
Use of org.apache.hadoop.fs.FileSystemTestHelper in project hadoop by apache.
From the class TestCacheDirectives, method testWaitForCachedReplicas:
@Test(timeout = 120000)
public void testWaitForCachedReplicas() throws Exception {
  FileSystemTestHelper helper = new FileSystemTestHelper();
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return ((namenode.getNamesystem().getCacheCapacity() ==
          (NUM_DATANODES * CACHE_CAPACITY)) &&
          (namenode.getNamesystem().getCacheUsed() == 0));
    }
  }, 500, 60000);
  // Send a cache report referring to a bogus block. It is important that
  // the NameNode be robust against this.
  NamenodeProtocols nnRpc = namenode.getRpcServer();
  DataNode dn0 = cluster.getDataNodes().get(0);
  String bpid = cluster.getNamesystem().getBlockPoolId();
  LinkedList<Long> bogusBlockIds = new LinkedList<Long>();
  bogusBlockIds.add(999999L);
  nnRpc.cacheReport(dn0.getDNRegistrationForBP(bpid), bpid, bogusBlockIds);
  Path rootDir = helper.getDefaultWorkingDirectory(dfs);
  // Create the pool
  final String pool = "friendlyPool";
  nnRpc.addCachePool(new CachePoolInfo("friendlyPool"));
  // Create some test files
  final int numFiles = 2;
  final int numBlocksPerFile = 2;
  final List<String> paths = new ArrayList<String>(numFiles);
  for (int i = 0; i < numFiles; i++) {
    Path p = new Path(rootDir, "testCachePaths-" + i);
    FileSystemTestHelper.createFile(dfs, p, numBlocksPerFile, (int) BLOCK_SIZE);
    paths.add(p.toUri().getPath());
  }
  // Check the initial statistics at the namenode
  waitForCachedBlocks(namenode, 0, 0, "testWaitForCachedReplicas:0");
  // Cache and check each path in sequence
  int expected = 0;
  for (int i = 0; i < numFiles; i++) {
    CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
        .setPath(new Path(paths.get(i))).setPool(pool).build();
    nnRpc.addCacheDirective(directive, EnumSet.noneOf(CacheFlag.class));
    expected += numBlocksPerFile;
    waitForCachedBlocks(namenode, expected, expected,
        "testWaitForCachedReplicas:1");
  }
  // Check that the datanodes have the right cache values
  DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
  assertEquals("Unexpected number of live nodes", NUM_DATANODES, live.length);
  long totalUsed = 0;
  for (DatanodeInfo dn : live) {
    final long cacheCapacity = dn.getCacheCapacity();
    final long cacheUsed = dn.getCacheUsed();
    final long cacheRemaining = dn.getCacheRemaining();
    assertEquals("Unexpected cache capacity", CACHE_CAPACITY, cacheCapacity);
    assertEquals("Capacity not equal to used + remaining",
        cacheCapacity, cacheUsed + cacheRemaining);
    assertEquals("Remaining not equal to capacity - used",
        cacheCapacity - cacheUsed, cacheRemaining);
    totalUsed += cacheUsed;
  }
  assertEquals(expected * BLOCK_SIZE, totalUsed);
  // Uncache and check each path in sequence
  RemoteIterator<CacheDirectiveEntry> entries =
      new CacheDirectiveIterator(nnRpc, null, FsTracer.get(conf));
  for (int i = 0; i < numFiles; i++) {
    CacheDirectiveEntry entry = entries.next();
    nnRpc.removeCacheDirective(entry.getInfo().getId());
    expected -= numBlocksPerFile;
    waitForCachedBlocks(namenode, expected, expected,
        "testWaitForCachedReplicas:2");
  }
}
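The helper methods used above (getDefaultWorkingDirectory, getAbsoluteTestRootPath, and the static createFile) also work against a plain local filesystem. A minimal standalone sketch, with illustrative names, that creates a one-block file under the helper's test root and verifies it exists:

FileSystem localFs = FileSystem.getLocal(new Configuration());
FileSystemTestHelper helper = new FileSystemTestHelper();
Path testRoot = helper.getAbsoluteTestRootPath(localFs);
Path sample = new Path(testRoot, "sampleFile");
// Same static helper as above: createFile(fs, path, numBlocks, blockSize)
FileSystemTestHelper.createFile(localFs, sample, 1, 1024);
assertTrue(localFs.exists(sample));
// Clean up the test root afterwards
localFs.delete(testRoot, true);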
Use of org.apache.hadoop.fs.FileSystemTestHelper in project hadoop by apache.
From the class TestViewFileSystemHdfs, method clusterSetupAtBegining:
@BeforeClass
public static void clusterSetupAtBegining() throws IOException, LoginException, URISyntaxException {
// Encryption Zone settings
FileSystemTestHelper fsHelper = new FileSystemTestHelper();
String testRoot = fsHelper.getTestRootDir();
CONF.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, JavaKeyStoreProvider.SCHEME_NAME + "://file" + new Path(new File(testRoot).getAbsoluteFile().toString(), "test" + ".jks").toUri());
CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
CONF.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES, 2);
SupportsBlocks = true;
CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
cluster = new MiniDFSCluster.Builder(CONF).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2)).numDataNodes(2).build();
cluster.waitClusterUp();
fHdfs = cluster.getFileSystem(0);
fHdfs2 = cluster.getFileSystem(1);
fHdfs.getConf().set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, FsConstants.VIEWFS_URI.toString());
fHdfs2.getConf().set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, FsConstants.VIEWFS_URI.toString());
defaultWorkingDirectory = fHdfs.makeQualified(new Path("/user/" + UserGroupInformation.getCurrentUser().getShortUserName()));
defaultWorkingDirectory2 = fHdfs2.makeQualified(new Path("/user/" + UserGroupInformation.getCurrentUser().getShortUserName()));
fHdfs.mkdirs(defaultWorkingDirectory);
fHdfs2.mkdirs(defaultWorkingDirectory2);
}
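The two federated namespaces set up here are typically stitched together through a viewfs mount table before tests run against FsConstants.VIEWFS_URI. A hedged sketch using org.apache.hadoop.fs.viewfs.ConfigUtil; the mount point names are illustrative and not taken from the test:

Configuration viewConf = new Configuration(CONF);
// Link one mount point to each federated namespace.
ConfigUtil.addLink(viewConf, "/nn0", fHdfs.getUri());
ConfigUtil.addLink(viewConf, "/nn1", fHdfs2.getUri());
FileSystem viewFs = FileSystem.get(FsConstants.VIEWFS_URI, viewConf);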
Use of org.apache.hadoop.fs.FileSystemTestHelper in project hadoop by apache.
From the class TestAclsEndToEnd, method setup:
/**
 * Set up a miniDFS and miniKMS. The resetKms and resetDfs parameters control
 * whether the services will start fresh or reuse the existing data.
 *
 * @param conf the configuration to use for both the miniKMS and miniDFS
 * @param resetKms whether to start a fresh miniKMS
 * @param resetDfs whether to start a fresh miniDFS
 * @throws Exception thrown if setup fails
 */
private void setup(Configuration conf, boolean resetKms, boolean resetDfs)
    throws Exception {
  if (resetKms) {
    FileSystemTestHelper fsHelper = new FileSystemTestHelper();
    kmsDir = new File(fsHelper.getTestRootDir()).getAbsoluteFile();
    Assert.assertTrue(kmsDir.mkdirs());
  }
  writeConf(kmsDir, conf);
  MiniKMS.Builder miniKMSBuilder = new MiniKMS.Builder();
  miniKMS = miniKMSBuilder.setKmsConfDir(kmsDir).build();
  miniKMS.start();
  conf = new HdfsConfiguration();
  // Set up java key store
  conf.set(ProxyUsers.CONF_HADOOP_PROXYUSER + "." + realUser + ".users",
      "keyadmin,hdfs,user");
  conf.set(ProxyUsers.CONF_HADOOP_PROXYUSER + "." + realUser + ".hosts", "*");
  conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
      getKeyProviderURI());
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  MiniDFSCluster.Builder clusterBuilder = new MiniDFSCluster.Builder(conf);
  cluster = clusterBuilder.numDataNodes(1).format(resetDfs).build();
  fs = cluster.getFileSystem();
}
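A hypothetical caller, illustrating the resetKms/resetDfs semantics described in the javadoc: the first call starts fresh services, a later call reuses the existing key store and the already formatted namespace. The surrounding test body is not from the original.

// Hypothetical usage sketch
Configuration conf = new Configuration();
setup(conf, true, true);   // fresh miniKMS and freshly formatted miniDFS
// ... run assertions, tear the services down ...
setup(conf, false, false); // reuse the existing KMS directory and DFS data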