Use of org.apache.hadoop.hdfs.client.HdfsAdmin in project hadoop by apache.
From the class TestEncryptionZonesWithHA, method setupCluster.
@Before
public void setupCluster() throws Exception {
  conf = new Configuration();
  // Tail edits frequently so the standby stays close to the active NN
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);
  fsHelper = new FileSystemTestHelper();
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  // Back the cluster with a file-based JavaKeyStoreProvider
  conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
      JavaKeyStoreProvider.SCHEME_NAME + "://file"
          + new Path(testRootDir.toString(), "test.jks").toUri());
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(1)
      .build();
  cluster.waitActive();
  cluster.transitionToActive(0);
  fs = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
  DFSTestUtil.createKey(TEST_KEY, cluster, 0, conf);
  DFSTestUtil.createKey(TEST_KEY, cluster, 1, conf);
  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  // One HdfsAdmin per NameNode so either can be exercised directly
  dfsAdmin0 = new HdfsAdmin(cluster.getURI(0), conf);
  dfsAdmin1 = new HdfsAdmin(cluster.getURI(1), conf);
  KeyProviderCryptoExtension nn0Provider =
      cluster.getNameNode(0).getNamesystem().getProvider();
  fs.getClient().setKeyProvider(nn0Provider);
}
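A minimal sketch, not part of the original test, of what this HA setup enables: an encryption zone created through the active NameNode (via dfsAdmin0) becomes visible from the other NameNode once it has caught up on the edit log. The zone path below is hypothetical; createEncryptionZone, getEncryptionZoneForPath, and HATestUtil.waitForStandbyToCatchUp are existing Hadoop test-scope APIs.

// Hypothetical follow-on to setupCluster(): create a zone on nn0,
// fail over, and read the zone metadata back through nn1.
final Path zonePath = new Path("/ez"); // illustrative path
fs.mkdirs(zonePath);
dfsAdmin0.createEncryptionZone(zonePath, TEST_KEY);
// Let the standby tail the edits that record the new zone
HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
assertEquals(TEST_KEY,
    dfsAdmin1.getEncryptionZoneForPath(zonePath).getKeyName());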
Use of org.apache.hadoop.hdfs.client.HdfsAdmin in project nifi by apache.
From the class TestGetHDFSEvents, method setup.
@Before
public void setup() {
  mockNiFiProperties = mock(NiFiProperties.class);
  when(mockNiFiProperties.getKerberosConfigurationFile()).thenReturn(null);
  kerberosProperties = new KerberosProperties(null);
  inotifyEventInputStream = mock(DFSInotifyEventInputStream.class);
  hdfsAdmin = mock(HdfsAdmin.class);
}
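For context, a minimal sketch, assuming the mocks above, of how a processor like GetHDFSEvents consumes HdfsAdmin: getInotifyEventStream() hands back the DFSInotifyEventInputStream (org.apache.hadoop.hdfs.inotify.Event and EventBatch are the real event types), which is polled for batches of namespace events. The process() handler here is hypothetical.

DFSInotifyEventInputStream stream = hdfsAdmin.getInotifyEventStream();
EventBatch batch = stream.poll(1, TimeUnit.SECONDS);
if (batch != null) {
  for (Event event : batch.getEvents()) {
    // Each event carries its type (CREATE, CLOSE, RENAME, ...)
    process(event); // hypothetical handler
  }
  long lastTxId = batch.getTxid(); // resume point after a restart
}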
Use of org.apache.hadoop.hdfs.client.HdfsAdmin in project hadoop by apache.
From the class TestHdfsAdmin, method testHdfsAdminSetQuota.
/**
 * Test that we can set and clear quotas via {@link HdfsAdmin}.
 */
@Test
public void testHdfsAdminSetQuota() throws Exception {
  HdfsAdmin dfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
  FileSystem fs = null;
  try {
    fs = FileSystem.get(conf);
    assertTrue(fs.mkdirs(TEST_PATH));
    // Both quotas start unset; getContentSummary reports -1 for "no quota"
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());
    // Setting the space quota leaves the namespace quota untouched...
    dfsAdmin.setSpaceQuota(TEST_PATH, 10);
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fs.getContentSummary(TEST_PATH).getSpaceQuota());
    // ...and the namespace quota is likewise independent
    dfsAdmin.setQuota(TEST_PATH, 10);
    assertEquals(10, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fs.getContentSummary(TEST_PATH).getSpaceQuota());
    dfsAdmin.clearSpaceQuota(TEST_PATH);
    assertEquals(10, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());
    dfsAdmin.clearQuota(TEST_PATH);
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());
  } finally {
    if (fs != null) {
      fs.close();
    }
  }
}
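Worth noting from the assertions above: -1 means "no quota set", and the namespace quota (setQuota) and space quota (setSpaceQuota) are independent of each other. A minimal sketch, with a hypothetical file size, of how an exceeded space quota surfaces to a writer (DSQuotaExceededException is the real exception class in org.apache.hadoop.hdfs.protocol):

dfsAdmin.setSpaceQuota(TEST_PATH, 10); // 10 bytes of raw disk space
FSDataOutputStream out = fs.create(new Path(TEST_PATH, "file"));
try {
  out.write(new byte[1024]);
  // Quota is enforced on block allocation, so the failure may only
  // surface on write or close
  out.close();
} catch (DSQuotaExceededException e) {
  // Expected: the write would exceed the 10-byte space quota
}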
Use of org.apache.hadoop.hdfs.client.HdfsAdmin in project hadoop by apache.
From the class TestReservedRawPaths, method setup.
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  File testRootDir = new File(testRoot).getAbsoluteFile();
  final Path jksPath = new Path(testRootDir.toString(), "test.jks");
  conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
  fs = cluster.getFileSystem();
  fsWrapper = new FileSystemTestWrapper(cluster.getFileSystem());
  fcWrapper = new FileContextTestWrapper(
      FileContext.getFileContext(cluster.getURI(), conf));
  dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
  // Need to set the client's KeyProvider to the NN's for JKS,
  // else the updates do not get flushed properly
  fs.getClient().setKeyProvider(
      cluster.getNameNode().getNamesystem().getProvider());
  DFSTestUtil.createKey(TEST_KEY, cluster, conf);
}
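A minimal sketch, with hypothetical paths, of what this setup exercises: a file written inside an encryption zone can be read back as raw ciphertext through the /.reserved/raw prefix (superuser only), which is how tools such as distcp copy encrypted data without needing key access.

final Path zone = new Path("/ez"); // illustrative zone
fs.mkdirs(zone);
dfsAdmin.createEncryptionZone(zone, TEST_KEY);
final Path file = new Path(zone, "file");
DFSTestUtil.createFile(fs, file, 1024, (short) 1, 0xFEED);
// Same on-disk bytes, read without decryption
Path raw = new Path("/.reserved/raw" + file.toUri().getPath());
try (FSDataInputStream in = fs.open(raw)) {
  byte[] cipher = new byte[1024];
  in.readFully(cipher); // differs from a plaintext read of 'file'
}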
Use of org.apache.hadoop.hdfs.client.HdfsAdmin in project hadoop by apache.
From the class TestTrashWithEncryptionZones, method setup.
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
      getKeyProviderURI());
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  // Lower the batch size for testing
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES, 2);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  org.apache.log4j.Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
  fs = cluster.getFileSystem();
  fsWrapper = new FileSystemTestWrapper(fs);
  dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
  setProvider();
  // Create a test key
  DFSTestUtil.createKey(TEST_KEY, cluster, conf);
  clientConf = new Configuration(conf);
  // A one-minute trash interval so client-side deletes go through the trash
  clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
  shell = new FsShell(clientConf);
}
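A minimal sketch, with hypothetical zone and file names, of the behavior this setup supports: with fs.trash.interval set, removing a file inside an encryption zone moves it to a .Trash directory within that zone rather than the user's home trash, since renames cannot cross encryption zone boundaries.

final Path zone = new Path("/zones/zone1");
fs.mkdirs(zone);
dfsAdmin.createEncryptionZone(zone, TEST_KEY);
final Path file = new Path(zone, "file1");
DFSTestUtil.createFile(fs, file, 1024, (short) 1, 0xFEED);
String[] argv = new String[] { "-rm", file.toString() };
assertEquals("rm should move the file to trash", 0, shell.run(argv));
// The file now lives under /zones/zone1/.Trash/<user>/Current/...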