Use of java.util.HashSet in project hadoop by apache.
The class TestHdfsAdmin, method testHdfsAdminStoragePolicies.
/**
 * Test that we can set, get, unset storage policies via {@link HdfsAdmin}.
 */
@Test
public void testHdfsAdminStoragePolicies() throws Exception {
  HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
  FileSystem fs = FileSystem.get(conf);
  final Path foo = new Path("/foo");
  final Path bar = new Path(foo, "bar");
  final Path wow = new Path(bar, "wow");
  DFSTestUtil.createFile(fs, wow, SIZE, REPL, 0);
  final BlockStoragePolicySuite suite =
      BlockStoragePolicySuite.createDefaultSuite();
  final BlockStoragePolicy warm = suite.getPolicy("WARM");
  final BlockStoragePolicy cold = suite.getPolicy("COLD");
  final BlockStoragePolicy hot = suite.getPolicy("HOT");

  /*
   * test: set storage policy
   */
  hdfsAdmin.setStoragePolicy(foo, warm.getName());
  hdfsAdmin.setStoragePolicy(bar, cold.getName());
  hdfsAdmin.setStoragePolicy(wow, hot.getName());

  /*
   * test: get storage policy after set
   */
  assertEquals(hdfsAdmin.getStoragePolicy(foo), warm);
  assertEquals(hdfsAdmin.getStoragePolicy(bar), cold);
  assertEquals(hdfsAdmin.getStoragePolicy(wow), hot);

  /*
   * test: unset storage policy
   */
  hdfsAdmin.unsetStoragePolicy(foo);
  hdfsAdmin.unsetStoragePolicy(bar);
  hdfsAdmin.unsetStoragePolicy(wow);

  /*
   * test: get storage policy after unset. HOT by default.
   */
  assertEquals(hdfsAdmin.getStoragePolicy(foo), hot);
  assertEquals(hdfsAdmin.getStoragePolicy(bar), hot);
  assertEquals(hdfsAdmin.getStoragePolicy(wow), hot);

  /*
   * test: get all storage policies
   */
  // Get policies via HdfsAdmin
  Set<String> policyNamesSet1 = new HashSet<>();
  for (BlockStoragePolicySpi policy : hdfsAdmin.getAllStoragePolicies()) {
    policyNamesSet1.add(policy.getName());
  }
  // Get policies via BlockStoragePolicySuite
  Set<String> policyNamesSet2 = new HashSet<>();
  for (BlockStoragePolicy policy : suite.getAllPolicies()) {
    policyNamesSet2.add(policy.getName());
  }
  // Ensure that we got the same set of policies in both cases.
  Assert.assertTrue(Sets.difference(policyNamesSet1, policyNamesSet2).isEmpty());
  Assert.assertTrue(Sets.difference(policyNamesSet2, policyNamesSet1).isEmpty());
}
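For comparison, here is a minimal standalone sketch of the same HdfsAdmin storage-policy calls outside the JUnit harness. The directory path "/data/archive" is an illustrative assumption; the API calls (setStoragePolicy, getStoragePolicy, unsetStoragePolicy) are the ones the test above exercises.

Configuration conf = new Configuration();
HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
Path dir = new Path("/data/archive");           // hypothetical directory
admin.setStoragePolicy(dir, "COLD");            // pin the subtree to the COLD policy
BlockStoragePolicySpi policy = admin.getStoragePolicy(dir);
System.out.println("Policy on " + dir + ": " + policy.getName());
admin.unsetStoragePolicy(dir);                  // revert to the inherited/default policy (HOT)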
Use of java.util.HashSet in project hadoop by apache.
The class TestInjectionForSimulatedStorage, method testInjection.
/* This test makes sure that the NameNode retries all the available blocks
 * for under-replicated blocks. It uses simulated storage and one of its
 * features to inject blocks.
 *
 * It creates a file with several blocks and a replication factor of 4.
 * The cluster is then shut down - the NN retains its state but the DNs are
 * all simulated and hence lose their blocks.
 * The blocks are then injected into one of the DNs. The expected behaviour is
 * that the NN will arrange for the missing replicas to be copied from a
 * valid source.
 */
@Test
public void testInjection() throws IOException {
  MiniDFSCluster cluster = null;
  String testFile = "/replication-test-file";
  Path testPath = new Path(testFile);
  byte[] buffer = new byte[1024];
  for (int i = 0; i < buffer.length; i++) {
    buffer[i] = '1';
  }
  try {
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, Integer.toString(numDataNodes));
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, checksumSize);
    SimulatedFSDataset.setFactory(conf);
    // first time format
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
    cluster.waitActive();
    String bpid = cluster.getNamesystem().getBlockPoolId();
    DFSClient dfsClient = new DFSClient(
        new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
    DFSTestUtil.createFile(cluster.getFileSystem(), testPath, filesize,
        filesize, blockSize, (short) numDataNodes, 0L);
    waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, 20);
    List<Map<DatanodeStorage, BlockListAsLongs>> blocksList =
        cluster.getAllBlockReports(bpid);
    cluster.shutdown();
    cluster = null;
    /* Start the MiniDFSCluster with more datanodes, since once a writeBlock
     * to a datanode fails, the same block cannot be written to it again
     * immediately. In our case some replication attempts will fail.
     */
    LOG.info("Restarting minicluster");
    conf = new HdfsConfiguration();
    SimulatedFSDataset.setFactory(conf);
    conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.0f");
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(numDataNodes * 2).format(false).build();
    cluster.waitActive();
    Set<Block> uniqueBlocks = new HashSet<Block>();
    for (Map<DatanodeStorage, BlockListAsLongs> map : blocksList) {
      for (BlockListAsLongs blockList : map.values()) {
        for (Block b : blockList) {
          uniqueBlocks.add(new Block(b));
        }
      }
    }
    // Insert all the blocks in the first data node
    LOG.info("Inserting " + uniqueBlocks.size() + " blocks");
    cluster.injectBlocks(0, uniqueBlocks, null);
    dfsClient = new DFSClient(
        new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
    waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, -1);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
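The helper waitForBlockReplication is defined elsewhere in the test class and is not shown here. As a rough, hedged illustration (an assumption, not the test's actual implementation), such a check can be written as a polling loop over the NameNode's reported block locations; the method name, expected count, and timeout parameter below are hypothetical.

// Sketch only: poll until every block of the file has at least `expected`
// replicas, or the (hypothetical) timeout expires.
static void waitForReplication(ClientProtocol namenode, String path,
    int expected, long timeoutMillis) throws IOException, InterruptedException {
  long deadline = Time.monotonicNow() + timeoutMillis;
  while (true) {
    boolean done = true;
    LocatedBlocks blocks = namenode.getBlockLocations(path, 0, Long.MAX_VALUE);
    for (LocatedBlock blk : blocks.getLocatedBlocks()) {
      if (blk.getLocations().length < expected) {
        done = false;
        break;
      }
    }
    if (done) {
      return;
    }
    if (Time.monotonicNow() > deadline) {
      throw new IOException("Timed out waiting for replication of " + path);
    }
    Thread.sleep(500);
  }
}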
Use of java.util.HashSet in project hadoop by apache.
The class TestBalancer, method testBalancerCliWithIncludeList.
/**
 * Test a cluster with even distribution;
 * three nodes are then added to the cluster,
 * and the balancer is run with one of the new nodes in the include list.
 */
@Test(timeout = 100000)
public void testBalancerCliWithIncludeList() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  initConf(conf);
  Set<String> includeHosts = new HashSet<String>();
  includeHosts.add("datanodeY");
  doTest(conf, new long[] { CAPACITY, CAPACITY }, new String[] { RACK0, RACK1 },
      CAPACITY, RACK2,
      new HostNameBasedNodes(new String[] { "datanodeX", "datanodeY", "datanodeZ" },
          BalancerParameters.DEFAULT.getExcludedNodes(), includeHosts),
      true, false);
}
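Outside the doTest() harness, an include set built this way typically reaches the Balancer through BalancerParameters. The sketch below is an assumption about that wiring (the builder method names and DFSUtil.getInternalNsRpcUris should be checked against the Hadoop version in use), not part of the test:

Set<String> included = new HashSet<String>();
included.add("datanodeY");                       // only this host participates in balancing
BalancerParameters.Builder builder = new BalancerParameters.Builder();
builder.setIncludedNodes(included);
BalancerParameters params = builder.build();
// Assumed way to collect the NameNode URIs; verify against your DFSUtil version.
Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
int exitCode = Balancer.run(namenodes, params, conf);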
Use of java.util.HashSet in project hadoop by apache.
The class TestDFSNetworkTopology, method testChooseRandomWithStorageTypeWrapper.
/**
 * This test exercises the wrapper method. The wrapper takes a single scope:
 * if it starts with a ~, it is an excluded scope and the search always starts
 * from the root; otherwise it is the desired scope.
 * @throws Exception throws exception.
 */
@Test
public void testChooseRandomWithStorageTypeWrapper() throws Exception {
  Node n;
  DatanodeDescriptor dd;
  n = CLUSTER.chooseRandomWithStorageType("/l2/d3/r4", null, null,
      StorageType.ARCHIVE);
  HashSet<Node> excluded = new HashSet<>();
  // exclude the host on r4 (since there is only one host, no randomness here)
  excluded.add(n);
  // search with the given scope being the desired scope
  for (int i = 0; i < 10; i++) {
    n = CLUSTER.chooseRandomWithStorageType("/l2/d3", null, StorageType.ARCHIVE);
    assertTrue(n instanceof DatanodeDescriptor);
    dd = (DatanodeDescriptor) n;
    assertTrue(dd.getHostName().equals("host12") || dd.getHostName().equals("host13"));
  }
  for (int i = 0; i < 10; i++) {
    n = CLUSTER.chooseRandomWithStorageType("/l2/d3", excluded, StorageType.ARCHIVE);
    assertTrue(n instanceof DatanodeDescriptor);
    dd = (DatanodeDescriptor) n;
    assertTrue(dd.getHostName().equals("host13"));
  }
  // so if we exclude /l2/d4/r1, it should always be either host7 or host10
  for (int i = 0; i < 10; i++) {
    n = CLUSTER.chooseRandomWithStorageType("~/l2/d4", null, StorageType.RAM_DISK);
    assertTrue(n instanceof DatanodeDescriptor);
    dd = (DatanodeDescriptor) n;
    assertTrue(dd.getHostName().equals("host7") || dd.getHostName().equals("host10"));
  }
  // similar to above, except that we also exclude host10 here, so it should
  // always be host7
  n = CLUSTER.chooseRandomWithStorageType("/l2/d3/r2", null, null,
      StorageType.RAM_DISK);
  // add host10 to excluded
  excluded.add(n);
  for (int i = 0; i < 10; i++) {
    n = CLUSTER.chooseRandomWithStorageType("~/l2/d4", excluded, StorageType.RAM_DISK);
    assertTrue(n instanceof DatanodeDescriptor);
    dd = (DatanodeDescriptor) n;
    assertTrue(dd.getHostName().equals("host7"));
  }
}
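The same exclusion pattern, shown in isolation (assuming the same CLUSTER topology object as above): previously chosen nodes are collected in a HashSet and passed back to the wrapper, so successive calls never return the same datanode twice.

Set<Node> alreadyChosen = new HashSet<>();
for (int i = 0; i < 3; i++) {
  Node picked = CLUSTER.chooseRandomWithStorageType("~/l2/d4", alreadyChosen,
      StorageType.RAM_DISK);
  if (picked == null) {
    break;                      // defensive: nothing eligible left outside /l2/d4
  }
  alreadyChosen.add(picked);    // exclude this node from the next iteration
}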
Use of java.util.HashSet in project hadoop by apache.
The class ContainerLauncherImpl, method serviceStart.
protected void serviceStart() throws Exception {
  ThreadFactory tf = new ThreadFactoryBuilder()
      .setNameFormat("ContainerLauncher #%d").setDaemon(true).build();
  // Start with a default core-pool size of 10 and change it dynamically.
  launcherPool = new HadoopThreadPoolExecutor(initialPoolSize, Integer.MAX_VALUE,
      1, TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>(), tf);
  eventHandlingThread = new Thread() {
    @Override
    public void run() {
      ContainerLauncherEvent event = null;
      Set<String> allNodes = new HashSet<String>();
      while (!stopped.get() && !Thread.currentThread().isInterrupted()) {
        try {
          event = eventQueue.take();
        } catch (InterruptedException e) {
          if (!stopped.get()) {
            LOG.error("Returning, interrupted : " + e);
          }
          return;
        }
        allNodes.add(event.getContainerMgrAddress());
        int poolSize = launcherPool.getCorePoolSize();
        // Grow the pool only if we haven't reached the maximum limit yet.
        if (poolSize != limitOnPoolSize) {
          // Ideally the pool is sized to the number of nodes where containers
          // will run at *this* point of time. This is *not* the cluster size
          // and doesn't need to be.
          int numNodes = allNodes.size();
          int idealPoolSize = Math.min(limitOnPoolSize, numNodes);
          if (poolSize < idealPoolSize) {
            // Bump up the pool size to idealPoolSize + initialPoolSize; the
            // latter is just a buffer so we are not always increasing the
            // pool size.
            int newPoolSize = Math.min(limitOnPoolSize, idealPoolSize + initialPoolSize);
            LOG.info("Setting ContainerLauncher pool size to " + newPoolSize
                + " as number-of-nodes to talk to is " + numNodes);
            launcherPool.setCorePoolSize(newPoolSize);
          }
        }
        // the events from the queue are handled in parallel
        // using a thread pool
        launcherPool.execute(createEventProcessor(event));
        // TODO: Group launching of multiple containers to a single
        // NodeManager into a single connection
      }
    }
  };
  eventHandlingThread.setName("ContainerLauncher Event Handler");
  eventHandlingThread.start();
  super.serviceStart();
}
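As a worked example of the sizing rule (the initial pool size of 10 comes from the comment above; a configured limit of 500 is a hypothetical value): when the 11th distinct NodeManager address is added to allNodes, idealPoolSize becomes min(500, 11) = 11, which exceeds the current core size of 10, so the core pool is resized to min(500, 11 + 10) = 21. The next resize is not triggered until the 22nd distinct address appears, which is exactly the head-room that adding initialPoolSize to idealPoolSize buys.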