Example 91 with HashSet

use of java.util.HashSet in project hadoop by apache.

the class TestHdfsAdmin method testHdfsAdminStoragePolicies.

/**
   * Test that we can set, get, unset storage policies via {@link HdfsAdmin}.
   */
@Test
public void testHdfsAdminStoragePolicies() throws Exception {
    HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
    FileSystem fs = FileSystem.get(conf);
    final Path foo = new Path("/foo");
    final Path bar = new Path(foo, "bar");
    final Path wow = new Path(bar, "wow");
    DFSTestUtil.createFile(fs, wow, SIZE, REPL, 0);
    final BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
    final BlockStoragePolicy warm = suite.getPolicy("WARM");
    final BlockStoragePolicy cold = suite.getPolicy("COLD");
    final BlockStoragePolicy hot = suite.getPolicy("HOT");
    /*
     * test: set storage policy
     */
    hdfsAdmin.setStoragePolicy(foo, warm.getName());
    hdfsAdmin.setStoragePolicy(bar, cold.getName());
    hdfsAdmin.setStoragePolicy(wow, hot.getName());
    /*
     * test: get storage policy after set
     */
    assertEquals(hdfsAdmin.getStoragePolicy(foo), warm);
    assertEquals(hdfsAdmin.getStoragePolicy(bar), cold);
    assertEquals(hdfsAdmin.getStoragePolicy(wow), hot);
    /*
     * test: unset storage policy
     */
    hdfsAdmin.unsetStoragePolicy(foo);
    hdfsAdmin.unsetStoragePolicy(bar);
    hdfsAdmin.unsetStoragePolicy(wow);
    /*
     * test: get storage policy after unset. HOT by default.
     */
    assertEquals(hdfsAdmin.getStoragePolicy(foo), hot);
    assertEquals(hdfsAdmin.getStoragePolicy(bar), hot);
    assertEquals(hdfsAdmin.getStoragePolicy(wow), hot);
    /*
     * test: get all storage policies
     */
    // Get policies via HdfsAdmin
    Set<String> policyNamesSet1 = new HashSet<>();
    for (BlockStoragePolicySpi policy : hdfsAdmin.getAllStoragePolicies()) {
        policyNamesSet1.add(policy.getName());
    }
    // Get policies via BlockStoragePolicySuite
    Set<String> policyNamesSet2 = new HashSet<>();
    for (BlockStoragePolicy policy : suite.getAllPolicies()) {
        policyNamesSet2.add(policy.getName());
    }
    // Ensure that we got the same set of policies in both cases.
    Assert.assertTrue(Sets.difference(policyNamesSet1, policyNamesSet2).isEmpty());
    Assert.assertTrue(Sets.difference(policyNamesSet2, policyNamesSet1).isEmpty());
}
Also used : Path(org.apache.hadoop.fs.Path) BlockStoragePolicySuite(org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite) HdfsAdmin(org.apache.hadoop.hdfs.client.HdfsAdmin) FileSystem(org.apache.hadoop.fs.FileSystem) BlockStoragePolicy(org.apache.hadoop.hdfs.protocol.BlockStoragePolicy) BlockStoragePolicySpi(org.apache.hadoop.fs.BlockStoragePolicySpi) HashSet(java.util.HashSet) Test(org.junit.Test)
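
A minimal standalone sketch of the same HashSet comparison, using illustrative policy names rather than values read from a live cluster: two HashSets holding the same names are equal regardless of insertion order, which is the property the two Sets.difference(...).isEmpty() assertions rely on.

import java.util.HashSet;
import java.util.Set;

public class PolicyNameComparisonSketch {
    public static void main(String[] args) {
        // Hypothetical names standing in for hdfsAdmin.getAllStoragePolicies().
        Set<String> policyNamesSet1 = new HashSet<>();
        policyNamesSet1.add("HOT");
        policyNamesSet1.add("WARM");
        policyNamesSet1.add("COLD");

        // Hypothetical names standing in for suite.getAllPolicies().
        Set<String> policyNamesSet2 = new HashSet<>();
        policyNamesSet2.add("COLD");
        policyNamesSet2.add("WARM");
        policyNamesSet2.add("HOT");

        // HashSet equality ignores insertion order, so equals() expresses the
        // same check as the two Sets.difference(...).isEmpty() assertions.
        System.out.println(policyNamesSet1.equals(policyNamesSet2)); // true
    }
}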

Example 92 with HashSet

use of java.util.HashSet in project hadoop by apache.

the class TestInjectionForSimulatedStorage method testInjection.

/* This test makes sure that the NameNode retries all the available blocks
   * for under-replicated blocks. This test uses simulated storage and one
   * of its features to inject blocks.
   *
   * It creates a file with several blocks and a replication factor of 4.
   * The cluster is then shut down - the NN retains its state but the DNs are
   * all simulated and hence lose their blocks.
   * The blocks are then injected into one of the DNs. The expected behaviour is
   * that the NN will arrange for the missing replicas to be copied from a valid source.
   */
@Test
public void testInjection() throws IOException {
    MiniDFSCluster cluster = null;
    String testFile = "/replication-test-file";
    Path testPath = new Path(testFile);
    byte[] buffer = new byte[1024];
    for (int i = 0; i < buffer.length; i++) {
        buffer[i] = '1';
    }
    try {
        Configuration conf = new HdfsConfiguration();
        conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, Integer.toString(numDataNodes));
        conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, checksumSize);
        SimulatedFSDataset.setFactory(conf);
        //first time format
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
        cluster.waitActive();
        String bpid = cluster.getNamesystem().getBlockPoolId();
        DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
        DFSTestUtil.createFile(cluster.getFileSystem(), testPath, filesize, filesize, blockSize, (short) numDataNodes, 0L);
        waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, 20);
        List<Map<DatanodeStorage, BlockListAsLongs>> blocksList = cluster.getAllBlockReports(bpid);
        cluster.shutdown();
        cluster = null;
        /* Start the MiniDFSCluster with more datanodes since once a writeBlock
         * to a datanode fails, the same block cannot be written to it
         * immediately. In our case some replication attempts will fail.
         */
        LOG.info("Restarting minicluster");
        conf = new HdfsConfiguration();
        SimulatedFSDataset.setFactory(conf);
        conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.0f");
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes * 2).format(false).build();
        cluster.waitActive();
        Set<Block> uniqueBlocks = new HashSet<Block>();
        for (Map<DatanodeStorage, BlockListAsLongs> map : blocksList) {
            for (BlockListAsLongs blockList : map.values()) {
                for (Block b : blockList) {
                    uniqueBlocks.add(new Block(b));
                }
            }
        }
        // Insert all the blocks in the first data node
        LOG.info("Inserting " + uniqueBlocks.size() + " blocks");
        cluster.injectBlocks(0, uniqueBlocks, null);
        dfsClient = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
        waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, -1);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) InetSocketAddress(java.net.InetSocketAddress) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) BlockListAsLongs(org.apache.hadoop.hdfs.protocol.BlockListAsLongs) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) Map(java.util.Map) HashSet(java.util.HashSet) Test(org.junit.Test)
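
The HashSet here deduplicates the replicas gathered from every datanode's block report before injection; HDFS Block equality is keyed on the block id, which is what lets the set collapse duplicates. A Hadoop-free sketch of that pattern, with made-up block ids:

import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class BlockDedupSketch {
    public static void main(String[] args) {
        // Hypothetical per-datanode block reports; the same block id appears in
        // several reports because the file was written with replication 4.
        List<List<String>> blockReports = List.of(
                List.of("blk_1", "blk_2"),
                List.of("blk_2", "blk_3"),
                List.of("blk_1", "blk_3"));

        Set<String> uniqueBlocks = new HashSet<>();
        for (List<String> report : blockReports) {
            // Duplicate ids are silently dropped by the HashSet.
            uniqueBlocks.addAll(report);
        }

        // Prints 3, not 6: one entry per distinct block, ready for injection.
        System.out.println("Inserting " + uniqueBlocks.size() + " blocks");
    }
}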

Example 93 with HashSet

use of java.util.HashSet in project hadoop by apache.

the class TestBalancer method testBalancerCliWithIncludeList.

/**
   * Test a cluster with even distribution,
   * then three nodes are added to the cluster,
   * runs balancer with two of the nodes in the include list
   */
@Test(timeout = 100000)
public void testBalancerCliWithIncludeList() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    initConf(conf);
    Set<String> includeHosts = new HashSet<String>();
    includeHosts.add("datanodeY");
    doTest(conf, new long[] { CAPACITY, CAPACITY }, new String[] { RACK0, RACK1 }, CAPACITY, RACK2, new HostNameBasedNodes(new String[] { "datanodeX", "datanodeY", "datanodeZ" }, BalancerParameters.DEFAULT.getExcludedNodes(), includeHosts), true, false);
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) HashSet(java.util.HashSet) Test(org.junit.Test)
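
The include list is ordinary HashSet membership. A small sketch, reusing the test's host names purely as placeholders, of the kind of filtering an include list implies (this is not the balancer's actual code path):

import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class IncludeListSketch {
    public static void main(String[] args) {
        // Placeholder host names mirroring the test's datanodeX/Y/Z.
        List<String> allHosts = List.of("datanodeX", "datanodeY", "datanodeZ");

        Set<String> includeHosts = new HashSet<>();
        includeHosts.add("datanodeY");

        // Only hosts present in the include set are considered.
        for (String host : allHosts) {
            if (includeHosts.contains(host)) {
                System.out.println("included: " + host);
            }
        }
    }
}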

Example 94 with HashSet

use of java.util.HashSet in project hadoop by apache.

the class TestDFSNetworkTopology method testChooseRandomWithStorageTypeWrapper.

/**
   * This test tests the wrapper method. The wrapper method only takes one scope
   * where if it starts with a ~, it is an excluded scope, and searching always
   * from root. Otherwise it is a scope.
   * @throws Exception throws exception.
   */
@Test
public void testChooseRandomWithStorageTypeWrapper() throws Exception {
    Node n;
    DatanodeDescriptor dd;
    n = CLUSTER.chooseRandomWithStorageType("/l2/d3/r4", null, null, StorageType.ARCHIVE);
    HashSet<Node> excluded = new HashSet<>();
    // exclude the host on r4 (since there is only one host, no randomness here)
    excluded.add(n);
    // search with the given scope as the desired scope
    for (int i = 0; i < 10; i++) {
        n = CLUSTER.chooseRandomWithStorageType("/l2/d3", null, StorageType.ARCHIVE);
        assertTrue(n instanceof DatanodeDescriptor);
        dd = (DatanodeDescriptor) n;
        assertTrue(dd.getHostName().equals("host12") || dd.getHostName().equals("host13"));
    }
    for (int i = 0; i < 10; i++) {
        n = CLUSTER.chooseRandomWithStorageType("/l2/d3", excluded, StorageType.ARCHIVE);
        assertTrue(n instanceof DatanodeDescriptor);
        dd = (DatanodeDescriptor) n;
        assertTrue(dd.getHostName().equals("host13"));
    }
    // so if we exclude /l2/d4/r1, it should always be either host7 or host10
    for (int i = 0; i < 10; i++) {
        n = CLUSTER.chooseRandomWithStorageType("~/l2/d4", null, StorageType.RAM_DISK);
        assertTrue(n instanceof DatanodeDescriptor);
        dd = (DatanodeDescriptor) n;
        assertTrue(dd.getHostName().equals("host7") || dd.getHostName().equals("host10"));
    }
    // similar to the above, except that we also exclude host10 here, so it
    // should always be host7
    n = CLUSTER.chooseRandomWithStorageType("/l2/d3/r2", null, null, StorageType.RAM_DISK);
    // add host10 to exclude
    excluded.add(n);
    for (int i = 0; i < 10; i++) {
        n = CLUSTER.chooseRandomWithStorageType("~/l2/d4", excluded, StorageType.RAM_DISK);
        assertTrue(n instanceof DatanodeDescriptor);
        dd = (DatanodeDescriptor) n;
        assertTrue(dd.getHostName().equals("host7"));
    }
}
Also used : DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) Node(org.apache.hadoop.net.Node) HashSet(java.util.HashSet) Test(org.junit.Test)
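
The excluded HashSet is what makes the repeated random draws deterministic once only one candidate is left. A standalone sketch of that pattern with hypothetical host names and a made-up chooseRandom helper (not the DFSNetworkTopology API):

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Random;
import java.util.Set;

public class ExcludedSetSketch {
    // Pick a random candidate that is not in the excluded set, or null if
    // everything is excluded. This is an illustrative helper only.
    static String chooseRandom(List<String> candidates, Set<String> excluded, Random rng) {
        List<String> remaining = new ArrayList<>();
        for (String c : candidates) {
            if (!excluded.contains(c)) {
                remaining.add(c);
            }
        }
        return remaining.isEmpty() ? null : remaining.get(rng.nextInt(remaining.size()));
    }

    public static void main(String[] args) {
        List<String> hosts = List.of("host7", "host10", "host12", "host13");

        Set<String> excluded = new HashSet<>();
        excluded.add("host10");
        excluded.add("host12");
        excluded.add("host13");

        Random rng = new Random();
        // With everything but host7 excluded, every draw returns host7,
        // just like the last loop of the test above.
        for (int i = 0; i < 10; i++) {
            System.out.println(chooseRandom(hosts, excluded, rng));
        }
    }
}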

Example 95 with HashSet

use of java.util.HashSet in project hadoop by apache.

the class ContainerLauncherImpl method serviceStart.

protected void serviceStart() throws Exception {
    ThreadFactory tf = new ThreadFactoryBuilder().setNameFormat("ContainerLauncher #%d").setDaemon(true).build();
    // Start with a default core-pool size of 10 and change it dynamically.
    launcherPool = new HadoopThreadPoolExecutor(initialPoolSize, Integer.MAX_VALUE, 1, TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>(), tf);
    eventHandlingThread = new Thread() {

        @Override
        public void run() {
            ContainerLauncherEvent event = null;
            Set<String> allNodes = new HashSet<String>();
            while (!stopped.get() && !Thread.currentThread().isInterrupted()) {
                try {
                    event = eventQueue.take();
                } catch (InterruptedException e) {
                    if (!stopped.get()) {
                        LOG.error("Returning, interrupted : " + e);
                    }
                    return;
                }
                allNodes.add(event.getContainerMgrAddress());
                int poolSize = launcherPool.getCorePoolSize();
                // Adjust the pool size only if we haven't reached the maximum limit yet.
                if (poolSize != limitOnPoolSize) {
                    // numNodes is the number of nodes where containers will run at
                    // *this* point in time. This is *not* the cluster size and
                    // doesn't need to be.
                    int numNodes = allNodes.size();
                    int idealPoolSize = Math.min(limitOnPoolSize, numNodes);
                    if (poolSize < idealPoolSize) {
                        // Bump up the pool size to idealPoolSize + initialPoolSize; the
                        // latter is just a buffer so we are not always increasing the
                        // pool size
                        int newPoolSize = Math.min(limitOnPoolSize, idealPoolSize + initialPoolSize);
                        LOG.info("Setting ContainerLauncher pool size to " + newPoolSize + " as number-of-nodes to talk to is " + numNodes);
                        launcherPool.setCorePoolSize(newPoolSize);
                    }
                }
                // the events from the queue are handled in parallel
                // using a thread pool
                launcherPool.execute(createEventProcessor(event));
            // TODO: Group launching of multiple containers to a single
            // NodeManager into a single connection
            }
        }
    };
    eventHandlingThread.setName("ContainerLauncher Event Handler");
    eventHandlingThread.start();
    super.serviceStart();
}
Also used : ThreadFactory(java.util.concurrent.ThreadFactory) HashSet(java.util.HashSet) Set(java.util.Set) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) HadoopThreadPoolExecutor(org.apache.hadoop.util.concurrent.HadoopThreadPoolExecutor)
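
The HashSet only tracks distinct container-manager addresses so the core pool can grow with the number of nodes actually in play. A self-contained sketch of that sizing rule, with assumed values for initialPoolSize and limitOnPoolSize and made-up node addresses:

import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class DynamicPoolSizingSketch {
    public static void main(String[] args) {
        final int initialPoolSize = 2;    // assumed starting core size
        final int limitOnPoolSize = 500;  // assumed hard cap

        ThreadPoolExecutor launcherPool = new ThreadPoolExecutor(initialPoolSize,
                Integer.MAX_VALUE, 1, TimeUnit.HOURS, new LinkedBlockingQueue<>());

        // Made-up container-manager addresses; note the duplicate node1 entry.
        String[] addresses = {"node1:8041", "node2:8041", "node1:8041", "node3:8041", "node4:8041"};

        Set<String> allNodes = new HashSet<>();
        for (String address : addresses) {
            // The HashSet ignores duplicates, so size() is the number of
            // distinct nodes seen so far.
            allNodes.add(address);
            int poolSize = launcherPool.getCorePoolSize();
            int idealPoolSize = Math.min(limitOnPoolSize, allNodes.size());
            if (poolSize < idealPoolSize) {
                // idealPoolSize + initialPoolSize adds a small buffer so the
                // pool is not resized on every new node.
                int newPoolSize = Math.min(limitOnPoolSize, idealPoolSize + initialPoolSize);
                launcherPool.setCorePoolSize(newPoolSize);
            }
        }

        System.out.println("final core pool size: " + launcherPool.getCorePoolSize()); // 5
        launcherPool.shutdown();
    }
}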

Aggregations

HashSet (java.util.HashSet): 12137
Set (java.util.Set): 2609
ArrayList (java.util.ArrayList): 2318
HashMap (java.util.HashMap): 2096
Test (org.junit.Test): 2060
Map (java.util.Map): 1198
Iterator (java.util.Iterator): 979
IOException (java.io.IOException): 934
List (java.util.List): 911
File (java.io.File): 607
LinkedHashSet (java.util.LinkedHashSet): 460
Test (org.testng.annotations.Test): 460
TreeSet (java.util.TreeSet): 271
Collection (java.util.Collection): 233
LinkedList (java.util.LinkedList): 224
Region (org.apache.geode.cache.Region): 202
SSOException (com.iplanet.sso.SSOException): 188
Date (java.util.Date): 180
LinkedHashMap (java.util.LinkedHashMap): 169
PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion): 166