
Example 21 with Set

Use of java.util.Set in project groovy by apache.

The class ObservableList, method retainAll:

public boolean retainAll(Collection c) {
    if (c == null) {
        return false;
    }
    List values = new ArrayList();
    // GROOVY-7783 use Set for O(1) performance for contains
    if (!(c instanceof Set)) {
        c = new HashSet<Object>(c);
    }
    for (Object element : delegate) {
        if (!c.contains(element)) {
            values.add(element);
        }
    }
    int oldSize = size();
    boolean success = delegate.retainAll(c);
    if (success && !values.isEmpty()) {
        fireMultiElementRemovedEvent(values);
        fireSizeChangedEvent(oldSize, size());
    }
    return success;
}
Also used : HashSet(java.util.HashSet) Set(java.util.Set) ArrayList(java.util.ArrayList) List(java.util.List)
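
The GROOVY-7783 fix works because List.contains is a linear scan while HashSet.contains is constant time on average, so converting the argument once turns an O(n*m) retainAll into roughly O(n + m). A minimal standalone sketch, not taken from the Groovy sources, that makes the difference visible (class and variable names are ours):

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;

public class RetainAllCostDemo {

    public static void main(String[] args) {
        List<Integer> data = new ArrayList<>();
        for (int i = 0; i < 50_000; i++) {
            data.add(i);
        }
        // keep half the elements, presented as a List (O(n) contains)
        Collection<Integer> keep = new ArrayList<>(data.subList(0, 25_000));

        long t0 = System.nanoTime();
        new ArrayList<>(data).retainAll(keep);
        long listNanos = System.nanoTime() - t0;

        // same elements behind a HashSet with O(1) average contains
        long t1 = System.nanoTime();
        new ArrayList<>(data).retainAll(new HashSet<>(keep));
        long setNanos = System.nanoTime() - t1;

        System.out.printf("List arg: %d ms, Set arg: %d ms%n",
                listNanos / 1_000_000, setNanos / 1_000_000);
    }
}

Absolute timings depend on the machine, but at this size the List-argument variant is typically dramatically slower.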

Example 22 with Set

Use of java.util.Set in project hadoop by apache.

The class TestKMS, method testDelegationTokensOps:

private void testDelegationTokensOps(final boolean ssl, final boolean kerb) throws Exception {
    final File confDir = getTestDir();
    final Configuration conf;
    if (kerb) {
        conf = setupConfForKerberos(confDir);
    } else {
        conf = createBaseKMSConf(confDir, null);
    }
    final String keystore;
    final String password;
    if (ssl) {
        final String sslConfDir = KeyStoreTestUtil.getClasspathDir(TestKMS.class);
        KeyStoreTestUtil.setupSSLConfig(confDir.getAbsolutePath(), sslConfDir, conf, false);
        keystore = confDir.getAbsolutePath() + "/serverKS.jks";
        password = "serverP";
    } else {
        keystore = null;
        password = null;
    }
    writeConf(confDir, conf);
    runServer(keystore, password, confDir, new KMSCallable<Void>() {

        @Override
        public Void call() throws Exception {
            final Configuration clientConf = new Configuration();
            final URI uri = createKMSUri(getKMSUrl());
            clientConf.set(KeyProviderFactory.KEY_PROVIDER_PATH, createKMSUri(getKMSUrl()).toString());
            doAs("client", new PrivilegedExceptionAction<Void>() {

                @Override
                public Void run() throws Exception {
                    KeyProvider kp = createProvider(uri, clientConf);
                    // test delegation token retrieval
                    KeyProviderDelegationTokenExtension kpdte = KeyProviderDelegationTokenExtension.createKeyProviderDelegationTokenExtension(kp);
                    final Credentials credentials = new Credentials();
                    final Token<?>[] tokens = kpdte.addDelegationTokens("client1", credentials);
                    Assert.assertEquals(1, credentials.getAllTokens().size());
                    InetSocketAddress kmsAddr = new InetSocketAddress(getKMSUrl().getHost(), getKMSUrl().getPort());
                    Assert.assertEquals(KMSDelegationToken.TOKEN_KIND, credentials.getToken(SecurityUtil.buildTokenService(kmsAddr)).getKind());
                    // Test non-renewer user cannot renew.
                    for (Token<?> token : tokens) {
                        if (!(token.getKind().equals(KMSDelegationToken.TOKEN_KIND))) {
                            LOG.info("Skipping token {}", token);
                            continue;
                        }
                        LOG.info("Got dt for " + uri + "; " + token);
                        try {
                            token.renew(clientConf);
                            Assert.fail("client should not be allowed to renew token with" + "renewer=client1");
                        } catch (Exception e) {
                            final DelegationTokenIdentifier identifier = (DelegationTokenIdentifier) token.decodeIdentifier();
                            GenericTestUtils.assertExceptionContains("tries to renew a token (" + identifier + ") with non-matching renewer", e);
                        }
                    }
                    final UserGroupInformation otherUgi;
                    if (kerb) {
                        UserGroupInformation.loginUserFromKeytab("client1", keytab.getAbsolutePath());
                        otherUgi = UserGroupInformation.getLoginUser();
                    } else {
                        otherUgi = UserGroupInformation.createUserForTesting("client1", new String[] { "other group" });
                        UserGroupInformation.setLoginUser(otherUgi);
                    }
                    try {
                        // test delegation token renewal via renewer
                        otherUgi.doAs(new PrivilegedExceptionAction<Void>() {

                            @Override
                            public Void run() throws Exception {
                                boolean renewed = false;
                                for (Token<?> token : tokens) {
                                    if (!(token.getKind().equals(KMSDelegationToken.TOKEN_KIND))) {
                                        LOG.info("Skipping token {}", token);
                                        continue;
                                    }
                                    LOG.info("Got dt for " + uri + "; " + token);
                                    long tokenLife = token.renew(clientConf);
                                    LOG.info("Renewed token of kind {}, new lifetime:{}", token.getKind(), tokenLife);
                                    Thread.sleep(100);
                                    long newTokenLife = token.renew(clientConf);
                                    LOG.info("Renewed token of kind {}, new lifetime:{}", token.getKind(), newTokenLife);
                                    Assert.assertTrue(newTokenLife > tokenLife);
                                    renewed = true;
                                }
                                Assert.assertTrue(renewed);
                                // test delegation token cancellation
                                for (Token<?> token : tokens) {
                                    if (!(token.getKind().equals(KMSDelegationToken.TOKEN_KIND))) {
                                        LOG.info("Skipping token {}", token);
                                        continue;
                                    }
                                    LOG.info("Got dt for " + uri + "; " + token);
                                    token.cancel(clientConf);
                                    LOG.info("Cancelled token of kind {}", token.getKind());
                                    try {
                                        token.renew(clientConf);
                                        Assert.fail("should not be able to renew a canceled token");
                                    } catch (Exception e) {
                                        LOG.info("Expected exception when renewing token", e);
                                    }
                                }
                                return null;
                            }
                        });
                        // Close the client provider. We will verify all providers'
                        // Truststore reloader threads are closed later.
                        kp.close();
                        return null;
                    } finally {
                        otherUgi.logoutUserFromKeytab();
                    }
                }
            });
            return null;
        }
    });
    // verify that providers created by KMSTokenRenewer are closed.
    if (ssl) {
        GenericTestUtils.waitFor(new Supplier<Boolean>() {

            @Override
            public Boolean get() {
                final Set<Thread> threadSet = Thread.getAllStackTraces().keySet();
                for (Thread t : threadSet) {
                    if (t.getName().contains(SSL_RELOADER_THREAD_NAME)) {
                        return false;
                    }
                }
                return true;
            }
        }, 1000, 10000);
    }
}
Also used : KeyProvider(org.apache.hadoop.crypto.key.KeyProvider) KeyProviderDelegationTokenExtension(org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension) Set(java.util.Set) HashSet(java.util.HashSet) Configuration(org.apache.hadoop.conf.Configuration) DelegationTokenIdentifier(org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier) InetSocketAddress(java.net.InetSocketAddress) KMSDelegationToken(org.apache.hadoop.crypto.key.kms.KMSDelegationToken) Token(org.apache.hadoop.security.token.Token) PrivilegedExceptionAction(java.security.PrivilegedExceptionAction) URI(java.net.URI) AuthorizationException(org.apache.hadoop.security.authorize.AuthorizationException) SocketTimeoutException(java.net.SocketTimeoutException) IOException(java.io.IOException) File(java.io.File) Credentials(org.apache.hadoop.security.Credentials) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation)
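
The test repeats the same filter-by-kind loop three times (retrieval, renewal, cancellation). As a hedged sketch, that loop could be factored into a helper; Token, Text, and Credentials are the real Hadoop classes used above, but the helper class and method names are ours:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;

public final class TokenFilters {

    private TokenFilters() {
    }

    /**
     * Collect the tokens in creds whose kind matches, e.g.
     * KMSDelegationToken.TOKEN_KIND in the test above.
     */
    public static List<Token<?>> tokensOfKind(Credentials creds, Text kind) {
        List<Token<?>> matches = new ArrayList<>();
        for (Token<?> token : creds.getAllTokens()) {
            if (kind.equals(token.getKind())) {
                matches.add(token);
            }
        }
        return matches;
    }
}

With such a helper, each of the three loops reduces to iterating over the matching tokens and keeping only the renew/cancel logic inline.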

Example 23 with Set

Use of java.util.Set in project hadoop by apache.

The class ContainerLauncherImpl, method serviceStart:

protected void serviceStart() throws Exception {
    ThreadFactory tf = new ThreadFactoryBuilder().setNameFormat("ContainerLauncher #%d").setDaemon(true).build();
    // Start with a default core-pool size of 10 and change it dynamically.
    launcherPool = new HadoopThreadPoolExecutor(initialPoolSize, Integer.MAX_VALUE, 1, TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>(), tf);
    eventHandlingThread = new Thread() {

        @Override
        public void run() {
            ContainerLauncherEvent event = null;
            Set<String> allNodes = new HashSet<String>();
            while (!stopped.get() && !Thread.currentThread().isInterrupted()) {
                try {
                    event = eventQueue.take();
                } catch (InterruptedException e) {
                    if (!stopped.get()) {
                        LOG.error("Returning, interrupted : " + e);
                    }
                    return;
                }
                allNodes.add(event.getContainerMgrAddress());
                int poolSize = launcherPool.getCorePoolSize();
                // Grow the pool only if we haven't reached the
                // maximum limit yet.
                if (poolSize != limitOnPoolSize) {
                    // Base the target size on the number of distinct
                    // nodes where containers will run at *this* point of time. This is
                    // *not* the cluster size and doesn't need to be.
                    int numNodes = allNodes.size();
                    int idealPoolSize = Math.min(limitOnPoolSize, numNodes);
                    if (poolSize < idealPoolSize) {
                        // Bump up the pool size to idealPoolSize + initialPoolSize; the
                        // latter is just a buffer so we are not always increasing the
                        // pool size.
                        int newPoolSize = Math.min(limitOnPoolSize, idealPoolSize + initialPoolSize);
                        LOG.info("Setting ContainerLauncher pool size to " + newPoolSize + " as number-of-nodes to talk to is " + numNodes);
                        launcherPool.setCorePoolSize(newPoolSize);
                    }
                }
                // the events from the queue are handled in parallel
                // using a thread pool
                launcherPool.execute(createEventProcessor(event));
            // TODO: Group launching of multiple containers to a single
            // NodeManager into a single connection
            }
        }
    };
    eventHandlingThread.setName("ContainerLauncher Event Handler");
    eventHandlingThread.start();
    super.serviceStart();
}
Also used : ThreadFactory(java.util.concurrent.ThreadFactory) HashSet(java.util.HashSet) Set(java.util.Set) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) HadoopThreadPoolExecutor(org.apache.hadoop.util.concurrent.HadoopThreadPoolExecutor)
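
The only subtle logic here is the resizing rule: grow the core pool toward min(limitOnPoolSize, number of distinct nodes seen), padded by the initial size so the pool is not resized on every newly seen node. A hedged standalone sketch of just that arithmetic (the values used are examples, not the real MapReduce defaults):

public class PoolSizingSketch {

    /** Returns the core pool size to use after a new node is seen. */
    static int nextCorePoolSize(int current, int limit, int distinctNodes,
            int initialPoolSize) {
        int ideal = Math.min(limit, distinctNodes);
        if (current >= ideal) {
            return current;  // already large enough, leave the pool alone
        }
        // ideal plus a buffer, still capped at the hard limit
        return Math.min(limit, ideal + initialPoolSize);
    }

    public static void main(String[] args) {
        // With limit=500 and initialPoolSize=10, seeing 40 distinct nodes
        // grows the pool from 10 straight to 50 instead of step by step.
        System.out.println(nextCorePoolSize(10, 500, 40, 10));  // prints 50
    }
}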

Example 24 with Set

Use of java.util.Set in project hadoop by apache.

The class TestBalancer, method testMinBlockSizeAndSourceNodes:

/** Balancer should not move blocks with size < minBlockSize. */
@Test(timeout = 60000)
public void testMinBlockSizeAndSourceNodes() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    initConf(conf);
    final short replication = 3;
    final long[] lengths = { 10, 10, 10, 10 };
    final long[] capacities = new long[replication];
    final long totalUsed = capacities.length * sum(lengths);
    Arrays.fill(capacities, 1000);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(capacities.length).simulatedCapacities(capacities).build();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf, dfs.getUri(), ClientProtocol.class).getProxy();
    // write a few small files so the initial datanodes hold some blocks
    for (int i = 0; i < lengths.length; i++) {
        final long size = lengths[i];
        final Path p = new Path("/file" + i + "_size" + size);
        try (final OutputStream out = dfs.create(p)) {
            for (int j = 0; j < size; j++) {
                out.write(j);
            }
        }
    }
    // start up an empty node with the same capacity
    cluster.startDataNodes(conf, capacities.length, true, null, null, capacities);
    LOG.info("capacities    = " + Arrays.toString(capacities));
    LOG.info("totalUsedSpace= " + totalUsed);
    LOG.info("lengths       = " + Arrays.toString(lengths) + ", #=" + lengths.length);
    waitForHeartBeat(totalUsed, 2 * capacities[0] * capacities.length, client, cluster);
    final Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
    {
        // run Balancer with min-block-size=50
        final BalancerParameters p = Balancer.Cli.parse(new String[] { "-policy", BalancingPolicy.Node.INSTANCE.getName(), "-threshold", "1" });
        assertEquals(p.getBalancingPolicy(), BalancingPolicy.Node.INSTANCE);
        assertEquals(p.getThreshold(), 1.0, 0.001);
        conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 50);
        final int r = Balancer.run(namenodes, p, conf);
        assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r);
    }
    conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1);
    {
        // run Balancer with empty nodes as source nodes
        final Set<String> sourceNodes = new HashSet<>();
        final List<DataNode> datanodes = cluster.getDataNodes();
        for (int i = capacities.length; i < datanodes.size(); i++) {
            sourceNodes.add(datanodes.get(i).getDisplayName());
        }
        final BalancerParameters p = Balancer.Cli.parse(new String[] { "-policy", BalancingPolicy.Node.INSTANCE.getName(), "-threshold", "1", "-source", StringUtils.join(sourceNodes, ',') });
        assertEquals(p.getBalancingPolicy(), BalancingPolicy.Node.INSTANCE);
        assertEquals(p.getThreshold(), 1.0, 0.001);
        assertEquals(p.getSourceNodes(), sourceNodes);
        conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 50);
        final int r = Balancer.run(namenodes, p, conf);
        assertEquals(ExitStatus.NO_MOVE_BLOCK.getExitCode(), r);
    }
    {
        // run Balancer with a filled node as a source node
        final Set<String> sourceNodes = new HashSet<>();
        final List<DataNode> datanodes = cluster.getDataNodes();
        sourceNodes.add(datanodes.get(0).getDisplayName());
        final BalancerParameters p = Balancer.Cli.parse(new String[] { "-policy", BalancingPolicy.Node.INSTANCE.getName(), "-threshold", "1", "-source", StringUtils.join(sourceNodes, ',') });
        assertEquals(p.getBalancingPolicy(), BalancingPolicy.Node.INSTANCE);
        assertEquals(p.getThreshold(), 1.0, 0.001);
        assertEquals(p.getSourceNodes(), sourceNodes);
        conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1);
        final int r = Balancer.run(namenodes, p, conf);
        assertEquals(ExitStatus.NO_MOVE_BLOCK.getExitCode(), r);
    }
    {
        // run Balancer with all filled node as source nodes
        final Set<String> sourceNodes = new HashSet<>();
        final List<DataNode> datanodes = cluster.getDataNodes();
        for (int i = 0; i < capacities.length; i++) {
            sourceNodes.add(datanodes.get(i).getDisplayName());
        }
        final BalancerParameters p = Balancer.Cli.parse(new String[] { "-policy", BalancingPolicy.Node.INSTANCE.getName(), "-threshold", "1", "-source", StringUtils.join(sourceNodes, ',') });
        assertEquals(p.getBalancingPolicy(), BalancingPolicy.Node.INSTANCE);
        assertEquals(p.getThreshold(), 1.0, 0.001);
        assertEquals(p.getSourceNodes(), sourceNodes);
        conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1);
        final int r = Balancer.run(namenodes, p, conf);
        assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Set(java.util.Set) HashSet(java.util.HashSet) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) OutputStream(java.io.OutputStream) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) URI(java.net.URI) ArrayList(java.util.ArrayList) List(java.util.List) Test(org.junit.Test)
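
Each of the three blocks builds the same argument vector and only varies the -source set. A hedged sketch of that pattern (the helper is ours; it assumes the same package-level access to Balancer.Cli.parse that the test itself relies on, and joins the node names with plain String.join instead of the test's StringUtils):

import java.util.Set;
import org.apache.hadoop.hdfs.server.balancer.Balancer;
import org.apache.hadoop.hdfs.server.balancer.BalancerParameters;
import org.apache.hadoop.hdfs.server.balancer.BalancingPolicy;

// Assumed to live in org.apache.hadoop.hdfs.server.balancer, like the test.
final class SourceParamsSketch {

    private SourceParamsSketch() {
    }

    /** Build the -policy/-threshold/-source parameters used three times above. */
    static BalancerParameters withSources(Set<String> sourceNodes) {
        return Balancer.Cli.parse(new String[] {
            "-policy", BalancingPolicy.Node.INSTANCE.getName(),
            "-threshold", "1",
            "-source", String.join(",", sourceNodes)
        });
    }
}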

Example 25 with Set

Use of java.util.Set in project hadoop by apache.

The class TestBalancerWithMultipleNameNodes, method unevenDistribution:

/**
   * First start a cluster and fill the cluster up to a certain size. Then
   * redistribute blocks according to the required distribution. Finally, balance
   * the cluster.
   *
   * @param nNameNodes Number of NameNodes
   * @param nNameNodesToBalance Number of NameNodes to run the balancer on
   * @param distributionPerNN The distribution for each NameNode.
   * @param capacities Capacities of the datanodes
   * @param racks Rack names
   * @param conf Configuration
   */
private void unevenDistribution(final int nNameNodes, final int nNameNodesToBalance, long[] distributionPerNN, long[] capacities, String[] racks, Configuration conf) throws Exception {
    LOG.info("UNEVEN 0");
    final int nDataNodes = distributionPerNN.length;
    if (capacities.length != nDataNodes || racks.length != nDataNodes) {
        throw new IllegalArgumentException("Array length is not the same");
    }
    if (nNameNodesToBalance > nNameNodes) {
        throw new IllegalArgumentException("Number of namenodes to balance is " + "greater than the number of namenodes.");
    }
    // calculate total space that needs to be filled
    final long usedSpacePerNN = TestBalancer.sum(distributionPerNN);
    // fill the cluster
    final ExtendedBlock[][] blocks;
    {
        LOG.info("UNEVEN 1");
        final MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration(conf)).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(nNameNodes)).numDataNodes(nDataNodes).racks(racks).simulatedCapacities(capacities).build();
        LOG.info("UNEVEN 2");
        try {
            cluster.waitActive();
            DFSTestUtil.setFederatedConfiguration(cluster, conf);
            LOG.info("UNEVEN 3");
            final Suite s = new Suite(cluster, nNameNodes, nDataNodes, null, conf);
            blocks = generateBlocks(s, usedSpacePerNN);
            LOG.info("UNEVEN 4");
        } finally {
            cluster.shutdown();
        }
    }
    conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.0f");
    {
        LOG.info("UNEVEN 10");
        final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(nNameNodes)).numDataNodes(nDataNodes).racks(racks).simulatedCapacities(capacities).format(false).build();
        LOG.info("UNEVEN 11");
        try {
            cluster.waitActive();
            LOG.info("UNEVEN 12");
            Set<String> blockpools = new HashSet<String>();
            for (int i = 0; i < nNameNodesToBalance; i++) {
                blockpools.add(cluster.getNamesystem(i).getBlockPoolId());
            }
            BalancerParameters.Builder b = new BalancerParameters.Builder();
            b.setBlockpools(blockpools);
            BalancerParameters params = b.build();
            final Suite s = new Suite(cluster, nNameNodes, nDataNodes, params, conf);
            for (int n = 0; n < nNameNodes; n++) {
                // redistribute blocks
                final Block[][] blocksDN = TestBalancer.distributeBlocks(blocks[n], s.replication, distributionPerNN);
                for (int d = 0; d < blocksDN.length; d++) cluster.injectBlocks(n, d, Arrays.asList(blocksDN[d]));
                LOG.info("UNEVEN 13: n=" + n);
            }
            final long totalCapacity = TestBalancer.sum(capacities);
            final long totalUsed = nNameNodes * usedSpacePerNN;
            LOG.info("UNEVEN 14");
            runBalancer(s, totalUsed, totalCapacity);
            LOG.info("UNEVEN 15");
        } finally {
            cluster.shutdown();
        }
        LOG.info("UNEVEN 16");
    }
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) HashSet(java.util.HashSet) Set(java.util.Set) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) BalancerParameters(org.apache.hadoop.hdfs.server.balancer.BalancerParameters) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Block(org.apache.hadoop.hdfs.protocol.Block)
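
The key detail is setBlockpools: the balancer only considers blocks belonging to the listed pools, which is how the test balances nNameNodesToBalance of the federated namespaces and leaves the others untouched. A hedged fragment isolating that setup; the pool IDs here are hypothetical, whereas the test obtains real ones from cluster.getNamesystem(i).getBlockPoolId():

import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.hdfs.server.balancer.BalancerParameters;

public class BlockpoolScopedParamsSketch {

    public static void main(String[] args) {
        // Hypothetical pool IDs; real ones look like "BP-<rand>-<ip>-<time>".
        Set<String> blockpools = new HashSet<>();
        blockpools.add("BP-1-127.0.0.1-1000");
        blockpools.add("BP-2-127.0.0.1-2000");

        // Mirrors the non-chained Builder usage in the test above.
        BalancerParameters.Builder b = new BalancerParameters.Builder();
        b.setBlockpools(blockpools);
        BalancerParameters params = b.build();

        System.out.println("Balancer restricted to pools: " + blockpools);
    }
}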

Aggregations

Set (java.util.Set): 6789 usages
HashSet (java.util.HashSet): 4372 usages
HashMap (java.util.HashMap): 2090 usages
Map (java.util.Map): 1865 usages
Iterator (java.util.Iterator): 1774 usages
ArrayList (java.util.ArrayList): 1113 usages
List (java.util.List): 980 usages
Test (org.junit.Test): 920 usages
TreeSet (java.util.TreeSet): 536 usages
IOException (java.io.IOException): 501 usages
SSOException (com.iplanet.sso.SSOException): 467 usages
LinkedHashSet (java.util.LinkedHashSet): 418 usages
SMSException (com.sun.identity.sm.SMSException): 347 usages
IdRepoException (com.sun.identity.idm.IdRepoException): 268 usages
Collection (java.util.Collection): 259 usages
ImmutableSet (com.google.common.collect.ImmutableSet): 256 usages
File (java.io.File): 245 usages
SSOToken (com.iplanet.sso.SSOToken): 226 usages
Collectors (java.util.stream.Collectors): 219 usages
Test (org.testng.annotations.Test): 209 usages