Example 56 with Set

Use of java.util.Set in project hbase by apache.

The class BuilderStyleTest, method assertClassesAreBuilderStyle:

/*
   * If a base class Foo declares a method setFoo() returning Foo, then a subclass should
   * re-declare the method, overriding the return type with the subclass:
   *
   * class Foo {
   *   Foo setFoo() {
   *     ..
   *     return this;
   *   }
   * }
   *
   * class Bar extends Foo {
   *   Bar setFoo() {
   *     return (Bar) super.setFoo();
   *   }
   * }
   *
   */
@SuppressWarnings("rawtypes")
public static void assertClassesAreBuilderStyle(Class... classes) {
    for (Class clazz : classes) {
        System.out.println("Checking " + clazz);
        Method[] methods = clazz.getDeclaredMethods();
        Map<String, Set<Method>> methodsBySignature = new HashMap<>();
        for (Method method : methods) {
            if (!Modifier.isPublic(method.getModifiers())) {
                // only consider public methods
                continue;
            }
            Class<?> ret = method.getReturnType();
            if (method.getName().startsWith("set") || method.getName().startsWith("add")) {
                System.out.println("  " + clazz.getSimpleName() + "." + method.getName() + "() : " + ret.getSimpleName());
                // Because of subclass / superclass overrides, java reflection can report
                // multiple definitions for the same signature, so we group methods by
                // signature, e.g.:
                //   Mutation.setDurability() : Mutation
                //   Delete.setDurability()   : Mutation   (compiler-generated bridge)
                //   Delete.setDurability()   : Delete
                String sig = method.getName();
                for (Class<?> param : method.getParameterTypes()) {
                    sig += param.getName();
                }
                methodsBySignature.computeIfAbsent(sig, k -> new HashSet<>()).add(method);
            }
        }
        // now iterate over the methods by signatures
        for (Map.Entry<String, Set<Method>> e : methodsBySignature.entrySet()) {
            // at least one of the grouped methods must return the checked class (or a subclass)
            boolean found = false;
            for (Method m : e.getValue()) {
                found = clazz.isAssignableFrom(m.getReturnType());
                if (found)
                    break;
            }
            String errorMsg = "All setXXX()/addXXX() methods in " + clazz.getSimpleName() + " should return a " + clazz.getSimpleName() + " object in builder style. Offending method: " + e.getValue().iterator().next().getName();
            assertTrue(errorMsg, found);
        }
    }
}
Also used : HashSet(java.util.HashSet) Set(java.util.Set) HashMap(java.util.HashMap) Map(java.util.Map) Method(java.lang.reflect.Method)
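
For illustration, a minimal sketch of how this check could be invoked from a unit test. Put and Delete are real HBase client classes with builder-style setters, but this particular test wiring is an assumption, not part of the snippet above:

import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.junit.Test;

public class BuilderStyleUsageSketch {

    @Test
    public void mutationsAreBuilderStyle() {
        // Fails with the error message built above if any public set*/add*
        // method on these classes does not return the class itself.
        BuilderStyleTest.assertClassesAreBuilderStyle(Put.class, Delete.class);
    }
}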

Example 57 with Set

Use of java.util.Set in project hbase by apache.

The class DumpReplicationQueues, method dumpReplicationQueues:

private int dumpReplicationQueues(DumpOptions opts) throws Exception {
    Configuration conf = getConf();
    HBaseAdmin.available(conf);
    ClusterConnection connection = (ClusterConnection) ConnectionFactory.createConnection(conf);
    Admin admin = connection.getAdmin();
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "DumpReplicationQueues" + System.currentTimeMillis(), new WarnOnlyAbortable(), true);
    try {
        // Our zk watcher
        LOG.info("Our Quorum: " + zkw.getQuorum());
        List<TableCFs> replicatedTableCFs = admin.listReplicatedTableCFs();
        if (replicatedTableCFs.isEmpty()) {
            LOG.info("No tables with a configured replication peer were found.");
            return 0;
        } else {
            LOG.info("Replicated Tables: " + replicatedTableCFs);
        }
        List<ReplicationPeerDescription> peers = admin.listReplicationPeers();
        if (peers.isEmpty()) {
            LOG.info("Replication is enabled but no peer configuration was found.");
        }
        System.out.println("Dumping replication peers and configurations:");
        System.out.println(dumpPeersState(peers));
        if (opts.isDistributed()) {
            LOG.info("Found [--distributed], will poll each RegionServer.");
            Set<String> peerIds = peers.stream().map((peer) -> peer.getPeerId()).collect(Collectors.toSet());
            System.out.println(dumpQueues(connection, zkw, peerIds, opts.isHdfs()));
            System.out.println(dumpReplicationSummary());
        } else {
            // use ZK instead
            System.out.print("Dumping replication znodes via ZooKeeper:");
            System.out.println(ZKUtil.getReplicationZnodesDump(zkw));
        }
        return 0;
    } catch (IOException e) {
        LOG.error("Failed to dump replication queues", e);
        return -1;
    } finally {
        zkw.close();
    }
}
Also used : StringUtils(org.apache.hadoop.hbase.procedure2.util.StringUtils) ReplicationTracker(org.apache.hadoop.hbase.replication.ReplicationTracker) Arrays(java.util.Arrays) FileSystem(org.apache.hadoop.fs.FileSystem) ReplicationFactory(org.apache.hadoop.hbase.replication.ReplicationFactory) FileStatus(org.apache.hadoop.fs.FileStatus) HBaseAdmin(org.apache.hadoop.hbase.client.HBaseAdmin) ArrayList(java.util.ArrayList) WALLink(org.apache.hadoop.hbase.io.WALLink) ReplicationQueueInfo(org.apache.hadoop.hbase.replication.ReplicationQueueInfo) Configured(org.apache.hadoop.conf.Configured) Map(java.util.Map) Configuration(org.apache.hadoop.conf.Configuration) TableCFs(org.apache.hadoop.hbase.client.replication.TableCFs) AtomicLongMap(com.google.common.util.concurrent.AtomicLongMap) LinkedList(java.util.LinkedList) KeeperException(org.apache.zookeeper.KeeperException) ZKUtil(org.apache.hadoop.hbase.zookeeper.ZKUtil) Abortable(org.apache.hadoop.hbase.Abortable) ReplicationQueuesClientArguments(org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments) ToolRunner(org.apache.hadoop.util.ToolRunner) Set(java.util.Set) ReplicationPeerConfig(org.apache.hadoop.hbase.replication.ReplicationPeerConfig) IOException(java.io.IOException) ReplicationPeers(org.apache.hadoop.hbase.replication.ReplicationPeers) ZooKeeperWatcher(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher) Collectors(java.util.stream.Collectors) Stoppable(org.apache.hadoop.hbase.Stoppable) FileNotFoundException(java.io.FileNotFoundException) ConnectionFactory(org.apache.hadoop.hbase.client.ConnectionFactory) Tool(org.apache.hadoop.util.Tool) List(java.util.List) ReplicationQueuesClient(org.apache.hadoop.hbase.replication.ReplicationQueuesClient) Admin(org.apache.hadoop.hbase.client.Admin) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) ClusterConnection(org.apache.hadoop.hbase.client.ClusterConnection) ReplicationPeerDescription(org.apache.hadoop.hbase.replication.ReplicationPeerDescription) Log(org.apache.commons.logging.Log) Queue(java.util.Queue) LogFactory(org.apache.commons.logging.LogFactory) ReplicationQueues(org.apache.hadoop.hbase.replication.ReplicationQueues)
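
A hedged sketch of driving this tool from a main() through Hadoop's ToolRunner; the --distributed flag is taken from the log message above, but the main() wiring itself is an assumption for illustration (DumpReplicationQueues is assumed to be visible on the classpath):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.util.ToolRunner;

public class DumpReplicationQueuesRunner {

    public static void main(String[] args) throws Exception {
        // Poll each RegionServer directly instead of dumping the ZooKeeper
        // znodes, mirroring the opts.isDistributed() branch above.
        Configuration conf = HBaseConfiguration.create();
        int exitCode = ToolRunner.run(conf, new DumpReplicationQueues(),
                new String[] { "--distributed" });
        System.exit(exitCode);
    }
}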

Example 58 with Set

Use of java.util.Set in project hadoop by apache.

The class TestZKRMStateStorePerf, method run:

@SuppressWarnings("unchecked")
@Override
public int run(String[] args) {
    LOG.info("Starting ZKRMStateStorePerf ver." + version);
    int numApp = ZK_PERF_NUM_APP_DEFAULT;
    int numAppAttemptPerApp = ZK_PERF_NUM_APPATTEMPT_PER_APP;
    String hostPort = null;
    boolean launchLocalZK = true;
    if (args.length == 0) {
        System.err.println("Missing arguments.");
        return -1;
    }
    for (int i = 0; i < args.length; i++) {
        // parse command line
        if (args[i].equalsIgnoreCase("-appsize")) {
            numApp = Integer.parseInt(args[++i]);
        } else if (args[i].equalsIgnoreCase("-appattemptsize")) {
            numAppAttemptPerApp = Integer.parseInt(args[++i]);
        } else if (args[i].equalsIgnoreCase("-hostPort")) {
            hostPort = args[++i];
            launchLocalZK = false;
        } else if (args[i].equalsIgnoreCase("-workingZnode")) {
            workingZnode = args[++i];
        } else {
            System.err.println("Illegal argument: " + args[i]);
            return -1;
        }
    }
    if (launchLocalZK) {
        try {
            setUpZKServer();
        } catch (Exception e) {
            System.err.println("failed to setup. : " + e.getMessage());
            return -1;
        }
    }
    initStore(hostPort);
    long submitTime = System.currentTimeMillis();
    long startTime = System.currentTimeMillis() + 1234;
    ArrayList<ApplicationId> applicationIds = new ArrayList<>();
    ArrayList<RMApp> rmApps = new ArrayList<>();
    ArrayList<ApplicationAttemptId> attemptIds = new ArrayList<>();
    HashMap<ApplicationId, Set<ApplicationAttemptId>> appIdsToAttemptId = new HashMap<>();
    TestDispatcher dispatcher = new TestDispatcher();
    store.setRMDispatcher(dispatcher);
    for (int i = 0; i < numApp; i++) {
        ApplicationId appId = ApplicationId.newInstance(clusterTimeStamp, i);
        applicationIds.add(appId);
        ArrayList<ApplicationAttemptId> attemptIdsForThisApp = new ArrayList<>();
        for (int j = 0; j < numAppAttemptPerApp; j++) {
            ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, j);
            attemptIdsForThisApp.add(attemptId);
        }
        appIdsToAttemptId.put(appId, new LinkedHashSet<>(attemptIdsForThisApp));
        attemptIds.addAll(attemptIdsForThisApp);
    }
    for (ApplicationId appId : applicationIds) {
        RMApp app = null;
        try {
            app = storeApp(store, appId, submitTime, startTime);
        } catch (Exception e) {
            System.err.println("failed to create Application Znode. : " + e.getMessage());
            return -1;
        }
        waitNotify(dispatcher);
        rmApps.add(app);
    }
    for (ApplicationAttemptId attemptId : attemptIds) {
        Token<AMRMTokenIdentifier> tokenId = generateAMRMToken(attemptId, appTokenMgr);
        SecretKey clientTokenKey = clientToAMTokenMgr.createMasterKey(attemptId);
        try {
            storeAttempt(store, attemptId, ContainerId.newContainerId(attemptId, 0L).toString(), tokenId, clientTokenKey, dispatcher);
        } catch (Exception e) {
            System.err.println("failed to create AppAttempt Znode. : " + e.getMessage());
            return -1;
        }
    }
    long storeStart = System.currentTimeMillis();
    try {
        store.loadState();
    } catch (Exception e) {
        System.err.println("failed to locaState from ZKRMStateStore. : " + e.getMessage());
        return -1;
    }
    long storeEnd = System.currentTimeMillis();
    long loadTime = storeEnd - storeStart;
    String resultMsg = "ZKRMStateStore takes " + loadTime + " msec to loadState.";
    LOG.info(resultMsg);
    System.out.println(resultMsg);
    // cleanup
    try {
        for (RMApp app : rmApps) {
            ApplicationStateData appState = ApplicationStateData.newInstance(app.getSubmitTime(), app.getStartTime(), app.getApplicationSubmissionContext(), app.getUser());
            ApplicationId appId = app.getApplicationId();
            Map m = mock(Map.class);
            when(m.keySet()).thenReturn(appIdsToAttemptId.get(appId));
            appState.attempts = m;
            store.removeApplicationStateInternal(appState);
        }
    } catch (Exception e) {
        System.err.println("failed to cleanup. : " + e.getMessage());
        return -1;
    }
    return 0;
}
Also used : LinkedHashSet(java.util.LinkedHashSet) RMApp(org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp) Set(java.util.Set) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) ApplicationStateData(org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData) SecretKey(javax.crypto.SecretKey) AMRMTokenIdentifier(org.apache.hadoop.yarn.security.AMRMTokenIdentifier) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) Map(java.util.Map)
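
The cleanup loop above stubs a raw Map with Mockito so that removeApplicationStateInternal sees exactly the attempt ids stored earlier. A self-contained sketch of that stubbing pattern; the class name and String keys here are illustrative, not from the test:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;

public class MapStubSketch {

    @SuppressWarnings("unchecked")
    public static void main(String[] args) {
        Set<String> attempts = new LinkedHashSet<>();
        attempts.add("attempt_1");
        attempts.add("attempt_2");

        // Only keySet() is exercised by the code under test, so a mock with a
        // single stubbed method suffices; every other Map method returns the
        // Mockito default (null, 0, or false).
        Map<String, Object> m = mock(Map.class);
        when(m.keySet()).thenReturn(attempts);

        System.out.println(m.keySet());          // [attempt_1, attempt_2]
        System.out.println(m.get("attempt_1"));  // null (unstubbed)
    }
}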

Example 59 with Set

Use of java.util.Set in project hadoop by apache.

The class TestRMNodeLabelsManager, method testReplaceLabelsFromNode:

@Test
public void testReplaceLabelsFromNode() throws Exception {
    RMContext rmContext = mock(RMContext.class);
    Dispatcher syncDispatcher = new InlineDispatcher();
    SchedulerEventHandler schedEventsHandler = new SchedulerEventHandler();
    syncDispatcher.register(SchedulerEventType.class, schedEventsHandler);
    when(rmContext.getDispatcher()).thenReturn(syncDispatcher);
    mgr.setRMContext(rmContext);
    mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("p1", "p2", "p3"));
    mgr.activateNode(NodeId.newInstance("n1", 1), SMALL_RESOURCE);
    mgr.activateNode(NodeId.newInstance("n2", 1), SMALL_RESOURCE);
    mgr.activateNode(NodeId.newInstance("n3", 1), SMALL_RESOURCE);
    mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1:1"), toSet("p1"), toNodeId("n2:1"), toSet("p2"), toNodeId("n3"), toSet("p3")));
    assertTrue("Event should be sent when there is change in labels", schedEventsHandler.receivedEvent);
    assertEquals("3 node label mapping modified", 3, schedEventsHandler.updatedNodeToLabels.size());
    ImmutableMap<NodeId, Set<String>> modifiedMap = ImmutableMap.of(toNodeId("n1:1"), toSet("p1"), toNodeId("n2:1"), toSet("p2"), toNodeId("n3:1"), toSet("p3"));
    assertEquals("Node label mapping is not matching", modifiedMap, schedEventsHandler.updatedNodeToLabels);
    schedEventsHandler.receivedEvent = false;
    mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1:1"), toSet("p1")));
    assertFalse("No event should be sent when there is no change in labels", schedEventsHandler.receivedEvent);
    schedEventsHandler.receivedEvent = false;
    mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n2:1"), toSet("p1"), toNodeId("n3"), toSet("p3")));
    assertTrue("Event should be sent when there is change in labels", schedEventsHandler.receivedEvent);
    assertEquals("Single node label mapping modified", 1, schedEventsHandler.updatedNodeToLabels.size());
    assertCollectionEquals(toSet("p1"), schedEventsHandler.updatedNodeToLabels.get(toNodeId("n2:1")));
    schedEventsHandler.receivedEvent = false;
    mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n3"), toSet("p2")));
    assertTrue("Event should be sent when there is change in labels @ HOST", schedEventsHandler.receivedEvent);
    assertEquals("Single node label mapping modified", 1, schedEventsHandler.updatedNodeToLabels.size());
    assertCollectionEquals(toSet("p2"), schedEventsHandler.updatedNodeToLabels.get(toNodeId("n3:1")));
    schedEventsHandler.receivedEvent = false;
    mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), toSet("p2")));
    assertTrue("Event should be sent when labels are modified at host though labels were set @ NM level", schedEventsHandler.receivedEvent);
    assertEquals("Single node label mapping modified", 1, schedEventsHandler.updatedNodeToLabels.size());
    assertCollectionEquals(toSet("p2"), schedEventsHandler.updatedNodeToLabels.get(toNodeId("n1:1")));
    schedEventsHandler.receivedEvent = false;
}
Also used : RMContext(org.apache.hadoop.yarn.server.resourcemanager.RMContext) ImmutableSet(com.google.common.collect.ImmutableSet) Set(java.util.Set) NodeId(org.apache.hadoop.yarn.api.records.NodeId) InlineDispatcher(org.apache.hadoop.yarn.event.InlineDispatcher) Dispatcher(org.apache.hadoop.yarn.event.Dispatcher) Test(org.junit.Test)
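
The test leans on toSet and toNodeId helpers inherited from its test base class; their bodies are not shown above, so the following reconstruction is an assumption about what such helpers typically look like (the names match the calls, the implementations are guesses):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.yarn.api.records.NodeId;

final class NodeLabelTestHelpers {

    // Collect the given labels into a mutable set.
    static <E> Set<E> toSet(E... elements) {
        return new HashSet<>(Arrays.asList(elements));
    }

    // Parse "host" or "host:port" into a NodeId. Port 0 is assumed as the
    // wildcard when only the host is given, matching calls like toNodeId("n3").
    static NodeId toNodeId(String str) {
        int idx = str.indexOf(':');
        if (idx < 0) {
            return NodeId.newInstance(str, 0);
        }
        return NodeId.newInstance(str.substring(0, idx),
                Integer.parseInt(str.substring(idx + 1)));
    }
}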

Example 60 with Set

Use of java.util.Set in project hadoop by apache.

The class DataGeneratorForTest, method loadEntities:

static void loadEntities(HBaseTestingUtility util) throws IOException {
    TimelineEntities te = new TimelineEntities();
    TimelineEntity entity = new TimelineEntity();
    String id = "hello";
    String type = "world";
    entity.setId(id);
    entity.setType(type);
    Long cTime = 1425016502000L;
    entity.setCreatedTime(cTime);
    // add the info map in Timeline Entity
    Map<String, Object> infoMap = new HashMap<>();
    infoMap.put("infoMapKey1", "infoMapValue2");
    infoMap.put("infoMapKey2", 20);
    infoMap.put("infoMapKey3", 71.4);
    entity.addInfo(infoMap);
    // add the isRelatedToEntity info
    Set<String> isRelatedToSet = new HashSet<>();
    isRelatedToSet.add("relatedto1");
    Map<String, Set<String>> isRelatedTo = new HashMap<>();
    isRelatedTo.put("task", isRelatedToSet);
    entity.setIsRelatedToEntities(isRelatedTo);
    // add the relatesTo info
    Set<String> relatesToSet = new HashSet<String>();
    relatesToSet.add("relatesto1");
    relatesToSet.add("relatesto3");
    Map<String, Set<String>> relatesTo = new HashMap<>();
    relatesTo.put("container", relatesToSet);
    Set<String> relatesToSet11 = new HashSet<>();
    relatesToSet11.add("relatesto4");
    relatesTo.put("container1", relatesToSet11);
    entity.setRelatesToEntities(relatesTo);
    // add some config entries
    Map<String, String> conf = new HashMap<>();
    conf.put("config_param1", "value1");
    conf.put("config_param2", "value2");
    conf.put("cfg_param1", "value3");
    entity.addConfigs(conf);
    // add metrics
    Set<TimelineMetric> metrics = new HashSet<>();
    TimelineMetric m1 = new TimelineMetric();
    m1.setId("MAP_SLOT_MILLIS");
    Map<Long, Number> metricValues = new HashMap<>();
    long ts = System.currentTimeMillis();
    metricValues.put(ts - 120000, 100000000);
    metricValues.put(ts - 100000, 200000000);
    metricValues.put(ts - 80000, 300000000);
    metricValues.put(ts - 60000, 400000000);
    metricValues.put(ts - 40000, 50000000000L);
    metricValues.put(ts - 20000, 70000000000L);
    m1.setType(Type.TIME_SERIES);
    m1.setValues(metricValues);
    metrics.add(m1);
    TimelineMetric m12 = new TimelineMetric();
    m12.setId("MAP1_BYTES");
    m12.addValue(ts, 50);
    metrics.add(m12);
    entity.addMetrics(metrics);
    TimelineEvent event = new TimelineEvent();
    event.setId("start_event");
    event.setTimestamp(ts);
    entity.addEvent(event);
    te.addEntity(entity);
    TimelineEntity entity1 = new TimelineEntity();
    String id1 = "hello1";
    entity1.setId(id1);
    entity1.setType(type);
    entity1.setCreatedTime(cTime + 20L);
    // add the info map in Timeline Entity
    Map<String, Object> infoMap1 = new HashMap<>();
    infoMap1.put("infoMapKey1", "infoMapValue1");
    infoMap1.put("infoMapKey2", 10);
    entity1.addInfo(infoMap1);
    // add event.
    TimelineEvent event11 = new TimelineEvent();
    event11.setId("end_event");
    event11.setTimestamp(ts);
    entity1.addEvent(event11);
    TimelineEvent event12 = new TimelineEvent();
    event12.setId("update_event");
    event12.setTimestamp(ts - 10);
    entity1.addEvent(event12);
    // add the isRelatedToEntity info
    Set<String> isRelatedToSet1 = new HashSet<>();
    isRelatedToSet1.add("relatedto3");
    isRelatedToSet1.add("relatedto5");
    Map<String, Set<String>> isRelatedTo1 = new HashMap<>();
    isRelatedTo1.put("task1", isRelatedToSet1);
    Set<String> isRelatedToSet11 = new HashSet<>();
    isRelatedToSet11.add("relatedto4");
    isRelatedTo1.put("task2", isRelatedToSet11);
    entity1.setIsRelatedToEntities(isRelatedTo1);
    // add the relatesTo info
    Set<String> relatesToSet1 = new HashSet<String>();
    relatesToSet1.add("relatesto1");
    relatesToSet1.add("relatesto2");
    Map<String, Set<String>> relatesTo1 = new HashMap<>();
    relatesTo1.put("container", relatesToSet1);
    entity1.setRelatesToEntities(relatesTo1);
    // add some config entries
    Map<String, String> conf1 = new HashMap<>();
    conf1.put("cfg_param1", "value1");
    conf1.put("cfg_param2", "value2");
    entity1.addConfigs(conf1);
    // add metrics
    Set<TimelineMetric> metrics1 = new HashSet<>();
    TimelineMetric m2 = new TimelineMetric();
    m2.setId("MAP1_SLOT_MILLIS");
    Map<Long, Number> metricValues1 = new HashMap<>();
    long ts1 = System.currentTimeMillis();
    metricValues1.put(ts1 - 120000, 100000000);
    metricValues1.put(ts1 - 100000, 200000000);
    metricValues1.put(ts1 - 80000, 300000000);
    metricValues1.put(ts1 - 60000, 400000000);
    metricValues1.put(ts1 - 40000, 50000000000L);
    metricValues1.put(ts1 - 20000, 60000000000L);
    m2.setType(Type.TIME_SERIES);
    m2.setValues(metricValues1);
    metrics1.add(m2);
    entity1.addMetrics(metrics1);
    te.addEntity(entity1);
    TimelineEntity entity2 = new TimelineEntity();
    String id2 = "hello2";
    entity2.setId(id2);
    entity2.setType(type);
    entity2.setCreatedTime(cTime + 40L);
    TimelineEvent event21 = new TimelineEvent();
    event21.setId("update_event");
    event21.setTimestamp(ts - 20);
    entity2.addEvent(event21);
    Set<String> isRelatedToSet2 = new HashSet<>();
    isRelatedToSet2.add("relatedto3");
    Map<String, Set<String>> isRelatedTo2 = new HashMap<>();
    isRelatedTo2.put("task1", isRelatedToSet2);
    entity2.setIsRelatedToEntities(isRelatedTo2);
    Map<String, Set<String>> relatesTo3 = new HashMap<>();
    Set<String> relatesToSet14 = new HashSet<>();
    relatesToSet14.add("relatesto7");
    relatesTo3.put("container2", relatesToSet14);
    entity2.setRelatesToEntities(relatesTo3);
    te.addEntity(entity2);
    HBaseTimelineWriterImpl hbi = null;
    try {
        hbi = new HBaseTimelineWriterImpl();
        hbi.init(util.getConfiguration());
        hbi.start();
        String cluster = "cluster1";
        String user = "user1";
        String flow = "some_flow_name";
        String flowVersion = "AB7822C10F1111";
        long runid = 1002345678919L;
        String appName = "application_1231111111_1111";
        hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
    } finally {
        if (hbi != null) {
            hbi.stop();
            hbi.close();
        }
    }
}
Also used : TimelineEvent(org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent) TimelineMetric(org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric) HashSet(java.util.HashSet) Set(java.util.Set) HashMap(java.util.HashMap) TimelineEntity(org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity) TimelineEntities(org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities)
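
The entity setup above repeats the same create-set/add/put sequence for every relatesTo and isRelatedTo entry. A small hypothetical helper (not part of the test) could compress that pattern:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

final class RelationMapBuilder {

    private final Map<String, Set<String>> relations = new HashMap<>();

    // Add ids under the given relation type, creating the set on demand.
    RelationMapBuilder put(String type, String... ids) {
        Set<String> set = relations.computeIfAbsent(type, k -> new HashSet<>());
        for (String id : ids) {
            set.add(id);
        }
        return this;
    }

    Map<String, Set<String>> build() {
        return relations;
    }
}

With that, a call such as entity.setRelatesToEntities(new RelationMapBuilder().put("container", "relatesto1", "relatesto3").put("container1", "relatesto4").build()) would replace roughly ten lines of the setup above.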

Aggregations (usage counts)

Set (java.util.Set): 6789
HashSet (java.util.HashSet): 4372
HashMap (java.util.HashMap): 2090
Map (java.util.Map): 1865
Iterator (java.util.Iterator): 1774
ArrayList (java.util.ArrayList): 1113
List (java.util.List): 980
Test (org.junit.Test): 920
TreeSet (java.util.TreeSet): 536
IOException (java.io.IOException): 501
SSOException (com.iplanet.sso.SSOException): 467
LinkedHashSet (java.util.LinkedHashSet): 418
SMSException (com.sun.identity.sm.SMSException): 347
IdRepoException (com.sun.identity.idm.IdRepoException): 268
Collection (java.util.Collection): 259
ImmutableSet (com.google.common.collect.ImmutableSet): 256
File (java.io.File): 245
SSOToken (com.iplanet.sso.SSOToken): 226
Collectors (java.util.stream.Collectors): 219
Test (org.testng.annotations.Test): 209