Example 46 with Set

use of java.util.Set in project hadoop by apache.

the class HumanReadableHistoryViewerPrinter method printFailedAttempts.

private void printFailedAttempts(PrintStream ps, HistoryViewer.FilteredJob filteredJob) {
    // one Set of filtered TaskIDs per node, keyed by hostname
    Map<String, Set<TaskID>> badNodes = filteredJob.getFilteredMap();
    StringBuilder attempts = new StringBuilder();
    if (!badNodes.isEmpty()) {
        attempts.append("\n").append(filteredJob.getFilter());
        attempts.append(" task attempts by nodes");
        attempts.append("\nHostname\tFailedTasks");
        attempts.append("\n===============================");
        ps.println(attempts);
        for (Map.Entry<String, Set<TaskID>> entry : badNodes.entrySet()) {
            String node = entry.getKey();
            Set<TaskID> failedTasks = entry.getValue();
            attempts.setLength(0);
            attempts.append(node).append("\t");
            for (TaskID t : failedTasks) {
                attempts.append(t).append(", ");
            }
            ps.println(attempts);
        }
    }
}
Also used : Set(java.util.Set) TaskID(org.apache.hadoop.mapreduce.TaskID) Map(java.util.Map)
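
The pattern above is a plain "multimap": a java.util.Map whose values are java.util.Sets, here grouping failed TaskIDs by hostname. A minimal self-contained sketch of the same grouping-and-printing idea (the class name and task strings below are illustrative placeholders, not Hadoop code):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class NodeFailureDemo {
    public static void main(String[] args) {
        // Group failed task names by host; the Set drops duplicate reports.
        Map<String, Set<String>> badNodes = new HashMap<>();
        badNodes.computeIfAbsent("host-1", k -> new HashSet<>()).add("task_001");
        badNodes.computeIfAbsent("host-1", k -> new HashSet<>()).add("task_002");
        badNodes.computeIfAbsent("host-2", k -> new HashSet<>()).add("task_003");

        // One tab-separated line per node, reusing the builder as above.
        StringBuilder line = new StringBuilder();
        for (Map.Entry<String, Set<String>> entry : badNodes.entrySet()) {
            line.setLength(0);
            line.append(entry.getKey()).append('\t');
            for (String task : entry.getValue()) {
                line.append(task).append(", ");
            }
            System.out.println(line);
        }
    }
}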

Example 47 with Set

use of java.util.Set in project hadoop by apache.

the class KeyValueBasedTimelineStore method put.

@Override
public synchronized TimelinePutResponse put(TimelineEntities data) {
    TimelinePutResponse response = new TimelinePutResponse();
    if (getServiceStopped()) {
        LOG.info("Service stopped, returning an error response for the put");
        TimelinePutError error = new TimelinePutError();
        error.setErrorCode(TimelinePutError.IO_EXCEPTION);
        response.addError(error);
        return response;
    }
    for (TimelineEntity entity : data.getEntities()) {
        EntityIdentifier entityId = new EntityIdentifier(entity.getEntityId(), entity.getEntityType());
        // store entity info in memory
        TimelineEntity existingEntity = entities.get(entityId);
        boolean needsPut = false;
        if (existingEntity == null) {
            existingEntity = new TimelineEntity();
            existingEntity.setEntityId(entity.getEntityId());
            existingEntity.setEntityType(entity.getEntityType());
            existingEntity.setStartTime(entity.getStartTime());
            if (entity.getDomainId() == null || entity.getDomainId().length() == 0) {
                TimelinePutError error = new TimelinePutError();
                error.setEntityId(entityId.getId());
                error.setEntityType(entityId.getType());
                error.setErrorCode(TimelinePutError.NO_DOMAIN);
                response.addError(error);
                continue;
            }
            existingEntity.setDomainId(entity.getDomainId());
            // insert a new entity into the storage and update the insert-time map
            entityInsertTimes.put(entityId, System.currentTimeMillis());
            needsPut = true;
        }
        if (entity.getEvents() != null) {
            if (existingEntity.getEvents() == null) {
                existingEntity.setEvents(entity.getEvents());
            } else {
                existingEntity.addEvents(entity.getEvents());
            }
            Collections.sort(existingEntity.getEvents());
            needsPut = true;
        }
        // check startTime
        if (existingEntity.getStartTime() == null) {
            if (existingEntity.getEvents() == null || existingEntity.getEvents().isEmpty()) {
                TimelinePutError error = new TimelinePutError();
                error.setEntityId(entityId.getId());
                error.setEntityType(entityId.getType());
                error.setErrorCode(TimelinePutError.NO_START_TIME);
                response.addError(error);
                entities.remove(entityId);
                entityInsertTimes.remove(entityId);
                continue;
            } else {
                Long min = Long.MAX_VALUE;
                for (TimelineEvent e : entity.getEvents()) {
                    if (min > e.getTimestamp()) {
                        min = e.getTimestamp();
                    }
                }
                existingEntity.setStartTime(min);
                needsPut = true;
            }
        }
        if (entity.getPrimaryFilters() != null) {
            if (existingEntity.getPrimaryFilters() == null) {
                existingEntity.setPrimaryFilters(new HashMap<String, Set<Object>>());
            }
            for (Entry<String, Set<Object>> pf : entity.getPrimaryFilters().entrySet()) {
                for (Object pfo : pf.getValue()) {
                    existingEntity.addPrimaryFilter(pf.getKey(), KeyValueBasedTimelineStoreUtils.compactNumber(pfo));
                    needsPut = true;
                }
            }
        }
        if (entity.getOtherInfo() != null) {
            if (existingEntity.getOtherInfo() == null) {
                existingEntity.setOtherInfo(new HashMap<String, Object>());
            }
            for (Entry<String, Object> info : entity.getOtherInfo().entrySet()) {
                existingEntity.addOtherInfo(info.getKey(), KeyValueBasedTimelineStoreUtils.compactNumber(info.getValue()));
                needsPut = true;
            }
        }
        if (needsPut) {
            entities.put(entityId, existingEntity);
        }
        // relate it to other entities
        if (entity.getRelatedEntities() == null) {
            continue;
        }
        for (Entry<String, Set<String>> partRelatedEntities : entity.getRelatedEntities().entrySet()) {
            if (partRelatedEntities == null) {
                continue;
            }
            for (String idStr : partRelatedEntities.getValue()) {
                EntityIdentifier relatedEntityId = new EntityIdentifier(idStr, partRelatedEntities.getKey());
                TimelineEntity relatedEntity = entities.get(relatedEntityId);
                if (relatedEntity != null) {
                    if (relatedEntity.getDomainId().equals(existingEntity.getDomainId())) {
                        relatedEntity.addRelatedEntity(existingEntity.getEntityType(), existingEntity.getEntityId());
                        entities.put(relatedEntityId, relatedEntity);
                    } else {
                        // in this case the entity will be put, but the relation will be
                        // ignored
                        TimelinePutError error = new TimelinePutError();
                        error.setEntityType(existingEntity.getEntityType());
                        error.setEntityId(existingEntity.getEntityId());
                        error.setErrorCode(TimelinePutError.FORBIDDEN_RELATION);
                        response.addError(error);
                    }
                } else {
                    relatedEntity = new TimelineEntity();
                    relatedEntity.setEntityId(relatedEntityId.getId());
                    relatedEntity.setEntityType(relatedEntityId.getType());
                    relatedEntity.setStartTime(existingEntity.getStartTime());
                    relatedEntity.addRelatedEntity(existingEntity.getEntityType(), existingEntity.getEntityId());
                    relatedEntity.setDomainId(existingEntity.getDomainId());
                    entities.put(relatedEntityId, relatedEntity);
                    entityInsertTimes.put(relatedEntityId, System.currentTimeMillis());
                }
            }
        }
    }
    return response;
}
Also used : TimelineEvent(org.apache.hadoop.yarn.api.records.timeline.TimelineEvent) SortedSet(java.util.SortedSet) HashSet(java.util.HashSet) EnumSet(java.util.EnumSet) Set(java.util.Set) TimelinePutResponse(org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse) TimelineEntity(org.apache.hadoop.yarn.api.records.timeline.TimelineEntity) TimelinePutError(org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse.TimelinePutError)
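
Much of the method above is Set bookkeeping: primary filters live in a Map<String, Set<Object>>, and each incoming value is merged into the per-key Set. A hedged standalone sketch of just that merge step (class and method names are made up for illustration; the real store also compacts numbers via KeyValueBasedTimelineStoreUtils, which is omitted here):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class FilterMergeDemo {
    // Merge one primary-filter value into a Map<String, Set<Object>>,
    // mirroring what put() does for each entity's filters.
    static void addPrimaryFilter(Map<String, Set<Object>> filters,
                                 String key, Object value) {
        filters.computeIfAbsent(key, k -> new HashSet<>()).add(value);
    }

    public static void main(String[] args) {
        Map<String, Set<Object>> filters = new HashMap<>();
        addPrimaryFilter(filters, "user", "alice");
        addPrimaryFilter(filters, "user", "bob");
        addPrimaryFilter(filters, "user", "alice"); // duplicate; the Set ignores it
        System.out.println(filters); // e.g. {user=[bob, alice]}, HashSet order varies
    }
}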

Example 48 with Set

use of java.util.Set in project hadoop by apache.

the class NMClientAsyncImpl method serviceStart.

@Override
protected void serviceStart() throws Exception {
    client.start();
    ThreadFactory tf = new ThreadFactoryBuilder().setNameFormat(this.getClass().getName() + " #%d").setDaemon(true).build();
    // Start with a default core-pool size and change it dynamically.
    int initSize = Math.min(INITIAL_THREAD_POOL_SIZE, maxThreadPoolSize);
    threadPool = new ThreadPoolExecutor(initSize, Integer.MAX_VALUE, 1, TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>(), tf);
    eventDispatcherThread = new Thread() {

        @Override
        public void run() {
            ContainerEvent event = null;
            Set<String> allNodes = new HashSet<String>();
            while (!stopped.get() && !Thread.currentThread().isInterrupted()) {
                try {
                    event = events.take();
                } catch (InterruptedException e) {
                    if (!stopped.get()) {
                        LOG.error("Returning, thread interrupted", e);
                    }
                    return;
                }
                allNodes.add(event.getNodeId().toString());
                int threadPoolSize = threadPool.getCorePoolSize();
                // We can increase the pool size only if we haven't reached the
                // maximum limit yet.
                if (threadPoolSize != maxThreadPoolSize) {
                    // allNodes tracks the nodes where containers will run at *this*
                    // point in time. This is *not* the cluster size and doesn't need
                    // to be.
                    int nodeNum = allNodes.size();
                    int idealThreadPoolSize = Math.min(maxThreadPoolSize, nodeNum);
                    if (threadPoolSize < idealThreadPoolSize) {
                        // Bump up the pool size to idealThreadPoolSize +
                        // INITIAL_THREAD_POOL_SIZE; the latter is just a buffer so we
                        // are not constantly increasing the pool size
                        int newThreadPoolSize = Math.min(maxThreadPoolSize, idealThreadPoolSize + INITIAL_THREAD_POOL_SIZE);
                        LOG.info("Set NMClientAsync thread pool size to " + newThreadPoolSize + " as the number of nodes to talk to is " + nodeNum);
                        threadPool.setCorePoolSize(newThreadPoolSize);
                    }
                }
                // events from the queue are handled in parallel by the thread pool
                threadPool.execute(getContainerEventProcessor(event));
            // TODO: Group launching of multiple containers to a single
            // NodeManager into a single connection
            }
        }
    };
    eventDispatcherThread.setName("Container Event Dispatcher");
    eventDispatcherThread.setDaemon(false);
    eventDispatcherThread.start();
    super.serviceStart();
}
Also used : ThreadFactory(java.util.concurrent.ThreadFactory) HashSet(java.util.HashSet) EnumSet(java.util.EnumSet) Set(java.util.Set) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue)
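
Note how the dispatcher uses a HashSet purely for deduplication: allNodes.size() is the number of distinct NodeManagers seen so far, and that count drives the core-pool size. A small sketch of the sizing rule in isolation (the constants are illustrative; NMClientAsyncImpl's actual defaults come from its configuration):

import java.util.HashSet;
import java.util.Set;

public class PoolSizingDemo {
    // Illustrative constants, not the real NMClientAsyncImpl values.
    static final int INITIAL_THREAD_POOL_SIZE = 10;
    static final int MAX_THREAD_POOL_SIZE = 50;

    public static void main(String[] args) {
        Set<String> allNodes = new HashSet<>();
        String[] events = {"node-a:8041", "node-b:8041", "node-a:8041"};
        for (String nodeId : events) {
            allNodes.add(nodeId); // duplicates collapse, so size() = distinct nodes
            int ideal = Math.min(MAX_THREAD_POOL_SIZE, allNodes.size());
            int target = Math.min(MAX_THREAD_POOL_SIZE, ideal + INITIAL_THREAD_POOL_SIZE);
            System.out.println("distinct nodes=" + allNodes.size()
                    + ", target pool size=" + target);
        }
    }
}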

Example 49 with Set

use of java.util.Set in project hadoop by apache.

the class CommonNodeLabelsManager method checkRemoveLabelsFromNode.

protected void checkRemoveLabelsFromNode(Map<NodeId, Set<String>> removeLabelsFromNode) throws IOException {
    // check that all labels being removed exist
    Set<String> knownLabels = labelCollections.keySet();
    for (Entry<NodeId, Set<String>> entry : removeLabelsFromNode.entrySet()) {
        NodeId nodeId = entry.getKey();
        Set<String> labels = entry.getValue();
        if (!knownLabels.containsAll(labels)) {
            String msg = "Not all labels being removed contained by known " + "label collections, please check" + ", removed labels=[" + StringUtils.join(labels, ",") + "]";
            LOG.error(msg);
            throw new IOException(msg);
        }
        Set<String> originalLabels = null;
        boolean nodeExisted = false;
        if (WILDCARD_PORT != nodeId.getPort()) {
            Node nm = getNMInNodeSet(nodeId);
            if (nm != null) {
                originalLabels = nm.labels;
                nodeExisted = true;
            }
        } else {
            Host host = nodeCollections.get(nodeId.getHost());
            if (null != host) {
                originalLabels = host.labels;
                nodeExisted = true;
            }
        }
        if (!nodeExisted) {
            String msg = "Try to remove labels from NM=" + nodeId + ", but the NM doesn't existed";
            LOG.error(msg);
            throw new IOException(msg);
        }
        // labels is never null here; nothing to do for an empty label set
        if (labels.isEmpty()) {
            continue;
        }
        // originalLabels may be null because, when a Node is created, Node.labels can be null.
        if (originalLabels == null || !originalLabels.containsAll(labels)) {
            String msg = "Try to remove labels = [" + StringUtils.join(labels, ",") + "], but not all labels contained by NM=" + nodeId;
            LOG.error(msg);
            throw new IOException(msg);
        }
    }
}
Also used : HashSet(java.util.HashSet) EnumSet(java.util.EnumSet) ImmutableSet(com.google.common.collect.ImmutableSet) Set(java.util.Set) NodeId(org.apache.hadoop.yarn.api.records.NodeId) IOException(java.io.IOException)
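
The validation hinges on Set.containsAll: a removal request is legal only if the requested labels are a subset of the known label collection and of the node's current labels. A minimal sketch of that subset check (names below are placeholders, not the Hadoop API):

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

public class LabelSubsetDemo {
    // Reject labels outside the known collection, using the same
    // Set.containsAll subset test as checkRemoveLabelsFromNode.
    static void checkKnown(Set<String> knownLabels, Set<String> requested)
            throws IOException {
        if (!knownLabels.containsAll(requested)) {
            throw new IOException("Unknown labels: " + requested);
        }
    }

    public static void main(String[] args) throws IOException {
        Set<String> known = new HashSet<>();
        known.add("gpu");
        known.add("ssd");
        checkKnown(known, Set.of("gpu"));  // passes: subset of known
        checkKnown(known, Set.of("fpga")); // throws IOException
    }
}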

Example 50 with Set

use of java.util.Set in project hadoop by apache.

the class CommonNodeLabelsManager method checkAddLabelsToNode.

protected void checkAddLabelsToNode(Map<NodeId, Set<String>> addedLabelsToNode) throws IOException {
    if (null == addedLabelsToNode || addedLabelsToNode.isEmpty()) {
        return;
    }
    // check that all labels being added exist
    Set<String> knownLabels = labelCollections.keySet();
    for (Entry<NodeId, Set<String>> entry : addedLabelsToNode.entrySet()) {
        NodeId nodeId = entry.getKey();
        Set<String> labels = entry.getValue();
        if (!knownLabels.containsAll(labels)) {
            String msg = "Not all labels being added contained by known " + "label collections, please check" + ", added labels=[" + StringUtils.join(labels, ",") + "]";
            LOG.error(msg);
            throw new IOException(msg);
        }
        // a single host may carry at most one label after the add
        if (!labels.isEmpty()) {
            Set<String> newLabels = new HashSet<String>(getLabelsByNode(nodeId));
            newLabels.addAll(labels);
            // we don't allow more than one label on a node after the add
            if (newLabels.size() > 1) {
                String msg = String.format("%d labels specified on host=%s after add labels to node" + ", please note that we do not support specifying multiple" + " labels on a single host for now.", newLabels.size(), nodeId.getHost());
                LOG.error(msg);
                throw new IOException(msg);
            }
        }
    }
}
Also used : HashSet(java.util.HashSet) EnumSet(java.util.EnumSet) ImmutableSet(com.google.common.collect.ImmutableSet) Set(java.util.Set) NodeId(org.apache.hadoop.yarn.api.records.NodeId) IOException(java.io.IOException)
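
The copy-then-union at the heart of the check is worth calling out: the node's current labels are copied into a fresh HashSet before addAll, so the stored Set is never mutated during validation. A short sketch of that idiom (class and method names are invented for illustration):

import java.util.HashSet;
import java.util.Set;

public class SingleLabelDemo {
    // Union the existing and added labels in a defensive copy, as
    // checkAddLabelsToNode does, then test the one-label-per-host rule.
    static boolean exceedsOneLabel(Set<String> existing, Set<String> added) {
        Set<String> union = new HashSet<>(existing); // copy, don't mutate existing
        union.addAll(added);
        return union.size() > 1;
    }

    public static void main(String[] args) {
        Set<String> existing = new HashSet<>();
        existing.add("gpu");
        System.out.println(exceedsOneLabel(existing, Set.of("gpu"))); // false: same label
        System.out.println(exceedsOneLabel(existing, Set.of("ssd"))); // true: two labels
    }
}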

Aggregations

Set (java.util.Set): 6789
HashSet (java.util.HashSet): 4372
HashMap (java.util.HashMap): 2090
Map (java.util.Map): 1865
Iterator (java.util.Iterator): 1774
ArrayList (java.util.ArrayList): 1113
List (java.util.List): 980
Test (org.junit.Test): 920
TreeSet (java.util.TreeSet): 536
IOException (java.io.IOException): 501
SSOException (com.iplanet.sso.SSOException): 467
LinkedHashSet (java.util.LinkedHashSet): 418
SMSException (com.sun.identity.sm.SMSException): 347
IdRepoException (com.sun.identity.idm.IdRepoException): 268
Collection (java.util.Collection): 259
ImmutableSet (com.google.common.collect.ImmutableSet): 256
File (java.io.File): 245
SSOToken (com.iplanet.sso.SSOToken): 226
Collectors (java.util.stream.Collectors): 219
Test (org.testng.annotations.Test): 209