
Example 31 with IdentityHashMap

Use of java.util.IdentityHashMap in project gerrit by GerritCodeReview.

The class SectionSortCache, method sort:

void sort(String ref, List<AccessSection> sections) {
    final int cnt = sections.size();
    if (cnt <= 1) {
        return;
    }
    EntryKey key = EntryKey.create(ref, sections);
    EntryVal val = cache.getIfPresent(key);
    if (val != null) {
        int[] srcIdx = val.order;
        if (srcIdx != null) {
            AccessSection[] srcList = copy(sections);
            for (int i = 0; i < cnt; i++) {
                sections.set(i, srcList[srcIdx[i]]);
            }
        } else {
        // Identity transform. No sorting is required.
        }
    } else {
        boolean poison = false;
        IdentityHashMap<AccessSection, Integer> srcMap = new IdentityHashMap<>();
        for (int i = 0; i < cnt; i++) {
            poison |= srcMap.put(sections.get(i), i) != null;
        }
        Collections.sort(sections, new MostSpecificComparator(ref));
        int[] srcIdx;
        if (isIdentityTransform(sections, srcMap)) {
            srcIdx = null;
        } else {
            srcIdx = new int[cnt];
            for (int i = 0; i < cnt; i++) {
                srcIdx[i] = srcMap.get(sections.get(i));
            }
        }
        if (poison) {
            log.error("Received duplicate AccessSection instances, not caching sort");
        } else {
            cache.put(key, new EntryVal(srcIdx));
        }
    }
}
Also used : MostSpecificComparator(com.google.gerrit.server.util.MostSpecificComparator) IdentityHashMap(java.util.IdentityHashMap) AccessSection(com.google.gerrit.common.data.AccessSection)
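The cache stores the permutation by mapping each AccessSection instance to its pre-sort index; an IdentityHashMap is used so that two sections that are equal() but distinct objects are still tracked separately, and a genuinely duplicated instance poisons the cache instead of silently corrupting the recorded order. A minimal, self-contained sketch of the same record-and-replay idea, using plain Strings instead of Gerrit's AccessSection (the SortOrderRecorder and sortAndRecord names are illustrative, not from the project):

import java.util.*;

class SortOrderRecorder {
    /**
     * Sorts the list in place and returns, for each new position, the original index
     * of the element now there. Returns null if the sort was the identity transform
     * or if the same instance appeared twice (nothing worth caching in either case).
     */
    static int[] sortAndRecord(List<String> items, Comparator<String> cmp) {
        IdentityHashMap<String, Integer> originalIndex = new IdentityHashMap<>();
        for (int i = 0; i < items.size(); i++) {
            if (originalIndex.put(items.get(i), i) != null) {
                return null; // duplicate instance: the recorded order would be ambiguous
            }
        }
        items.sort(cmp);
        int[] order = new int[items.size()];
        boolean identity = true;
        for (int i = 0; i < items.size(); i++) {
            order[i] = originalIndex.get(items.get(i)); // identity lookup survives the reordering
            identity &= order[i] == i;
        }
        return identity ? null : order;
    }
}

A cached order array can then be replayed on a fresh, unsorted list exactly as the Gerrit code does: copy the list, then set(i, copy[order[i]]) for each position.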

Example 32 with IdentityHashMap

Use of java.util.IdentityHashMap in project lucene-solr by apache.

The class RamUsageTester, method measureObjectSize:

/*
   * Non-recursive version of object descend. This consumes more memory than recursive in-depth
   * traversal but prevents stack overflows on long chains of objects
   * or complex graphs (a max. recursion depth on my machine was ~5000 objects linked in a chain
   * so not too much).
   */
private static long measureObjectSize(Object root, Accumulator accumulator) {
    // Objects seen so far.
    final Set<Object> seen = Collections.newSetFromMap(new IdentityHashMap<Object, Boolean>());
    // Class cache with reference Field and precalculated shallow size. 
    final IdentityHashMap<Class<?>, ClassCache> classCache = new IdentityHashMap<>();
    // Stack of objects pending traversal. Recursion caused stack overflows. 
    final ArrayList<Object> stack = new ArrayList<>();
    stack.add(root);
    long totalSize = 0;
    while (!stack.isEmpty()) {
        final Object ob = stack.remove(stack.size() - 1);
        if (ob == null || seen.contains(ob)) {
            continue;
        }
        seen.add(ob);
        final Class<?> obClazz = ob.getClass();
        assert obClazz != null : "jvm bug detected (Object.getClass() == null). please report this to your vendor";
        if (obClazz.isArray()) {
            /*
         * Consider an array, possibly of primitive types. Push any of its references to
         * the processing stack and accumulate this array's shallow size. 
         */
            final long shallowSize = RamUsageEstimator.shallowSizeOf(ob);
            final int len = Array.getLength(ob);
            final List<Object> values;
            Class<?> componentClazz = obClazz.getComponentType();
            if (componentClazz.isPrimitive()) {
                values = Collections.emptyList();
            } else {
                values = new AbstractList<Object>() {

                    @Override
                    public Object get(int index) {
                        return Array.get(ob, index);
                    }

                    @Override
                    public int size() {
                        return len;
                    }
                };
            }
            totalSize += accumulator.accumulateArray(ob, shallowSize, values, stack);
        } else {
            /*
         * Consider an object. Push any references it has to the processing stack
         * and accumulate this object's shallow size. 
         */
            try {
                ClassCache cachedInfo = classCache.get(obClazz);
                if (cachedInfo == null) {
                    classCache.put(obClazz, cachedInfo = createCacheEntry(obClazz));
                }
                boolean needsReflection = true;
                if (Constants.JRE_IS_MINIMUM_JAVA9 && obClazz.getName().startsWith("java.")) {
                    // Java 9: Best guess for some known types, as we cannot precisely look into runtime classes:
                    final ToLongFunction<Object> func = SIMPLE_TYPES.get(obClazz);
                    if (func != null) {
                        // some simple type like String where the size is easy to get from public properties
                        totalSize += accumulator.accumulateObject(ob, cachedInfo.alignedShallowInstanceSize + func.applyAsLong(ob), Collections.emptyMap(), stack);
                        needsReflection = false;
                    } else if (ob instanceof Iterable) {
                        final List<Object> values = StreamSupport.stream(((Iterable<?>) ob).spliterator(), false).collect(Collectors.toList());
                        totalSize += accumulator.accumulateArray(ob, cachedInfo.alignedShallowInstanceSize + RamUsageEstimator.NUM_BYTES_ARRAY_HEADER, values, stack);
                        needsReflection = false;
                    } else if (ob instanceof Map) {
                        final List<Object> values = ((Map<?, ?>) ob).entrySet().stream().flatMap(e -> Stream.of(e.getKey(), e.getValue())).collect(Collectors.toList());
                        totalSize += accumulator.accumulateArray(ob, cachedInfo.alignedShallowInstanceSize + RamUsageEstimator.NUM_BYTES_ARRAY_HEADER, values, stack);
                        totalSize += RamUsageEstimator.NUM_BYTES_ARRAY_HEADER;
                        needsReflection = false;
                    }
                }
                if (needsReflection) {
                    final Map<Field, Object> fieldValues = new HashMap<>();
                    for (Field f : cachedInfo.referenceFields) {
                        fieldValues.put(f, f.get(ob));
                    }
                    totalSize += accumulator.accumulateObject(ob, cachedInfo.alignedShallowInstanceSize, fieldValues, stack);
                }
            } catch (IllegalAccessException e) {
                // this should never happen as we enabled setAccessible().
                throw new RuntimeException("Reflective field access failed?", e);
            }
        }
    }
    // Help the GC (?).
    seen.clear();
    stack.clear();
    classCache.clear();
    return totalSize;
}
Also used : Array(java.lang.reflect.Array) IdentityHashMap(java.util.IdentityHashMap) ByteArrayOutputStream(java.io.ByteArrayOutputStream) Collection(java.util.Collection) AbstractList(java.util.AbstractList) Set(java.util.Set) HashMap(java.util.HashMap) Field(java.lang.reflect.Field) PrivilegedAction(java.security.PrivilegedAction) Collectors(java.util.stream.Collectors) File(java.io.File) ArrayList(java.util.ArrayList) List(java.util.List) Stream(java.util.stream.Stream) Modifier(java.lang.reflect.Modifier) Map(java.util.Map) StreamSupport(java.util.stream.StreamSupport) AccessController(java.security.AccessController) ToLongFunction(java.util.function.ToLongFunction) Path(java.nio.file.Path) Collections(java.util.Collections)
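The core pattern here is cycle-safe, iterative object-graph traversal: an identity-based "seen" set (Collections.newSetFromMap over an IdentityHashMap) plus an explicit stack replace recursion, so objects that are equal() but distinct are each visited once and reference cycles terminate. A stripped-down sketch of the same walk that merely counts reachable nodes, under the assumption of a trivial Node type (GraphWalk, Node, and countReachable are made-up names for illustration):

import java.util.*;

final class GraphWalk {
    static final class Node {
        final List<Node> next = new ArrayList<>();
    }

    /** Counts nodes reachable from root, tolerating cycles, without recursion. */
    static int countReachable(Node root) {
        // Identity-based "seen" set: distinct-but-equal nodes are still counted separately.
        Set<Node> seen = Collections.newSetFromMap(new IdentityHashMap<>());
        List<Node> stack = new ArrayList<>();
        stack.add(root);
        int count = 0;
        while (!stack.isEmpty()) {
            Node n = stack.remove(stack.size() - 1);
            if (n == null || !seen.add(n)) {
                continue; // null edge, or already visited
            }
            count++;
            stack.addAll(n.next); // children are processed later, never recursively
        }
        return count;
    }
}

The ArrayList-as-stack mirrors the original: depth is bounded by the heap rather than the call stack, which is exactly why the Lucene utility avoids recursion.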

Example 33 with IdentityHashMap

Use of java.util.IdentityHashMap in project lucene-solr by apache.

The class StatsField, method computeBaseDocSet:

/**
   * Computes a base {@link DocSet} for the current request to be used
   * when computing global stats for the local index.
   *
   * This is typically the same as the main DocSet for the {@link ResponseBuilder}
   * unless {@link CommonParams#TAG tag}ged filter queries have been excluded using 
   * the {@link CommonParams#EXCLUDE ex} local param
   */
public DocSet computeBaseDocSet() throws IOException {
    DocSet docs = rb.getResults().docSet;
    Map<?, ?> tagMap = (Map<?, ?>) rb.req.getContext().get("tags");
    if (excludeTagList.isEmpty() || null == tagMap) {
        // aren't any tagged filters to exclude anyway.
        return docs;
    }
    IdentityHashMap<Query, Boolean> excludeSet = new IdentityHashMap<Query, Boolean>();
    for (String excludeTag : excludeTagList) {
        Object olst = tagMap.get(excludeTag);
        // tagMap has entries of List<String,List<QParser>>, but subject to change in the future
        if (!(olst instanceof Collection))
            continue;
        for (Object o : (Collection<?>) olst) {
            if (!(o instanceof QParser))
                continue;
            QParser qp = (QParser) o;
            try {
                excludeSet.put(qp.getQuery(), Boolean.TRUE);
            } catch (SyntaxError e) {
                // this shouldn't be possible since the request should have already
                // failed when attempting to execute the query, but just in case...
                throw new SolrException(ErrorCode.BAD_REQUEST, "Excluded query can't be parsed: " + originalParam + " due to: " + e.getMessage(), e);
            }
        }
    }
    if (excludeSet.size() == 0)
        return docs;
    List<Query> qlist = new ArrayList<Query>();
    // add the base query
    if (!excludeSet.containsKey(rb.getQuery())) {
        qlist.add(rb.getQuery());
    }
    // add the filters
    if (rb.getFilters() != null) {
        for (Query q : rb.getFilters()) {
            if (!excludeSet.containsKey(q)) {
                qlist.add(q);
            }
        }
    }
    // get the new base docset for this facet
    return searcher.getDocSet(qlist);
}
Also used : Query(org.apache.lucene.search.Query) FunctionQuery(org.apache.lucene.queries.function.FunctionQuery) IdentityHashMap(java.util.IdentityHashMap) ArrayList(java.util.ArrayList) SyntaxError(org.apache.solr.search.SyntaxError) QParser(org.apache.solr.search.QParser) Collection(java.util.Collection) Map(java.util.Map) DocSet(org.apache.solr.search.DocSet) SolrException(org.apache.solr.common.SolrException)
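The excludeSet above behaves as an identity set over Query objects: a filter is dropped only if it is the very same instance that was tagged, not merely a query that compares equal(). A generic sketch of that instance-exclusion pattern, with the types reduced to String for brevity (InstanceExclusion and keepAllExcept are invented names):

import java.util.*;

final class InstanceExclusion {
    /** Returns the items that are not (by reference) in the excluded collection. */
    static <T> List<T> keepAllExcept(List<T> items, Collection<T> excluded) {
        IdentityHashMap<T, Boolean> excludeSet = new IdentityHashMap<>();
        for (T e : excluded) {
            excludeSet.put(e, Boolean.TRUE);
        }
        List<T> kept = new ArrayList<>();
        for (T item : items) {
            if (!excludeSet.containsKey(item)) { // identity lookup, not equals()
                kept.add(item);
            }
        }
        return kept;
    }

    public static void main(String[] args) {
        String a = new String("fq");
        String b = new String("fq"); // equal to a, but a different instance
        List<String> filters = Arrays.asList(a, b);
        // Only the exact instance 'a' is excluded; 'b' survives despite being equal().
        System.out.println(keepAllExcept(filters, Collections.singletonList(a))); // prints [fq]
    }
}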

Example 34 with IdentityHashMap

Use of java.util.IdentityHashMap in project lucene-solr by apache.

The class MockDirectoryWrapper, method crash:

/** Simulates a crash of OS or machine by overwriting
   *  unsynced files. */
public synchronized void crash() throws IOException {
    openFiles = new HashMap<>();
    openFilesForWrite = new HashSet<>();
    openFilesDeleted = new HashSet<>();
    // first force-close all files, so we can corrupt on windows etc.
    // clone the file map, as these guys want to remove themselves on close.
    Map<Closeable, Exception> m = new IdentityHashMap<>(openFileHandles);
    for (Closeable f : m.keySet()) {
        try {
            f.close();
        } catch (Exception ignored) {
        }
    }
    corruptFiles(unSyncedFiles);
    crashed = true;
    unSyncedFiles = new HashSet<>();
}
Also used : IdentityHashMap(java.util.IdentityHashMap) Closeable(java.io.Closeable) NoSuchFileException(java.nio.file.NoSuchFileException) IOException(java.io.IOException) FileNotFoundException(java.io.FileNotFoundException)
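The copy into a fresh IdentityHashMap is the important detail: closing each file removes it from openFileHandles (as the comment notes, "these guys want to remove themselves on close"), so iterating the live map directly would throw ConcurrentModificationException. A small sketch of the same snapshot-then-mutate pattern, assuming a hypothetical HandleTracker with register/onClosed/closeAll methods (all names are illustrative):

import java.io.Closeable;
import java.util.IdentityHashMap;
import java.util.Map;

final class HandleTracker {
    private final Map<Closeable, Exception> openHandles = new IdentityHashMap<>();

    void register(Closeable c) {
        openHandles.put(c, new Exception("opened here")); // records the open site's stack trace
    }

    void onClosed(Closeable c) {
        openHandles.remove(c); // invoked from each handle's close()
    }

    /**
     * Force-closes everything, iterating over a snapshot so that close() callbacks
     * may safely remove entries from the live map while we loop.
     */
    void closeAll() {
        Map<Closeable, Exception> snapshot = new IdentityHashMap<>(openHandles);
        for (Closeable c : snapshot.keySet()) {
            try {
                c.close();
            } catch (Exception ignored) {
                // best effort: a failed close must not stop the remaining handles
            }
        }
    }
}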

Example 35 with IdentityHashMap

Use of java.util.IdentityHashMap in project jackrabbit by apache.

The class WeightedHighlighter, method mergeFragments:

@Override
protected String mergeFragments(TermVectorOffsetInfo[] offsets, String text, String excerptStart, String excerptEnd, String fragmentStart, String fragmentEnd, String hlStart, String hlEnd, int maxFragments, int surround) throws IOException {
    if (offsets == null || offsets.length == 0) {
        // nothing to highlight
        return createDefaultExcerpt(text, excerptStart, excerptEnd, fragmentStart, fragmentEnd, surround * 2);
    }
    PriorityQueue<FragmentInfo> bestFragments = new FragmentInfoPriorityQueue(maxFragments);
    for (int i = 0; i < offsets.length; i++) {
        if (offsets[i].getEndOffset() <= text.length()) {
            FragmentInfo fi = new FragmentInfo(offsets[i], surround * 2);
            for (int j = i + 1; j < offsets.length; j++) {
                if (offsets[j].getEndOffset() > text.length()) {
                    break;
                }
                if (!fi.add(offsets[j], text)) {
                    break;
                }
            }
            bestFragments.insertWithOverflow(fi);
        }
    }
    if (bestFragments.size() == 0) {
        return createDefaultExcerpt(text, excerptStart, excerptEnd, fragmentStart, fragmentEnd, surround * 2);
    }
    // retrieve fragment infos from queue and fill into list, least
    // fragment comes out first
    List<FragmentInfo> infos = new LinkedList<FragmentInfo>();
    while (bestFragments.size() > 0) {
        FragmentInfo fi = (FragmentInfo) bestFragments.pop();
        infos.add(0, fi);
    }
    Map<TermVectorOffsetInfo, Object> offsetInfos = new IdentityHashMap<TermVectorOffsetInfo, Object>();
    // remove overlapping fragment infos
    Iterator<FragmentInfo> it = infos.iterator();
    while (it.hasNext()) {
        FragmentInfo fi = it.next();
        boolean overlap = false;
        Iterator<TermVectorOffsetInfo> fit = fi.iterator();
        while (fit.hasNext() && !overlap) {
            TermVectorOffsetInfo oi = fit.next();
            if (offsetInfos.containsKey(oi)) {
                overlap = true;
            }
        }
        if (overlap) {
            it.remove();
        } else {
            Iterator<TermVectorOffsetInfo> oit = fi.iterator();
            while (oit.hasNext()) {
                offsetInfos.put(oit.next(), null);
            }
        }
    }
    // create excerpts
    StringBuffer sb = new StringBuffer(excerptStart);
    it = infos.iterator();
    while (it.hasNext()) {
        FragmentInfo fi = it.next();
        sb.append(fragmentStart);
        int limit = Math.max(0, fi.getStartOffset() / 2 + fi.getEndOffset() / 2 - surround);
        int len = startFragment(sb, text, fi.getStartOffset(), limit);
        TermVectorOffsetInfo lastOffsetInfo = null;
        Iterator<TermVectorOffsetInfo> fIt = fi.iterator();
        while (fIt.hasNext()) {
            TermVectorOffsetInfo oi = fIt.next();
            if (lastOffsetInfo != null) {
                // fill in text between terms
                sb.append(escape(text.substring(lastOffsetInfo.getEndOffset(), oi.getStartOffset())));
            }
            sb.append(hlStart);
            sb.append(escape(text.substring(oi.getStartOffset(), oi.getEndOffset())));
            sb.append(hlEnd);
            lastOffsetInfo = oi;
        }
        limit = Math.min(text.length(), fi.getStartOffset() - len + (surround * 2));
        endFragment(sb, text, fi.getEndOffset(), limit);
        sb.append(fragmentEnd);
    }
    sb.append(excerptEnd);
    return sb.toString();
}
Also used : IdentityHashMap(java.util.IdentityHashMap) LinkedList(java.util.LinkedList) TermVectorOffsetInfo(org.apache.lucene.index.TermVectorOffsetInfo)
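Here the IdentityHashMap acts as a set of offset infos already claimed by an accepted fragment: a later fragment is dropped as overlapping if any of its offset infos is the same instance as one previously claimed, and the map values are never read. A compact sketch of that claim-or-drop pattern over plain object groups (OverlapFilter and acceptNonOverlapping are illustrative names, not from Jackrabbit):

import java.util.*;

final class OverlapFilter {
    /**
     * Keeps groups in order, dropping any group that shares an element
     * (by identity) with a group that was already kept.
     */
    static <T> List<List<T>> acceptNonOverlapping(List<List<T>> groups) {
        Map<T, Object> claimed = new IdentityHashMap<>();
        List<List<T>> kept = new ArrayList<>();
        for (List<T> group : groups) {
            boolean overlap = false;
            for (T member : group) {
                if (claimed.containsKey(member)) {
                    overlap = true;
                    break;
                }
            }
            if (!overlap) {
                for (T member : group) {
                    claimed.put(member, null); // value unused; the map serves only as an identity set
                }
                kept.add(group);
            }
        }
        return kept;
    }
}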

Aggregations

IdentityHashMap (java.util.IdentityHashMap): 142
Map (java.util.Map): 44
HashMap (java.util.HashMap): 42
ArrayList (java.util.ArrayList): 31
HashSet (java.util.HashSet): 20
LinkedHashMap (java.util.LinkedHashMap): 18
Collection (java.util.Collection): 16
Set (java.util.Set): 16
TreeMap (java.util.TreeMap): 16
Iterator (java.util.Iterator): 14
AbstractMap (java.util.AbstractMap): 13
List (java.util.List): 11
Test (org.junit.Test): 11
DependencyNode (org.sonatype.aether.graph.DependencyNode): 10
WeakHashMap (java.util.WeakHashMap): 8
LinkedList (java.util.LinkedList): 7
TreeSet (java.util.TreeSet): 7
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 7
Tree (edu.stanford.nlp.trees.Tree): 6
IOException (java.io.IOException): 6
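The common thread across these examples is reference-equality semantics: IdentityHashMap matches keys only when they are the exact same object, unlike HashMap's equals()/hashCode() contract. A minimal demonstration of the difference (the values are chosen purely for illustration):

import java.util.HashMap;
import java.util.IdentityHashMap;
import java.util.Map;

public class IdentityVsEquals {
    public static void main(String[] args) {
        String a = new String("key");
        String b = new String("key"); // equals(a) but a different instance

        Map<String, String> byEquals = new HashMap<>();
        Map<String, String> byIdentity = new IdentityHashMap<>();

        byEquals.put(a, "first");
        byEquals.put(b, "second");    // replaces "first" because a.equals(b)
        byIdentity.put(a, "first");
        byIdentity.put(b, "second");  // kept separately because a != b

        System.out.println(byEquals.size());   // 1
        System.out.println(byIdentity.size()); // 2
    }
}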