
Example 76 with ArrayDeque

Use of java.util.ArrayDeque in project intellij-community by JetBrains.

The class TraitTransformationSupport, method process.

private static void process(@NotNull TransformationContext context, @NotNull PairConsumer<PsiClass, PsiSubstitutor> consumer) {
    Deque<Pair<PsiClass, PsiSubstitutor>> stack = new ArrayDeque<>();
    for (PsiClassType superType : context.getSuperTypes()) {
        ClassResolveResult result = superType.resolveGenerics();
        PsiClass superClass = result.getElement();
        if (superClass == null)
            continue;
        stack.push(Pair.create(superClass, result.getSubstitutor()));
    }
    Set<PsiClass> visited = ContainerUtil.newHashSet();
    while (!stack.isEmpty()) {
        Pair<PsiClass, PsiSubstitutor> current = stack.pop();
        PsiClass currentClass = current.first;
        PsiSubstitutor currentSubstitutor = current.second;
        if (!visited.add(currentClass))
            continue;
        if (GrTraitUtil.isTrait(currentClass)) {
            consumer.consume(currentClass, currentSubstitutor);
        }
        for (PsiClass superClass : currentClass.getSupers()) {
            PsiSubstitutor superSubstitutor = TypeConversionUtil.getSuperClassSubstitutor(superClass, currentClass, currentSubstitutor);
            stack.push(Pair.create(superClass, superSubstitutor));
        }
    }
}
Also used : ClassResolveResult(com.intellij.psi.PsiClassType.ClassResolveResult) ArrayDeque(java.util.ArrayDeque) Pair(com.intellij.openapi.util.Pair)
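
The ArrayDeque above serves as an explicit stack for an iterative depth-first walk over a class hierarchy, paired with a visited set so each class is handled at most once. Below is a minimal standalone sketch of the same idiom; the Node interface and its getSupers() method are hypothetical stand-ins for PsiClass and are not part of the IntelliJ sources.

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.function.Consumer;

// Hypothetical node type standing in for PsiClass.
interface Node {
    List<Node> getSupers();
}

final class HierarchyWalker {
    // Iterative DFS over a super-type hierarchy; mirrors the push/pop/visited
    // structure of TraitTransformationSupport.process above.
    static void walk(Node root, Consumer<Node> consumer) {
        Deque<Node> stack = new ArrayDeque<>();
        Set<Node> visited = new HashSet<>();
        stack.push(root);
        while (!stack.isEmpty()) {
            Node current = stack.pop();
            if (!visited.add(current)) {
                continue;           // each node is processed at most once
            }
            consumer.accept(current);
            for (Node parent : current.getSupers()) {
                stack.push(parent); // LIFO order gives a depth-first traversal
            }
        }
    }
}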

Example 77 with ArrayDeque

Use of java.util.ArrayDeque in project asterixdb by apache.

The class RecordRemoveFieldsTypeComputer, method computeType.

@Override
public IAType computeType(ILogicalExpression expression, IVariableTypeEnvironment env, IMetadataProvider<?, ?> metadataProvider) throws AlgebricksException {
    AbstractFunctionCallExpression funcExpr = (AbstractFunctionCallExpression) expression;
    String funcName = funcExpr.getFunctionIdentifier().getName();
    IAType type0 = (IAType) env.getType(funcExpr.getArguments().get(0).getValue());
    List<List<String>> pathList = new ArrayList<>();
    Set<String> fieldNameSet = new HashSet<>();
    Deque<String> fieldPathStack = new ArrayDeque<>();
    ARecordType inputRecordType = getRecordTypeFromType(funcName, type0);
    if (inputRecordType == null) {
        return BuiltinType.ANY;
    }
    AbstractLogicalExpression arg1 = (AbstractLogicalExpression) funcExpr.getArguments().get(1).getValue();
    IAType inputListType = (IAType) env.getType(arg1);
    AOrderedListType inputOrderedListType = TypeComputeUtils.extractOrderedListType(inputListType);
    if (inputOrderedListType == null) {
        throw new TypeMismatchException(funcName, 1, inputListType.getTypeTag(), ATypeTag.ARRAY);
    }
    ATypeTag tt = inputOrderedListType.getItemType().getTypeTag();
    if (tt == ATypeTag.STRING) {
        // If the top-level field names are constants known at compile time
        if (setFieldNameSet(arg1, fieldNameSet)) {
            return buildOutputType(fieldPathStack, inputRecordType, fieldNameSet, pathList);
        } else {
            return DefaultOpenFieldType.NESTED_OPEN_RECORD_TYPE;
        }
    } else {
        // tt == ATypeTag.ANY, meaning the list is nested
        computeTypeFromNonConstantExpression(funcName, arg1, fieldNameSet, pathList);
        IAType resultType = buildOutputType(fieldPathStack, inputRecordType, fieldNameSet, pathList);
        return resultType;
    }
}
Also used : AbstractLogicalExpression(org.apache.hyracks.algebricks.core.algebra.expressions.AbstractLogicalExpression) AbstractFunctionCallExpression(org.apache.hyracks.algebricks.core.algebra.expressions.AbstractFunctionCallExpression) AOrderedListType(org.apache.asterix.om.types.AOrderedListType) TypeMismatchException(org.apache.asterix.om.exceptions.TypeMismatchException) ArrayList(java.util.ArrayList) AString(org.apache.asterix.om.base.AString) ArrayDeque(java.util.ArrayDeque) ATypeTag(org.apache.asterix.om.types.ATypeTag) AOrderedList(org.apache.asterix.om.base.AOrderedList) ArrayList(java.util.ArrayList) List(java.util.List) ARecordType(org.apache.asterix.om.types.ARecordType) IAType(org.apache.asterix.om.types.IAType) HashSet(java.util.HashSet)
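
Here the ArrayDeque (fieldPathStack) is created empty and handed to buildOutputType as a working stack for tracking the current field path while the output record type is rebuilt. A small sketch of that push-on-descent, pop-on-return idiom, using a hypothetical nested map in place of ARecordType; PathCollector and leafPaths are illustrative names, not AsterixDB APIs.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;
import java.util.Map;

final class PathCollector {
    // Collects the dotted path of every leaf in a nested map, using a Deque as
    // a mutable field-path stack (push on descent, pop on return).
    static List<String> leafPaths(Map<String, Object> record) {
        List<String> result = new ArrayList<>();
        collect(record, new ArrayDeque<>(), result);
        return result;
    }

    private static void collect(Map<String, Object> record,
                                Deque<String> path,
                                List<String> result) {
        for (Map.Entry<String, Object> e : record.entrySet()) {
            path.addLast(e.getKey());
            if (e.getValue() instanceof Map) {
                @SuppressWarnings("unchecked")
                Map<String, Object> nested = (Map<String, Object>) e.getValue();
                collect(nested, path, result);
            } else {
                result.add(String.join(".", path));
            }
            path.removeLast();
        }
    }
}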

Example 78 with ArrayDeque

Use of java.util.ArrayDeque in project asterixdb by apache.

The class TypeUtil, method createEnforcedType.

/**
     * Merges typed index fields with the specified recordType, allowing indexed fields to be optional.
     * E.g. the type { "personId": int32, "name": string, "address": { "street": string } } with typed indexes
     * on age:int32 and address.state:string will be merged into the type { "personId": int32, "name": string,
     * "age": int32?, "address": { "street": string, "state": string? } }. Used by open indexes to enforce
     * the type of an indexed record.
     */
public static Pair<ARecordType, ARecordType> createEnforcedType(ARecordType recordType, ARecordType metaType, List<Index> indexes) throws AlgebricksException {
    ARecordType enforcedRecordType = recordType;
    ARecordType enforcedMetaType = metaType;
    for (Index index : indexes) {
        if (!index.isSecondaryIndex() || !index.isEnforcingKeyFileds()) {
            continue;
        }
        if (index.hasMetaFields()) {
            throw new AlgebricksException("Indexing an open field is only supported on the record part");
        }
        for (int i = 0; i < index.getKeyFieldNames().size(); i++) {
            Deque<Pair<ARecordType, String>> nestedTypeStack = new ArrayDeque<>();
            List<String> splits = index.getKeyFieldNames().get(i);
            ARecordType nestedFieldType = enforcedRecordType;
            boolean openRecords = false;
            String bridgeName = nestedFieldType.getTypeName();
            int j;
            // Build the stack for the enforced type
            for (j = 1; j < splits.size(); j++) {
                nestedTypeStack.push(new Pair<>(nestedFieldType, splits.get(j - 1)));
                bridgeName = nestedFieldType.getTypeName();
                nestedFieldType = (ARecordType) enforcedRecordType.getSubFieldType(splits.subList(0, j));
                if (nestedFieldType == null) {
                    openRecords = true;
                    break;
                }
            }
            if (openRecords) {
                // create the smallest record
                enforcedRecordType = new ARecordType(splits.get(splits.size() - 2), new String[] { splits.get(splits.size() - 1) }, new IAType[] { AUnionType.createUnknownableType(index.getKeyFieldTypes().get(i)) }, true);
                // create the open part of the nested field
                for (int k = splits.size() - 3; k > (j - 2); k--) {
                    enforcedRecordType = new ARecordType(splits.get(k), new String[] { splits.get(k + 1) }, new IAType[] { AUnionType.createUnknownableType(enforcedRecordType) }, true);
                }
                // Bridge the gap
                Pair<ARecordType, String> gapPair = nestedTypeStack.pop();
                ARecordType parent = gapPair.first;
                IAType[] parentFieldTypes = ArrayUtils.addAll(parent.getFieldTypes().clone(), new IAType[] { AUnionType.createUnknownableType(enforcedRecordType) });
                enforcedRecordType = new ARecordType(bridgeName, ArrayUtils.addAll(parent.getFieldNames(), enforcedRecordType.getTypeName()), parentFieldTypes, true);
            } else {
                // Schema is closed all the way to the field:
                // enforced fields are either null or strongly typed
                Map<String, IAType> recordNameTypesMap = TypeUtil.createRecordNameTypeMap(nestedFieldType);
                // if an enforced field already exists and the type is correct
                IAType enforcedFieldType = recordNameTypesMap.get(splits.get(splits.size() - 1));
                if (enforcedFieldType != null && enforcedFieldType.getTypeTag() == ATypeTag.UNION && ((AUnionType) enforcedFieldType).isUnknownableType()) {
                    enforcedFieldType = ((AUnionType) enforcedFieldType).getActualType();
                }
                if (enforcedFieldType != null && !ATypeHierarchy.canPromote(enforcedFieldType.getTypeTag(), index.getKeyFieldTypes().get(i).getTypeTag())) {
                    throw new AlgebricksException("Cannot enforce field " + index.getKeyFieldNames().get(i) + " to have type " + index.getKeyFieldTypes().get(i));
                }
                if (enforcedFieldType == null) {
                    recordNameTypesMap.put(splits.get(splits.size() - 1), AUnionType.createUnknownableType(index.getKeyFieldTypes().get(i)));
                }
                enforcedRecordType = new ARecordType(nestedFieldType.getTypeName(), recordNameTypesMap.keySet().toArray(new String[recordNameTypesMap.size()]), recordNameTypesMap.values().toArray(new IAType[recordNameTypesMap.size()]), nestedFieldType.isOpen());
            }
            // Create the enforced type for the nested fields in the schema, from the ground up
            if (!nestedTypeStack.isEmpty()) {
                while (!nestedTypeStack.isEmpty()) {
                    Pair<ARecordType, String> nestedTypePair = nestedTypeStack.pop();
                    ARecordType nestedRecType = nestedTypePair.first;
                    IAType[] nestedRecTypeFieldTypes = nestedRecType.getFieldTypes().clone();
                    nestedRecTypeFieldTypes[nestedRecType.getFieldIndex(nestedTypePair.second)] = enforcedRecordType;
                    enforcedRecordType = new ARecordType(nestedRecType.getTypeName() + "_enforced", nestedRecType.getFieldNames(), nestedRecTypeFieldTypes, nestedRecType.isOpen());
                }
            }
        }
    }
    return new Pair<>(enforcedRecordType, enforcedMetaType);
}
Also used : AlgebricksException(org.apache.hyracks.algebricks.common.exceptions.AlgebricksException) Index(org.apache.asterix.metadata.entities.Index) ArrayDeque(java.util.ArrayDeque) ARecordType(org.apache.asterix.om.types.ARecordType) Pair(org.apache.hyracks.algebricks.common.utils.Pair) IAType(org.apache.asterix.om.types.IAType)
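
createEnforcedType pushes (record type, field name) pairs onto the ArrayDeque while walking down the indexed field path, then pops them to rebuild each enclosing record from the innermost level outwards. A simplified sketch of that descend-then-unwind pattern over plain maps; NestedRebuild and withValue are illustrative names, not part of AsterixDB.

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

final class NestedRebuild {
    // Returns a copy of 'root' in which the value at the nested path is replaced,
    // rebuilding each enclosing map bottom-up after the descent.
    @SuppressWarnings("unchecked")
    static Map<String, Object> withValue(Map<String, Object> root,
                                         List<String> path, Object value) {
        Deque<Map.Entry<Map<String, Object>, String>> stack = new ArrayDeque<>();
        Map<String, Object> current = root;
        // Descend, remembering (parent, fieldName) at each level.
        for (int i = 0; i < path.size() - 1; i++) {
            stack.push(Map.entry(current, path.get(i)));
            current = (Map<String, Object>) current.get(path.get(i));
        }
        Map<String, Object> rebuilt = new HashMap<>(current);
        rebuilt.put(path.get(path.size() - 1), value);
        // Unwind: wrap the rebuilt map back into each parent, innermost first.
        while (!stack.isEmpty()) {
            Map.Entry<Map<String, Object>, String> frame = stack.pop();
            Map<String, Object> parentCopy = new HashMap<>(frame.getKey());
            parentCopy.put(frame.getValue(), rebuilt);
            rebuilt = parentCopy;
        }
        return rebuilt;
    }
}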

Example 79 with ArrayDeque

Use of java.util.ArrayDeque in project lucene-solr by apache.

The class NumericFacets, method getCountsSingleValue.

private static NamedList<Integer> getCountsSingleValue(SolrIndexSearcher searcher, DocSet docs, String fieldName, int offset, int limit, int mincount, boolean missing, String sort) throws IOException {
    boolean zeros = mincount <= 0;
    mincount = Math.max(mincount, 1);
    final SchemaField sf = searcher.getSchema().getField(fieldName);
    final FieldType ft = sf.getType();
    final NumberType numericType = ft.getNumberType();
    if (numericType == null) {
        throw new IllegalStateException();
    }
    // We don't return zeros when using PointFields or when index=false
    zeros = zeros && !ft.isPointField() && sf.indexed();
    final List<LeafReaderContext> leaves = searcher.getIndexReader().leaves();
    // 1. accumulate
    final HashTable hashTable = new HashTable(true);
    final Iterator<LeafReaderContext> ctxIt = leaves.iterator();
    LeafReaderContext ctx = null;
    NumericDocValues longs = null;
    int missingCount = 0;
    for (DocIterator docsIt = docs.iterator(); docsIt.hasNext(); ) {
        final int doc = docsIt.nextDoc();
        if (ctx == null || doc >= ctx.docBase + ctx.reader().maxDoc()) {
            do {
                ctx = ctxIt.next();
            } while (ctx == null || doc >= ctx.docBase + ctx.reader().maxDoc());
            assert doc >= ctx.docBase;
            switch(numericType) {
                case LONG:
                case DATE:
                case INTEGER:
                    // Long, Date and Integer
                    longs = DocValues.getNumeric(ctx.reader(), fieldName);
                    break;
                case FLOAT:
                    // TODO: this bit flipping should probably be moved to tie-break in the PQ comparator
                    longs = new FilterNumericDocValues(DocValues.getNumeric(ctx.reader(), fieldName)) {

                        @Override
                        public long longValue() throws IOException {
                            long bits = super.longValue();
                            if (bits < 0)
                                bits ^= 0x7fffffffffffffffL;
                            return bits;
                        }
                    };
                    break;
                case DOUBLE:
                    // TODO: this bit flipping should probably be moved to tie-break in the PQ comparator
                    longs = new FilterNumericDocValues(DocValues.getNumeric(ctx.reader(), fieldName)) {

                        @Override
                        public long longValue() throws IOException {
                            long bits = super.longValue();
                            if (bits < 0)
                                bits ^= 0x7fffffffffffffffL;
                            return bits;
                        }
                    };
                    break;
                default:
                    throw new AssertionError("Unexpected type: " + numericType);
            }
        }
        int valuesDocID = longs.docID();
        if (valuesDocID < doc - ctx.docBase) {
            valuesDocID = longs.advance(doc - ctx.docBase);
        }
        if (valuesDocID == doc - ctx.docBase) {
            hashTable.add(doc, longs.longValue(), 1);
        } else {
            ++missingCount;
        }
    }
    // 2. select top-k facet values
    final int pqSize = limit < 0 ? hashTable.size : Math.min(offset + limit, hashTable.size);
    final PriorityQueue<Entry> pq;
    if (FacetParams.FACET_SORT_COUNT.equals(sort) || FacetParams.FACET_SORT_COUNT_LEGACY.equals(sort)) {
        pq = new PriorityQueue<Entry>(pqSize) {

            @Override
            protected boolean lessThan(Entry a, Entry b) {
                if (a.count < b.count || (a.count == b.count && a.bits > b.bits)) {
                    return true;
                } else {
                    return false;
                }
            }
        };
    } else {
        pq = new PriorityQueue<Entry>(pqSize) {

            @Override
            protected boolean lessThan(Entry a, Entry b) {
                return a.bits > b.bits;
            }
        };
    }
    Entry e = null;
    for (int i = 0; i < hashTable.bits.length; ++i) {
        if (hashTable.counts[i] >= mincount) {
            if (e == null) {
                e = new Entry();
            }
            e.bits = hashTable.bits[i];
            e.count = hashTable.counts[i];
            e.docID = hashTable.docIDs[i];
            e = pq.insertWithOverflow(e);
        }
    }
    // 4. build the NamedList
    final ValueSource vs = ft.getValueSource(sf, null);
    final NamedList<Integer> result = new NamedList<>();
    // to be merged with terms from the terms dict
    if (!zeros || FacetParams.FACET_SORT_COUNT.equals(sort) || FacetParams.FACET_SORT_COUNT_LEGACY.equals(sort)) {
        // Only keep items we're interested in
        final Deque<Entry> counts = new ArrayDeque<>();
        while (pq.size() > offset) {
            counts.addFirst(pq.pop());
        }
        // Entries from the PQ first, then using the terms dictionary
        for (Entry entry : counts) {
            final int readerIdx = ReaderUtil.subIndex(entry.docID, leaves);
            final FunctionValues values = vs.getValues(Collections.emptyMap(), leaves.get(readerIdx));
            result.add(values.strVal(entry.docID - leaves.get(readerIdx).docBase), entry.count);
        }
        if (zeros && (limit < 0 || result.size() < limit)) {
            // need to merge with the term dict
            if (!sf.indexed() && !sf.hasDocValues()) {
                throw new IllegalStateException("Cannot use " + FacetParams.FACET_MINCOUNT + "=0 on field " + sf.getName() + " which is neither indexed nor docValues");
            }
            // Add zeros until there are limit results
            final Set<String> alreadySeen = new HashSet<>();
            while (pq.size() > 0) {
                Entry entry = pq.pop();
                final int readerIdx = ReaderUtil.subIndex(entry.docID, leaves);
                final FunctionValues values = vs.getValues(Collections.emptyMap(), leaves.get(readerIdx));
                alreadySeen.add(values.strVal(entry.docID - leaves.get(readerIdx).docBase));
            }
            for (int i = 0; i < result.size(); ++i) {
                alreadySeen.add(result.getName(i));
            }
            final Terms terms = searcher.getSlowAtomicReader().terms(fieldName);
            if (terms != null) {
                final String prefixStr = TrieField.getMainValuePrefix(ft);
                final BytesRef prefix;
                if (prefixStr != null) {
                    prefix = new BytesRef(prefixStr);
                } else {
                    prefix = new BytesRef();
                }
                final TermsEnum termsEnum = terms.iterator();
                BytesRef term;
                switch(termsEnum.seekCeil(prefix)) {
                    case FOUND:
                    case NOT_FOUND:
                        term = termsEnum.term();
                        break;
                    case END:
                        term = null;
                        break;
                    default:
                        throw new AssertionError();
                }
                final CharsRefBuilder spare = new CharsRefBuilder();
                for (int skipped = hashTable.size; skipped < offset && term != null && StringHelper.startsWith(term, prefix); ) {
                    ft.indexedToReadable(term, spare);
                    final String termStr = spare.toString();
                    if (!alreadySeen.contains(termStr)) {
                        ++skipped;
                    }
                    term = termsEnum.next();
                }
                for (; term != null && StringHelper.startsWith(term, prefix) && (limit < 0 || result.size() < limit); term = termsEnum.next()) {
                    ft.indexedToReadable(term, spare);
                    final String termStr = spare.toString();
                    if (!alreadySeen.contains(termStr)) {
                        result.add(termStr, 0);
                    }
                }
            }
        }
    } else {
        // => Merge the PQ and the terms dictionary on the fly
        if (!sf.indexed()) {
            throw new IllegalStateException("Cannot use " + FacetParams.FACET_SORT + "=" + FacetParams.FACET_SORT_INDEX + " on a field which is not indexed");
        }
        final Map<String, Integer> counts = new HashMap<>();
        while (pq.size() > 0) {
            final Entry entry = pq.pop();
            final int readerIdx = ReaderUtil.subIndex(entry.docID, leaves);
            final FunctionValues values = vs.getValues(Collections.emptyMap(), leaves.get(readerIdx));
            counts.put(values.strVal(entry.docID - leaves.get(readerIdx).docBase), entry.count);
        }
        final Terms terms = searcher.getSlowAtomicReader().terms(fieldName);
        if (terms != null) {
            final String prefixStr = TrieField.getMainValuePrefix(ft);
            final BytesRef prefix;
            if (prefixStr != null) {
                prefix = new BytesRef(prefixStr);
            } else {
                prefix = new BytesRef();
            }
            final TermsEnum termsEnum = terms.iterator();
            BytesRef term;
            switch(termsEnum.seekCeil(prefix)) {
                case FOUND:
                case NOT_FOUND:
                    term = termsEnum.term();
                    break;
                case END:
                    term = null;
                    break;
                default:
                    throw new AssertionError();
            }
            final CharsRefBuilder spare = new CharsRefBuilder();
            for (int i = 0; i < offset && term != null && StringHelper.startsWith(term, prefix); ++i) {
                term = termsEnum.next();
            }
            for (; term != null && StringHelper.startsWith(term, prefix) && (limit < 0 || result.size() < limit); term = termsEnum.next()) {
                ft.indexedToReadable(term, spare);
                final String termStr = spare.toString();
                Integer count = counts.get(termStr);
                if (count == null) {
                    count = 0;
                }
                result.add(termStr, count);
            }
        }
    }
    if (missing) {
        result.add(null, missingCount);
    }
    return result;
}
Also used : FilterNumericDocValues(org.apache.lucene.index.FilterNumericDocValues) NumericDocValues(org.apache.lucene.index.NumericDocValues) SortedNumericDocValues(org.apache.lucene.index.SortedNumericDocValues) DocIterator(org.apache.solr.search.DocIterator) HashMap(java.util.HashMap) TermsEnum(org.apache.lucene.index.TermsEnum) LeafReaderContext(org.apache.lucene.index.LeafReaderContext) CharsRefBuilder(org.apache.lucene.util.CharsRefBuilder) BytesRef(org.apache.lucene.util.BytesRef) HashSet(java.util.HashSet) NamedList(org.apache.solr.common.util.NamedList) Terms(org.apache.lucene.index.Terms) IOException(java.io.IOException) FilterNumericDocValues(org.apache.lucene.index.FilterNumericDocValues) ArrayDeque(java.util.ArrayDeque) FieldType(org.apache.solr.schema.FieldType) SchemaField(org.apache.solr.schema.SchemaField) NumberType(org.apache.solr.schema.NumberType) ValueSource(org.apache.lucene.queries.function.ValueSource) FunctionValues(org.apache.lucene.queries.function.FunctionValues)
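
The ArrayDeque named counts reverses the order in which entries leave the priority queue: Lucene's PriorityQueue pops its least entry first, and addFirst turns that ascending sequence into a top-down list starting with the largest count. A standalone sketch of the same trick, using java.util.PriorityQueue rather than Lucene's class.

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.List;
import java.util.PriorityQueue;

final class TopK {
    // Keeps only the k largest values, then emits them in descending order.
    // The min-heap evicts its smallest element, and addFirst() reverses the
    // ascending pop order, mirroring the 'counts' deque above.
    static Deque<Integer> topKDescending(List<Integer> values, int k) {
        PriorityQueue<Integer> heap = new PriorityQueue<>();   // min-heap
        for (int v : values) {
            heap.offer(v);
            if (heap.size() > k) {
                heap.poll();                                   // drop the smallest
            }
        }
        Deque<Integer> result = new ArrayDeque<>();
        while (!heap.isEmpty()) {
            result.addFirst(heap.poll());                      // largest ends up first
        }
        return result;
    }
}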

Example 80 with ArrayDeque

Use of java.util.ArrayDeque in project lucene-solr by apache.

The class TestRecovery, method testLogReplay.

@Test
public void testLogReplay() throws Exception {
    try {
        DirectUpdateHandler2.commitOnClose = false;
        final Semaphore logReplay = new Semaphore(0);
        final Semaphore logReplayFinish = new Semaphore(0);
        UpdateLog.testing_logReplayHook = () -> {
            try {
                assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS));
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        };
        UpdateLog.testing_logReplayFinishHook = () -> logReplayFinish.release();
        clearIndex();
        assertU(commit());
        Deque<Long> versions = new ArrayDeque<>();
        versions.addFirst(addAndGetVersion(sdoc("id", "A1"), null));
        versions.addFirst(addAndGetVersion(sdoc("id", "A11"), null));
        versions.addFirst(addAndGetVersion(sdoc("id", "A12"), null));
        versions.addFirst(deleteByQueryAndGetVersion("id:A11", null));
        versions.addFirst(addAndGetVersion(sdoc("id", "A13"), null));
        // atomic update
        versions.addFirst(addAndGetVersion(sdoc("id", "A12", "val_i_dvo", map("set", 1)), null));
        // in-place update
        versions.addFirst(addAndGetVersion(sdoc("id", "A12", "val_i_dvo", map("set", 2)), null));
        assertJQ(req("q", "*:*"), "/response/numFound==0");
        assertJQ(req("qt", "/get", "getVersions", "" + versions.size()), "/versions==" + versions);
        h.close();
        createCore();
        // live map view
        Map<String, Metric> metrics = getMetrics();
        // Solr should kick this off now
        // h.getCore().getUpdateHandler().getUpdateLog().recoverFromLog();
        // verify that previous close didn't do a commit
        // recovery should be blocked by our hook
        assertJQ(req("q", "*:*"), "/response/numFound==0");
        // make sure we can still access versions after a restart
        assertJQ(req("qt", "/get", "getVersions", "" + versions.size()), "/versions==" + versions);
        assertEquals(UpdateLog.State.REPLAYING, h.getCore().getUpdateHandler().getUpdateLog().getState());
        // check metrics
        Gauge<Integer> state = (Gauge<Integer>) metrics.get("TLOG.state");
        assertEquals(UpdateLog.State.REPLAYING.ordinal(), state.getValue().intValue());
        Gauge<Integer> replayingLogs = (Gauge<Integer>) metrics.get("TLOG.replay.remaining.logs");
        assertTrue(replayingLogs.getValue().intValue() > 0);
        Gauge<Long> replayingDocs = (Gauge<Long>) metrics.get("TLOG.replay.remaining.bytes");
        assertTrue(replayingDocs.getValue().longValue() > 0);
        Meter replayDocs = (Meter) metrics.get("TLOG.replay.ops");
        long initialOps = replayDocs.getCount();
        // unblock recovery
        logReplay.release(1000);
        // make sure we can still access versions during recovery
        assertJQ(req("qt", "/get", "getVersions", "" + versions.size()), "/versions==" + versions);
        // wait until recovery has finished
        assertTrue(logReplayFinish.tryAcquire(timeout, TimeUnit.SECONDS));
        // assert that in-place update is retained
        assertJQ(req("q", "val_i_dvo:2"), "/response/numFound==1");
        assertJQ(req("q", "*:*"), "/response/numFound==3");
        assertEquals(7L, replayDocs.getCount() - initialOps);
        assertEquals(UpdateLog.State.ACTIVE.ordinal(), state.getValue().intValue());
        // make sure we can still access versions after recovery
        assertJQ(req("qt", "/get", "getVersions", "" + versions.size()), "/versions==" + versions);
        assertU(adoc("id", "A2"));
        assertU(adoc("id", "A3"));
        assertU(delI("A2"));
        assertU(adoc("id", "A4"));
        assertJQ(req("q", "*:*"), "/response/numFound==3");
        // assert that in-place update is retained
        assertJQ(req("q", "val_i_dvo:2"), "/response/numFound==1");
        h.close();
        createCore();
        // Solr should kick this off now
        // h.getCore().getUpdateHandler().getUpdateLog().recoverFromLog();
        // wait until recovery has finished
        assertTrue(logReplayFinish.tryAcquire(timeout, TimeUnit.SECONDS));
        assertJQ(req("q", "*:*"), "/response/numFound==5");
        assertJQ(req("q", "id:A2"), "/response/numFound==0");
        // no updates, so ensure that recovery does not run
        h.close();
        int permits = logReplay.availablePermits();
        createCore();
        // Solr should kick this off now
        // h.getCore().getUpdateHandler().getUpdateLog().recoverFromLog();
        assertJQ(req("q", "*:*"), "/response/numFound==5");
        // assert that in-place update is retained
        assertJQ(req("q", "val_i_dvo:2"), "/response/numFound==1");
        Thread.sleep(100);
        // no updates, so ensure that recovery didn't run
        assertEquals(permits, logReplay.availablePermits());
        assertEquals(UpdateLog.State.ACTIVE, h.getCore().getUpdateHandler().getUpdateLog().getState());
    } finally {
        DirectUpdateHandler2.commitOnClose = true;
        UpdateLog.testing_logReplayHook = null;
        UpdateLog.testing_logReplayFinishHook = null;
    }
}
Also used : Meter(com.codahale.metrics.Meter) Semaphore(java.util.concurrent.Semaphore) ArrayDeque(java.util.ArrayDeque) Gauge(com.codahale.metrics.Gauge) Metric(com.codahale.metrics.Metric) Test(org.junit.Test)
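
The versions deque uses addFirst so the newest version sits at the head, and the deque's iteration (and toString) order is therefore newest-first, which is what the assertions compare against. A minimal sketch of that most-recent-first bookkeeping; RecentVersions is an illustrative class, not part of the Solr tests.

import java.util.ArrayDeque;
import java.util.Deque;

final class RecentVersions {
    private final Deque<Long> versions = new ArrayDeque<>();

    // addFirst keeps the newest version at the head of the deque.
    void record(long version) {
        versions.addFirst(version);
    }

    Deque<Long> snapshot() {
        return new ArrayDeque<>(versions);
    }

    public static void main(String[] args) {
        RecentVersions rv = new RecentVersions();
        rv.record(101L);
        rv.record(102L);
        rv.record(103L);
        System.out.println(rv.snapshot());   // prints [103, 102, 101]
    }
}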

Aggregations

ArrayDeque (java.util.ArrayDeque) 217
ArrayList (java.util.ArrayList) 36
Test (org.junit.Test) 36
IOException (java.io.IOException) 27
HashMap (java.util.HashMap) 23
List (java.util.List) 20
HashSet (java.util.HashSet) 19
Map (java.util.Map) 17
Deque (java.util.Deque) 11
Iterator (java.util.Iterator) 10
NoSuchElementException (java.util.NoSuchElementException) 8
AtomicLong (java.util.concurrent.atomic.AtomicLong) 8
File (java.io.File) 7
Path (java.nio.file.Path) 7
Random (java.util.Random) 7
ByteBuffer (java.nio.ByteBuffer) 5
AtomicReference (java.util.concurrent.atomic.AtomicReference) 5
HttpFields (org.eclipse.jetty.http.HttpFields) 5
Name (com.github.anba.es6draft.ast.scope.Name) 4
ExecutionContext (com.github.anba.es6draft.runtime.ExecutionContext) 4