Search in sources:

Example 36 with MutablePair

Use of org.apache.commons.lang3.tuple.MutablePair in project Taier by DTStack.

From the class HdfsOperator, the method parseHdfsUri:

/**
 * Parses an HDFS location into its two components as captured by the class-level
 * {@code pattern}: the first capture group (the HDFS URI prefix) and the second
 * capture group (the remaining path).
 *
 * @param path the raw HDFS path string to parse
 * @return a mutable (uri, path) pair, or {@code null} when the input does not
 *         match the expected pattern
 */
public static Pair<String, String> parseHdfsUri(String path) {
    Matcher m = pattern.matcher(path);
    // Guard clause: bail out early when the pattern does not match or does not
    // expose exactly the two expected capture groups.
    if (!m.find() || m.groupCount() != 2) {
        return null;
    }
    return new MutablePair<>(m.group(1), m.group(2));
}
Also used : MutablePair(org.apache.commons.lang3.tuple.MutablePair) Matcher(java.util.regex.Matcher)

Example 37 with MutablePair

Use of org.apache.commons.lang3.tuple.MutablePair in project PanguEngine by UnknownDomainGames.

From the class FlowPane, the method layoutChildren:

/**
 * Lays out the children in wrapping lines: rows when the orientation is HORIZONTAL,
 * columns when it is VERTICAL. Children are first partitioned into "groups" (one per
 * line), then each group is positioned according to the pane's alignment.
 */
@Override
protected void layoutChildren() {
    Insets padding = padding().get();
    float spacing = spacing().get();
    // One entry per line: left = the children on that line, right = the line's
    // thickness (max preferred width of a column / max preferred height of a row).
    List<Pair<List<Node>, Float>> groups = new ArrayList<>();
    // Running thickness of the line currently being filled.
    var max = 0f;
    // Running length (including spacing) of the line currently being filled.
    var tmpsize = 0f;
    var tmpgroup = new MutablePair<List<Node>, Float>(new ArrayList<>(), 0f);
    for (var child : getChildren()) {
        float pw = Utils.prefWidth(child);
        float ph = Utils.prefHeight(child);
        if (orientation.get() == Orientation.VERTICAL) {
            // Column overflow: close the current group and start a new one.
            // NOTE(review): the child that triggers the wrap is appended to the NEW
            // group (see tmpgroup.left.add below), so max/tmpsize are re-seeded from it.
            if (tmpsize + ph > getHeight()) {
                tmpsize = ph + spacing;
                tmpgroup.setRight(max);
                groups.add(tmpgroup);
                max = pw;
                tmpgroup = new MutablePair<>(new ArrayList<>(), 0f);
            } else {
                max = Math.max(max, pw);
                tmpsize += ph + spacing;
            }
        } else {
            // Row overflow: same wrap logic as above, with the axes swapped.
            if (tmpsize + pw > getWidth()) {
                tmpsize = pw + spacing;
                tmpgroup.setRight(max);
                groups.add(tmpgroup);
                max = ph;
                tmpgroup = new MutablePair<>(new ArrayList<>(), 0f);
            } else {
                max = Math.max(max, ph);
                tmpsize += pw + spacing;
            }
        }
        tmpgroup.left.add(child);
    }
    // Flush the last (possibly partial) line.
    tmpgroup.setRight(max);
    groups.add(tmpgroup);
    // Total thickness of all lines, capped at the pane's cross-axis size; used for
    // centering the whole block of lines.
    var size = Math.min(orientation.get() == Orientation.VERTICAL ? getWidth() : getHeight(), groups.stream().mapToDouble(Pair::getRight).reduce(0, Double::sum));
    float x;
    float y;
    // Starting x: for RIGHT we lay out leftwards from the right edge; for CENTER in
    // VERTICAL orientation the block of columns is centered horizontally.
    switch(alignment.get().getHpos()) {
        case RIGHT:
            x = getWidth() - padding.getRight();
            break;
        case CENTER:
            if (orientation.get() == Orientation.VERTICAL) {
                x = (float) (Math.max((getWidth() - padding.getLeft() - padding.getRight()) - size, 0) / 2 + padding.getLeft());
            } else {
                // Handled per-group below (each row is centered on its own content).
                x = 0;
            }
            break;
        default:
            x = padding.getLeft();
            break;
    }
    // Starting y: mirror of the x computation for the vertical position.
    switch(alignment.get().getVpos()) {
        case BOTTOM:
            y = getHeight() - padding.getBottom();
            break;
        case CENTER:
            if (orientation.get() == Orientation.HORIZONTAL) {
                y = (float) (Math.max(getHeight() - padding.getTop() - padding.getBottom() - size, 0) / 2 + padding.getTop());
            } else {
                // Handled per-group below (each column is centered on its own content).
                y = 0;
            }
            break;
        default:
            y = padding.getTop();
            break;
    }
    // NOTE(review): lineW/lineH appear unused below — presumably leftovers; confirm.
    float lineW = 0;
    float lineH = 0;
    for (Pair<List<Node>, Float> group : groups) {
        // Center each line along its main axis based on its own content length.
        if (orientation.get() == Orientation.VERTICAL) {
            y = (float) (Math.max(getHeight() - padding.getTop() - padding.getBottom() - group.getLeft().stream().mapToDouble(Node::prefHeight).reduce(0, Double::sum) - spacing * (group.getLeft().size() - 1), 0) / 2 + padding.getTop());
        } else {
            x = (float) (Math.max(getWidth() - padding.getLeft() - padding.getRight() - group.getLeft().stream().mapToDouble(Node::prefWidth).reduce(0, Double::sum) - spacing * (group.getLeft().size() - 1), 0) / 2 + padding.getLeft());
        }
        for (var child : group.getLeft()) {
            float pw = Utils.prefWidth(child);
            float ph = Utils.prefHeight(child);
            // x1/y1: the child's final top-left corner. For RIGHT/BOTTOM alignment the
            // running cursor points at the far edge, so we step back by the child size.
            float x1;
            if (alignment.get().getHpos() == Pos.HPos.RIGHT) {
                x1 = x - pw;
            } else if (alignment.get().getHpos() == Pos.HPos.CENTER) {
                if (orientation.get() == Orientation.HORIZONTAL) {
                    x1 = x;
                } else {
                    // Center the child within its column's thickness.
                    x1 = x + (group.getRight() - pw) / 2;
                }
            } else {
                x1 = x;
            }
            float y1;
            if (alignment.get().getVpos() == Pos.VPos.BOTTOM) {
                y1 = y - ph;
            } else if (alignment.get().getVpos() == Pos.VPos.CENTER) {
                if (orientation.get() == Orientation.VERTICAL) {
                    y1 = y;
                } else {
                    // Center the child within its row's thickness.
                    y1 = y + (group.getRight() - ph) / 2;
                }
            } else {
                y1 = y;
            }
            layoutInArea(child, snap(x1, true), snap(y1, true), snap(pw, true), snap(ph, true));
            // Advance the main-axis cursor; direction flips for RIGHT/BOTTOM alignment.
            if (orientation.get() == Orientation.HORIZONTAL) {
                x = x1 + (alignment.get().getHpos() == Pos.HPos.RIGHT ? -spacing : spacing + pw);
            } else {
                y = y1 + (alignment.get().getVpos() == Pos.VPos.BOTTOM ? -spacing : spacing + ph);
            }
        }
        // Step to the next line along the cross axis by this line's thickness.
        if (orientation.get() == Orientation.VERTICAL) {
            x += group.getRight();
        } else {
            y += group.getRight();
        }
    }
}
Also used : Insets(engine.gui.misc.Insets) Node(engine.gui.Node) ArrayList(java.util.ArrayList) MutablePair(org.apache.commons.lang3.tuple.MutablePair) ArrayList(java.util.ArrayList) List(java.util.List) MutablePair(org.apache.commons.lang3.tuple.MutablePair) Pair(org.apache.commons.lang3.tuple.Pair)

Example 38 with MutablePair

Use of org.apache.commons.lang3.tuple.MutablePair in project BlackLab by INL.

From the class HitGroupsTokenFrequencies, the method get:

/**
 * Get the token frequencies for the given query and hit property.
 *
 * Fast path for grouping: token and metadata values are read directly from the forward
 * index and document metadata, so individual hits never need to be materialized. Each
 * group accumulates a pair of counts: number of hits (left) and number of docs (right).
 *
 * @param source query to find token frequencies for
 * @param requestedGroupingProperty the (possibly compound) hit property to group on
 * @return token frequencies
 */
public static HitGroups get(SearchHits source, HitProperty requestedGroupingProperty) {
    QueryInfo queryInfo = source.queryInfo();
    Query filterQuery = source.getFilterQuery();
    SearchSettings searchSettings = source.searchSettings();
    try {
        /**
         * This is where we store our groups while we're computing/gathering them. Maps from group Id to number of hits (left) and number of docs (right).
         * (Note the variable name's spelling: "occurances" — kept as-is since renaming is out of scope here.)
         */
        final ConcurrentHashMap<GroupIdHash, MutablePair<Integer, Integer>> occurances = new ConcurrentHashMap<>();
        final BlackLabIndex index = queryInfo.index();
        /**
         * Document properties that are used in the grouping. (e.g. for query "all tokens, grouped by lemma + document year", will contain DocProperty("document year")
         * This is not necessarily limited to just metadata, can also contain any other DocProperties such as document ID, document length, etc.
         */
        final List<DocProperty> docProperties = new ArrayList<>();
        /**
         * Token properties that need to be grouped on, with sensitivity (case-sensitive grouping or not) and Terms
         */
        final List<Triple<AnnotationForwardIndex, MatchSensitivity, Terms>> hitProperties = new ArrayList<>();
        /**
         * Stores the original index every (doc|hit)property has in the original interleaved/intertwined list.
         * The requestedGroupingProperty sometimes represents more than one property (in the form of HitPropertyMultiple) such as 3 properties: [token text, document year, token lemma]
         * The groups always get an id that is (roughly) the concatenation of the properties (in the example case [token text, document year, token lemma]),
         * and it's important this id contains the respective values in the same order.
         * We need to keep this list because otherwise we'd potentially change the order.
         *
         * Integer contains index in the source list (docProperties or hitProperties, from just above)
         * Boolean is true when origin list was docProperties, false for hitProperties.
         */
        final List<Pair<Integer, Boolean>> originalOrderOfUnpackedProperties = new ArrayList<>();
        // Unpack the requestedGroupingProperty into its constituents and sort those into the appropriate categories: hit and doc properties.
        {
            List<HitProperty> props = requestedGroupingProperty.props() != null ? requestedGroupingProperty.props() : Arrays.asList(requestedGroupingProperty);
            for (HitProperty p : props) {
                final DocProperty asDocPropIfApplicable = p.docPropsOnly();
                if (asDocPropIfApplicable != null) {
                    // property can be converted to docProperty (applies to the document instead of the token/hit)
                    if (DEBUG && asDocPropIfApplicable.props() != null) {
                        throw new RuntimeException("Nested PropertyMultiples detected, should never happen (when this code was originally written)");
                    }
                    final int positionInUnpackedList = docProperties.size();
                    docProperties.add(asDocPropIfApplicable);
                    originalOrderOfUnpackedProperties.add(Pair.of(positionInUnpackedList, true));
                } else {
                    // Property couldn't be converted to DocProperty (is null). The current property is an actual HitProperty (applies to annotation/token/hit value)
                    List<Annotation> annot = p.needsContext();
                    if (DEBUG && (annot == null || annot.size() != 1)) {
                        throw new RuntimeException("Grouping property does not apply to singular annotation (nested propertymultiple? non-annotation grouping?) should never happen.");
                    }
                    final int positionInUnpackedList = hitProperties.size();
                    final AnnotationForwardIndex annotationFI = index.annotationForwardIndex(annot.get(0));
                    hitProperties.add(Triple.of(annotationFI, p.getSensitivities().get(0), annotationFI.terms()));
                    originalOrderOfUnpackedProperties.add(Pair.of(positionInUnpackedList, false));
                }
            }
        }
        final int numAnnotations = hitProperties.size();
        // Assigned after the document-gathering phase; may be reassigned on the
        // token-grouping path (some documents can be skipped when the hit cap is reached).
        long numberOfDocsProcessed;
        final AtomicInteger numberOfHitsProcessed = new AtomicInteger();
        final AtomicBoolean hitMaxHitsToProcess = new AtomicBoolean(false);
        try (final BlockTimer c = BlockTimer.create("Top Level")) {
            // Phase 1: collect the (global) Lucene doc ids matching the filter query.
            final List<Integer> docIds = new ArrayList<>();
            try (BlockTimer d = c.child("Gathering documents")) {
                queryInfo.index().searcher().search(filterQuery == null ? new MatchAllDocsQuery() : filterQuery, new SimpleCollector() {

                    private int docBase;

                    @Override
                    protected void doSetNextReader(LeafReaderContext context) throws IOException {
                        // Remember the segment's base so per-segment ids can be globalized.
                        docBase = context.docBase;
                        super.doSetNextReader(context);
                    }

                    @Override
                    public void collect(int docId) throws IOException {
                        int globalDocId = docId + docBase;
                        docIds.add(globalDocId);
                    }

                    @Override
                    public boolean needsScores() {
                        // Scores are irrelevant; we only need the matching doc ids.
                        return false;
                    }
                });
            }
            numberOfDocsProcessed = docIds.size();
            final IndexReader reader = queryInfo.index().reader();
            // Passed as both start and end to retrievePartsInt; -1 presumably means
            // "the whole document" — confirm against AnnotationForwardIndex's contract.
            final int[] minusOne = new int[] { -1 };
            // What we do instead is for every document just retrieve how many tokens it contains (from its metadata), and add that count to the appropriate group
            if (hitProperties.isEmpty()) {
                // Phase 2a (metadata-only): no token properties to group on, so every token
                // in a document lands in the same group; just add the document's length.
                try (BlockTimer f = c.child("Grouping documents (metadata only path)")) {
                    String fieldName = index.mainAnnotatedField().name();
                    DocPropertyAnnotatedFieldLength propTokens = new DocPropertyAnnotatedFieldLength(index, fieldName);
                    final int[] emptyTokenValuesArray = new int[0];
                    docIds.parallelStream().forEach(docId -> {
                        // ignore "extra closing token"
                        final int docLength = (int) propTokens.get(docId) - subtractClosingToken;
                        final DocResult synthesizedDocResult = DocResult.fromDoc(queryInfo, new PropertyValueDoc(new DocImpl(queryInfo.index(), docId)), 0, docLength);
                        final PropertyValue[] metadataValuesForGroup = new PropertyValue[docProperties.size()];
                        for (int i = 0; i < docProperties.size(); ++i) {
                            metadataValuesForGroup[i] = docProperties.get(i).get(synthesizedDocResult);
                        }
                        // precompute, it's the same for all hits in document
                        final int metadataValuesHash = Arrays.hashCode(metadataValuesForGroup);
                        numberOfHitsProcessed.addAndGet(docLength);
                        // Add all tokens in document to the group.
                        final GroupIdHash groupId = new GroupIdHash(emptyTokenValuesArray, emptyTokenValuesArray, metadataValuesForGroup, metadataValuesHash);
                        // compute() runs atomically per key, so the pair mutation is safe here.
                        occurances.compute(groupId, (__, groupSizes) -> {
                            if (groupSizes != null) {
                                groupSizes.left += docLength;
                                groupSizes.right += 1;
                                return groupSizes;
                            } else {
                                return MutablePair.of(docLength, 1);
                            }
                        });
                    });
                }
            } else {
                // Phase 2b (token path): group every token by its annotation values
                // (+ optional doc metadata), stopping once maxHitsToProcess is reached.
                final int maxHitsToProcess = searchSettings.maxHitsToProcess() > 0 ? searchSettings.maxHitsToProcess() : Integer.MAX_VALUE;
                // Saturating increment: never counts past the configured maximum.
                final IntUnaryOperator incrementUntilMax = (v) -> v < maxHitsToProcess ? v + 1 : v;
                final String fieldName = index.mainAnnotatedField().name();
                final String lengthTokensFieldName = AnnotatedFieldNameUtil.lengthTokensField(fieldName);
                // filter() returns whether a document contributed at least one token,
                // so count() yields the number of docs actually processed.
                numberOfDocsProcessed = docIds.parallelStream().filter(docId -> {
                    try {
                        // Step 1: read all values for the to-be-grouped annotations for this document
                        // This will create one int[] for every annotation, containing ids that map to the values for this document for this annotation
                        final Document doc = reader.document(docId);
                        final List<int[]> tokenValuesPerAnnotation = new ArrayList<>();
                        try (BlockTimer e = c.child("Read annotations from forward index")) {
                            for (Triple<AnnotationForwardIndex, MatchSensitivity, Terms> annot : hitProperties) {
                                final String annotationFIName = annot.getLeft().annotation().forwardIndexIdField();
                                final int fiid = doc.getField(annotationFIName).numericValue().intValue();
                                final List<int[]> tokenValues = annot.getLeft().retrievePartsInt(fiid, minusOne, minusOne);
                                tokenValuesPerAnnotation.addAll(tokenValues);
                            }
                        }
                        // Step 2: retrieve the to-be-grouped metadata for this document
                        // ignore "extra closing token"
                        int docLength = Integer.parseInt(doc.get(lengthTokensFieldName)) - subtractClosingToken;
                        final DocResult synthesizedDocResult = DocResult.fromDoc(queryInfo, new PropertyValueDoc(new DocImpl(queryInfo.index(), docId)), 0, docLength);
                        // null when there are no doc properties; the loop below is then a no-op
                        // and Arrays.hashCode(null) yields a constant.
                        final PropertyValue[] metadataValuesForGroup = !docProperties.isEmpty() ? new PropertyValue[docProperties.size()] : null;
                        for (int i = 0; i < docProperties.size(); ++i) {
                            metadataValuesForGroup[i] = docProperties.get(i).get(synthesizedDocResult);
                        }
                        // precompute, it's the same for all hits in document
                        final int metadataValuesHash = Arrays.hashCode(metadataValuesForGroup);
                        // now we have all values for all relevant annotations for this document
                        // iterate again and pair up the nth entries for all annotations, then store that as a group.
                        /**
                         * Bookkeeping: track which groups we've already seen in this document,
                         * so we only count this document once per group
                         */
                        HashSet<GroupIdHash> groupsInThisDocument = new HashSet<>();
                        try (BlockTimer f = c.child("Group tokens")) {
                            for (int tokenIndex = 0; tokenIndex < docLength; ++tokenIndex) {
                                if (numberOfHitsProcessed.getAndUpdate(incrementUntilMax) >= maxHitsToProcess) {
                                    hitMaxHitsToProcess.set(true);
                                    // true if any token of this document made the cut, false if we escaped immediately
                                    return tokenIndex > 0;
                                }
                                // Unfortunate fact: token ids are case-sensitive, and in order to group on a token's values case and diacritics insensitively,
                                // we need to actually group by their "sort positions" - which is just the index the term would have if all terms would have been sorted
                                // so in essence it's also an "id", but a case-insensitive one.
                                // we could further optimize to not do this step when grouping sensitively by making a specialized instance of the GroupIdHash class
                                // that hashes the token ids instead of the sortpositions in that case.
                                int[] annotationValuesForThisToken = new int[numAnnotations];
                                int[] sortPositions = new int[annotationValuesForThisToken.length];
                                for (int annotationIndex = 0; annotationIndex < numAnnotations; ++annotationIndex) {
                                    int[] tokenValuesThisAnnotation = tokenValuesPerAnnotation.get(annotationIndex);
                                    final int termId = annotationValuesForThisToken[annotationIndex] = tokenValuesThisAnnotation[tokenIndex];
                                    Triple<AnnotationForwardIndex, MatchSensitivity, Terms> currentHitProp = hitProperties.get(annotationIndex);
                                    MatchSensitivity matchSensitivity = currentHitProp.getMiddle();
                                    Terms terms = currentHitProp.getRight();
                                    sortPositions[annotationIndex] = terms.idToSortPosition(termId, matchSensitivity);
                                }
                                final GroupIdHash groupId = new GroupIdHash(annotationValuesForThisToken, sortPositions, metadataValuesForGroup, metadataValuesHash);
                                occurances.compute(groupId, (__, groupSize) -> {
                                    if (groupSize != null) {
                                        groupSize.left += 1;
                                        // second (or more) occurrence of these token values in this document
                                        groupSize.right += groupsInThisDocument.add(groupId) ? 1 : 0;
                                        return groupSize;
                                    } else {
                                        // should always return true, but we need to add this group anyway!
                                        return MutablePair.of(1, groupsInThisDocument.add(groupId) ? 1 : 0);
                                    }
                                });
                            }
                        }
                    } catch (IOException e) {
                        throw BlackLabRuntimeException.wrap(e);
                    }
                    return true;
                }).count();
                logger.trace("Number of processed docs: " + numberOfDocsProcessed);
            }
        }
        // Phase 3: turn the raw integer group ids back into PropertyValues, restoring
        // the caller's original property order via originalOrderOfUnpackedProperties.
        Set<PropertyValue> duplicateGroupsDebug = DEBUG ? new HashSet<PropertyValue>() : null;
        List<HitGroup> groups;
        try (final BlockTimer c = BlockTimer.create("Resolve string values for tokens")) {
            final int numMetadataValues = docProperties.size();
            groups = occurances.entrySet().parallelStream().map(e -> {
                final int groupSizeHits = e.getValue().getLeft();
                final int groupSizeDocs = e.getValue().getRight();
                final int[] annotationValues = e.getKey().tokenIds;
                final PropertyValue[] metadataValues = e.getKey().metadataValues;
                // allocate new - is not copied when moving into propertyvaluemultiple
                final PropertyValue[] groupIdAsList = new PropertyValue[numAnnotations + numMetadataValues];
                // Convert all raw values (integers) into their appropriate PropertyValues
                // Taking care to preserve the order of the resultant PropertyValues with the order of the input HitProperties
                int indexInOutput = 0;
                for (Pair<Integer, Boolean> p : originalOrderOfUnpackedProperties) {
                    final int indexInInput = p.getLeft();
                    if (p.getRight()) {
                        // is docprop, add PropertyValue as-is
                        groupIdAsList[indexInOutput++] = metadataValues[indexInInput];
                    } else {
                        // is hitprop, convert value to PropertyValue.
                        Annotation annot = hitProperties.get(indexInInput).getLeft().annotation();
                        MatchSensitivity sens = hitProperties.get(indexInInput).getMiddle();
                        groupIdAsList[indexInOutput++] = new PropertyValueContextWords(index, annot, sens, new int[] { annotationValues[indexInInput] }, false);
                    }
                }
                PropertyValue groupId = groupIdAsList.length > 1 ? new PropertyValueMultiple(groupIdAsList) : groupIdAsList[0];
                if (DEBUG) {
                    // Sanity check: two distinct GroupIdHash keys must never resolve to
                    // the same PropertyValue group id.
                    synchronized (duplicateGroupsDebug) {
                        if (!duplicateGroupsDebug.add(groupId)) {
                            throw new RuntimeException("Identical groups - should never happen");
                        }
                    }
                }
                return new HitGroupWithoutResults(queryInfo, groupId, groupSizeHits, groupSizeDocs, false, false);
            }).collect(Collectors.toList());
        }
        logger.debug("fast path used for grouping");
        ResultsStats hitsStats = new ResultsStatsStatic(numberOfHitsProcessed.get(), numberOfHitsProcessed.get(), new MaxStats(hitMaxHitsToProcess.get(), hitMaxHitsToProcess.get()));
        ResultsStats docsStats = new ResultsStatsStatic((int) numberOfDocsProcessed, (int) numberOfDocsProcessed, new MaxStats(hitMaxHitsToProcess.get(), hitMaxHitsToProcess.get()));
        return HitGroups.fromList(queryInfo, groups, requestedGroupingProperty, null, null, hitsStats, docsStats);
    } catch (IOException e) {
        throw BlackLabRuntimeException.wrap(e);
    }
}
Also used : Query(org.apache.lucene.search.Query) java.util(java.util) BlackLabIndex(nl.inl.blacklab.search.BlackLabIndex) IntUnaryOperator(java.util.function.IntUnaryOperator) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) SearchHits(nl.inl.blacklab.searches.SearchHits) nl.inl.blacklab.resultproperty(nl.inl.blacklab.resultproperty) Document(org.apache.lucene.document.Document) AnnotatedFieldNameUtil(nl.inl.blacklab.search.indexmetadata.AnnotatedFieldNameUtil) MutablePair(org.apache.commons.lang3.tuple.MutablePair) Pair(org.apache.commons.lang3.tuple.Pair) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Terms(nl.inl.blacklab.forwardindex.Terms) AnnotationForwardIndex(nl.inl.blacklab.forwardindex.AnnotationForwardIndex) LeafReaderContext(org.apache.lucene.index.LeafReaderContext) Triple(org.apache.commons.lang3.tuple.Triple) BlackLabRuntimeException(nl.inl.blacklab.exceptions.BlackLabRuntimeException) Annotation(nl.inl.blacklab.search.indexmetadata.Annotation) SimpleCollector(org.apache.lucene.search.SimpleCollector) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) IOException(java.io.IOException) MatchSensitivity(nl.inl.blacklab.search.indexmetadata.MatchSensitivity) MatchAllDocsQuery(org.apache.lucene.search.MatchAllDocsQuery) Collectors(java.util.stream.Collectors) Logger(org.apache.logging.log4j.Logger) DocImpl(nl.inl.blacklab.search.DocImpl) LogManager(org.apache.logging.log4j.LogManager) IndexReader(org.apache.lucene.index.IndexReader) BlockTimer(nl.inl.util.BlockTimer) Query(org.apache.lucene.search.Query) MatchAllDocsQuery(org.apache.lucene.search.MatchAllDocsQuery) IntUnaryOperator(java.util.function.IntUnaryOperator) Document(org.apache.lucene.document.Document) SimpleCollector(org.apache.lucene.search.SimpleCollector) LeafReaderContext(org.apache.lucene.index.LeafReaderContext) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Terms(nl.inl.blacklab.forwardindex.Terms) BlackLabIndex(nl.inl.blacklab.search.BlackLabIndex) 
Triple(org.apache.commons.lang3.tuple.Triple) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) BlockTimer(nl.inl.util.BlockTimer) MutablePair(org.apache.commons.lang3.tuple.MutablePair) BlackLabRuntimeException(nl.inl.blacklab.exceptions.BlackLabRuntimeException) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) MatchSensitivity(nl.inl.blacklab.search.indexmetadata.MatchSensitivity) MutablePair(org.apache.commons.lang3.tuple.MutablePair) Pair(org.apache.commons.lang3.tuple.Pair) AnnotationForwardIndex(nl.inl.blacklab.forwardindex.AnnotationForwardIndex) IOException(java.io.IOException) MatchAllDocsQuery(org.apache.lucene.search.MatchAllDocsQuery) Annotation(nl.inl.blacklab.search.indexmetadata.Annotation) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) IndexReader(org.apache.lucene.index.IndexReader) DocImpl(nl.inl.blacklab.search.DocImpl)

Example 39 with MutablePair

Use of org.apache.commons.lang3.tuple.MutablePair in project BleachHack by BleachDrinker420.

From the class Texts, the method unpack:

/**
 * Flattens a text and all of its nested siblings (depth-first, pre-order) into a
 * list of sibling-free copies.
 *
 * <p>Iterative DFS: the stack holds (node, cursor) pairs, where the cursor is the
 * index of the next sibling of that node still to be visited.</p>
 *
 * @param text the root text to unpack
 * @return the root followed by every descendant, each copied without its siblings
 */
public static List<Text> unpack(Text text) {
    List<Text> flattened = new ArrayList<>();
    Stack<MutablePair<Text, Integer>> pending = new Stack<>();
    flattened.add(copy(text, true, false));
    pending.push(MutablePair.of(text, 0));
    while (!pending.isEmpty()) {
        MutablePair<Text, Integer> top = pending.peek();
        List<Text> siblings = top.getLeft().getSiblings();
        int cursor = top.getRight();
        if (cursor >= siblings.size()) {
            // All siblings of this node visited; backtrack.
            pending.pop();
            continue;
        }
        // Advance this node's cursor, then descend into the next sibling.
        top.setRight(cursor + 1);
        Text next = siblings.get(cursor);
        flattened.add(copy(next, true, false));
        pending.push(MutablePair.of(next, 0));
    }
    return flattened;
}
Also used : MutablePair(org.apache.commons.lang3.tuple.MutablePair) ArrayList(java.util.ArrayList) LiteralText(net.minecraft.text.LiteralText) BaseText(net.minecraft.text.BaseText) MutableText(net.minecraft.text.MutableText) Text(net.minecraft.text.Text) Stack(java.util.Stack)

Example 40 with MutablePair

Use of org.apache.commons.lang3.tuple.MutablePair in project pulsar by apache.

From the class Consumer, the method individualAckWithTransaction:

// Handles an individual (non-cumulative) acknowledgment that carries a transaction:
// validates, updates unacked-message counters, and forwards the acked positions to the
// transaction-aware individual-acknowledge path.
private CompletableFuture<Void> individualAckWithTransaction(CommandAck ack) {
    // Individual ack
    // Pairs of (position, batch size). NOTE(review): the right element is the full
    // batch size, not the ackedCount computed below — confirm the downstream
    // transactionIndividualAcknowledge expects batch size here.
    List<MutablePair<PositionImpl, Integer>> positionsAcked = new ArrayList<>();
    if (!isTransactionEnabled()) {
        return FutureUtil.failedFuture(new BrokerServiceException.NotAllowedException("Server don't support transaction ack!"));
    }
    for (int i = 0; i < ack.getMessageIdsCount(); i++) {
        MessageIdData msgId = ack.getMessageIdAt(i);
        PositionImpl position;
        long ackedCount = 0;
        long batchSize = getBatchSize(msgId);
        Consumer ackOwnerConsumer = getAckOwnerConsumer(msgId.getLedgerId(), msgId.getEntryId());
        if (msgId.getAckSetsCount() > 0) {
            // Batch message with an ack set: only part of the batch is being acked,
            // so derive the actual acked count from the bitset.
            long[] ackSets = new long[msgId.getAckSetsCount()];
            for (int j = 0; j < msgId.getAckSetsCount(); j++) {
                ackSets[j] = msgId.getAckSetAt(j);
            }
            position = PositionImpl.get(msgId.getLedgerId(), msgId.getEntryId(), ackSets);
            ackedCount = getAckedCountForTransactionAck(batchSize, ackSets);
        } else {
            // No ack set: the whole entry/batch is acked.
            position = PositionImpl.get(msgId.getLedgerId(), msgId.getEntryId());
            ackedCount = batchSize;
        }
        positionsAcked.add(new MutablePair<>(position, (int) batchSize));
        // Decrease the owning consumer's unacked-message counter by what was acked.
        addAndGetUnAckedMsgs(ackOwnerConsumer, -(int) ackedCount);
        checkCanRemovePendingAcksAndHandle(position, msgId);
        checkAckValidationError(ack, position);
    }
    CompletableFuture<Void> completableFuture = transactionIndividualAcknowledge(ack.getTxnidMostBits(), ack.getTxnidLeastBits(), positionsAcked);
    if (Subscription.isIndividualAckMode(subType)) {
        // For individual-ack subscriptions: once the transactional ack completes, remove
        // pending acks for batch positions whose consumer pending-ack state allows it.
        completableFuture.whenComplete((v, e) -> positionsAcked.forEach(positionLongMutablePair -> {
            if (positionLongMutablePair.getLeft().getAckSet() != null) {
                if (((PersistentSubscription) subscription).checkIsCanDeleteConsumerPendingAck(positionLongMutablePair.left)) {
                    removePendingAcks(positionLongMutablePair.left);
                }
            }
        }));
    }
    return completableFuture;
}
Also used : LongAdder(java.util.concurrent.atomic.LongAdder) AtomicIntegerFieldUpdater(java.util.concurrent.atomic.AtomicIntegerFieldUpdater) Setter(lombok.Setter) MutableInt(org.apache.commons.lang3.mutable.MutableInt) TopicName(org.apache.pulsar.common.naming.TopicName) LongPair(org.apache.pulsar.common.util.collections.ConcurrentLongLongPairHashMap.LongPair) Getter(lombok.Getter) AckType(org.apache.pulsar.common.api.proto.CommandAck.AckType) MessageIdData(org.apache.pulsar.common.api.proto.MessageIdData) Entry(org.apache.bookkeeper.mledger.Entry) LoggerFactory(org.slf4j.LoggerFactory) CompletableFuture(java.util.concurrent.CompletableFuture) KeySharedMeta(org.apache.pulsar.common.api.proto.KeySharedMeta) ArrayList(java.util.ArrayList) Commands(org.apache.pulsar.common.protocol.Commands) SubType(org.apache.pulsar.common.api.proto.CommandSubscribe.SubType) Preconditions.checkArgument(com.google.common.base.Preconditions.checkArgument) TxnID(org.apache.pulsar.client.api.transaction.TxnID) Lists(com.google.common.collect.Lists) KeyLongValue(org.apache.pulsar.common.api.proto.KeyLongValue) DateFormatter(org.apache.pulsar.common.util.DateFormatter) MutablePair(org.apache.commons.lang3.tuple.MutablePair) Map(java.util.Map) ConsumerStatsImpl(org.apache.pulsar.common.policies.data.stats.ConsumerStatsImpl) CommandAck(org.apache.pulsar.common.api.proto.CommandAck) PositionImpl(org.apache.bookkeeper.mledger.impl.PositionImpl) DEFAULT_CONSUMER_EPOCH(org.apache.pulsar.common.protocol.Commands.DEFAULT_CONSUMER_EPOCH) Logger(org.slf4j.Logger) Promise(io.netty.util.concurrent.Promise) InitialPosition(org.apache.pulsar.common.api.proto.CommandSubscribe.InitialPosition) TransactionConflictException(org.apache.pulsar.transaction.common.exception.TransactionConflictException) BitSetRecyclable(org.apache.pulsar.common.util.collections.BitSetRecyclable) MoreObjects(com.google.common.base.MoreObjects) Position(org.apache.bookkeeper.mledger.Position) 
Collectors(java.util.stream.Collectors) Objects(java.util.Objects) List(java.util.List) PersistentTopic(org.apache.pulsar.broker.service.persistent.PersistentTopic) FutureUtil(org.apache.pulsar.common.util.FutureUtil) MessageId(org.apache.pulsar.client.api.MessageId) PersistentSubscription(org.apache.pulsar.broker.service.persistent.PersistentSubscription) ConcurrentLongLongPairHashMap(org.apache.pulsar.common.util.collections.ConcurrentLongLongPairHashMap) BitSet(java.util.BitSet) Future(io.netty.util.concurrent.Future) Collections(java.util.Collections) Rate(org.apache.pulsar.common.stats.Rate) MessageIdData(org.apache.pulsar.common.api.proto.MessageIdData) PositionImpl(org.apache.bookkeeper.mledger.impl.PositionImpl) ArrayList(java.util.ArrayList) MutablePair(org.apache.commons.lang3.tuple.MutablePair)

Aggregations

MutablePair (org.apache.commons.lang3.tuple.MutablePair)116 Pair (org.apache.commons.lang3.tuple.Pair)49 ArrayList (java.util.ArrayList)32 Test (org.junit.Test)32 HashMap (java.util.HashMap)29 Message (com.microsoft.azure.sdk.iot.device.Message)27 IotHubTransportMessage (com.microsoft.azure.sdk.iot.device.transport.IotHubTransportMessage)27 ConcurrentLinkedQueue (java.util.concurrent.ConcurrentLinkedQueue)23 List (java.util.List)20 MqttDeviceTwin (com.microsoft.azure.sdk.iot.device.transport.mqtt.MqttDeviceTwin)17 Map (java.util.Map)14 IOException (java.io.IOException)13 WalkPosition (org.openbw.bwapi4j.WalkPosition)9 MiniTile (bwem.tile.MiniTile)8 DeviceOperations (com.microsoft.azure.sdk.iot.device.DeviceTwin.DeviceOperations)8 AreaId (bwem.area.typedef.AreaId)7 Collectors (java.util.stream.Collectors)7 PositionImpl (org.apache.bookkeeper.mledger.impl.PositionImpl)7 HashSet (java.util.HashSet)6 TileImpl (bwem.tile.TileImpl)5