Example 1 with EvaluationTrackingFunction

Use of datawave.query.iterator.profile.EvaluationTrackingFunction in the NationalSecurityAgency/datawave project.

From the class QueryIterator, the method createDocumentPipeline:

/**
 * Create the pipeline. It is very important that this pipeline can handle resetting the bottom iterator with a new value. This means that hasNext() must
 * poll the underlying iterator each time it is called. The only state that may be kept is the next value, made ready once hasNext() has been called. Once
 * next() returns that value, the following hasNext() call must poll the underlying iterator again. For example, Iterators.filter() cannot be used because
 * it relies on Google Guava's AbstractIterator, which maintains iterator state (failed, ready, done); use statelessFilter instead.
 *
 * @param deepSourceCopy
 *            a deep copy of the underlying source, used to fetch document data
 * @param documentSpecificSource
 *            the nested iterator supplying the document keys that feed the pipeline
 * @param columnFamilies
 *            the column families passed through to the evaluation stage
 * @param inclusive
 *            whether the column families are inclusive
 * @param querySpanCollector
 *            receives the tracking span's timing metrics when collectTimingDetails is true
 * @return iterator of keys and values
 */
public Iterator<Entry<Key, Document>> createDocumentPipeline(SortedKeyValueIterator<Key, Value> deepSourceCopy, final NestedQueryIterator<Key> documentSpecificSource, Collection<ByteSequence> columnFamilies, boolean inclusive, QuerySpanCollector querySpanCollector) {
    QuerySpan trackingSpan = null;
    if (gatherTimingDetails()) {
        trackingSpan = new QuerySpan(getStatsdClient());
    }
    if (log.isTraceEnabled()) {
        log.trace("createDocumentPipeline");
    }
    final Function<Entry<Key, Document>, Entry<DocumentData, Document>> docMapper;
    if (isFieldIndexSatisfyingQuery()) {
        if (log.isTraceEnabled()) {
            log.trace("isFieldIndexSatisfyingQuery");
        }
        docMapper = new Function<Entry<Key, Document>, Entry<DocumentData, Document>>() {

            @Nullable
            @Override
            public Entry<DocumentData, Document> apply(@Nullable Entry<Key, Document> input) {
                Entry<DocumentData, Document> entry = null;
                if (input != null) {
                    entry = Maps.immutableEntry(new DocumentData(input.getKey(), Collections.singleton(input.getKey()), Collections.emptyList(), true), input.getValue());
                }
                return entry;
            }
        };
    } else {
        docMapper = new KeyToDocumentData(deepSourceCopy, myEnvironment, documentOptions, super.equality, getEvaluationFilter(), this.includeHierarchyFields, this.includeHierarchyFields);
    }
    Iterator<Entry<DocumentData, Document>> sourceIterator = Iterators.transform(documentSpecificSource, from -> {
        Entry<Key, Document> entry = Maps.immutableEntry(from, documentSpecificSource.document());
        return docMapper.apply(entry);
    });
    // Take the document Keys and transform them into Entry<Key,Document>,
    // removing Attributes from each Document
    // which do not fall within the expected time range
    Iterator<Entry<Key, Document>> documents = null;
    Aggregation a = new Aggregation(this.getTimeFilter(), this.typeMetadataWithNonIndexed, compositeMetadata, this.isIncludeGroupingContext(), this.includeRecordId, this.disableIndexOnlyDocuments(), getEvaluationFilter(), isTrackSizes());
    if (gatherTimingDetails()) {
        documents = Iterators.transform(sourceIterator, new EvaluationTrackingFunction<>(QuerySpan.Stage.Aggregation, trackingSpan, a));
    } else {
        documents = Iterators.transform(sourceIterator, a);
    }
    // Inject the data type as a field if the user requested it
    if (this.includeDatatype) {
        if (gatherTimingDetails()) {
            documents = Iterators.transform(documents, new EvaluationTrackingFunction<>(QuerySpan.Stage.DataTypeAsField, trackingSpan, new DataTypeAsField(this.datatypeKey)));
        } else {
            documents = Iterators.transform(documents, new DataTypeAsField(this.datatypeKey));
        }
    }
    // Inject the document permutations if required
    if (!this.getDocumentPermutations().isEmpty()) {
        if (gatherTimingDetails()) {
            documents = Iterators.transform(documents, new EvaluationTrackingFunction<>(QuerySpan.Stage.DocumentPermutation, trackingSpan, new DocumentPermutation.DocumentPermutationAggregation(this.getDocumentPermutations())));
        } else {
            documents = Iterators.transform(documents, new DocumentPermutation.DocumentPermutationAggregation(this.getDocumentPermutations()));
        }
    }
    if (gatherTimingDetails()) {
        documents = new EvaluationTrackingIterator(QuerySpan.Stage.DocumentEvaluation, trackingSpan, getEvaluation(documentSpecificSource, deepSourceCopy, documents, compositeMetadata, typeMetadataWithNonIndexed, columnFamilies, inclusive));
    } else {
        documents = getEvaluation(documentSpecificSource, deepSourceCopy, documents, compositeMetadata, typeMetadataWithNonIndexed, columnFamilies, inclusive);
    }
    // a hook to allow mapping the document such as with the TLD or Parent
    // query logics
    // or if the document was not aggregated in the first place because the
    // field index fields completely satisfied the query
    documents = mapDocument(deepSourceCopy, documents, compositeMetadata);
    // apply any configured post processing
    documents = getPostProcessingChain(documents);
    if (gatherTimingDetails()) {
        documents = new EvaluationTrackingIterator(QuerySpan.Stage.PostProcessing, trackingSpan, documents);
    }
    // Filter out masked values if requested
    if (this.filterMaskedValues) {
        MaskedValueFilterInterface mvfi = MaskedValueFilterFactory.get(this.isIncludeGroupingContext(), this.isReducedResponse());
        if (gatherTimingDetails()) {
            documents = Iterators.transform(documents, new EvaluationTrackingFunction<>(QuerySpan.Stage.MaskedValueFilter, trackingSpan, mvfi));
        } else {
            documents = Iterators.transform(documents, mvfi);
        }
    }
    // now filter the attributes to those with the keep flag set true
    if (gatherTimingDetails()) {
        documents = Iterators.transform(documents, new EvaluationTrackingFunction<>(QuerySpan.Stage.AttributeKeepFilter, trackingSpan, new AttributeKeepFilter<>()));
    } else {
        documents = Iterators.transform(documents, new AttributeKeepFilter<>());
    }
    // Project fields using a whitelist or a blacklist before serialization
    if (this.projectResults) {
        if (gatherTimingDetails()) {
            documents = Iterators.transform(documents, new EvaluationTrackingFunction<>(QuerySpan.Stage.DocumentProjection, trackingSpan, getProjection()));
        } else {
            documents = Iterators.transform(documents, getProjection());
        }
    }
    // remove the composite entries
    documents = Iterators.transform(documents, this.getCompositeProjection());
    // Filter out any documents that are now empty (e.g. all attributes were removed by the projection or visibility filtering)
    if (gatherTimingDetails()) {
        documents = statelessFilter(documents, new EvaluationTrackingPredicate<>(QuerySpan.Stage.EmptyDocumentFilter, trackingSpan, new EmptyDocumentFilter()));
        documents = Iterators.transform(documents, new EvaluationTrackingFunction<>(QuerySpan.Stage.DocumentMetadata, trackingSpan, new DocumentMetadata()));
    } else {
        documents = statelessFilter(documents, new EmptyDocumentFilter());
        documents = Iterators.transform(documents, new DocumentMetadata());
    }
    if (!this.limitFieldsMap.isEmpty()) {
        if (gatherTimingDetails()) {
            documents = Iterators.transform(documents, new EvaluationTrackingFunction<>(QuerySpan.Stage.LimitFields, trackingSpan, new LimitFields(this.getLimitFieldsMap())));
        } else {
            documents = Iterators.transform(documents, new LimitFields(this.getLimitFieldsMap()));
        }
    }
    // do I need to remove the grouping context I added above?
    if (groupingContextAddedByMe) {
        if (gatherTimingDetails()) {
            documents = Iterators.transform(documents, new EvaluationTrackingFunction<>(QuerySpan.Stage.RemoveGroupingContext, trackingSpan, new RemoveGroupingContext()));
        } else {
            documents = Iterators.transform(documents, new RemoveGroupingContext());
        }
    }
    // only add the pipeline query span collection iterator which will cache metrics with each document if collectTimingDetails is true
    if (collectTimingDetails) {
        // if there is not a result, then add the trackingSpan to the
        // QuerySpanCollector
        // if there was a result, then the metrics from the trackingSpan
        // will be added here
        documents = new PipelineQuerySpanCollectionIterator(querySpanCollector, trackingSpan, documents);
    }
    return documents;
}
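
The Javadoc above pins down why Guava's Iterators.filter() is unusable here: its AbstractIterator caches a ready/failed/done state, while this pipeline must tolerate the bottom iterator being reset. A minimal sketch of a stateless filter that honors that contract, keeping only the single permitted piece of state (the next matching value), might look like the following. The class name is illustrative; this is not the actual datawave statelessFilter implementation.

import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.function.Predicate;

public final class StatelessFilterIterator<T> implements Iterator<T> {

    private final Iterator<T> delegate;
    private final Predicate<T> predicate;

    // the only state permitted by the pipeline contract: the next matching
    // value, cached once hasNext() has found it
    private T next;

    public StatelessFilterIterator(Iterator<T> delegate, Predicate<T> predicate) {
        this.delegate = delegate;
        this.predicate = predicate;
    }

    @Override
    public boolean hasNext() {
        // re-poll the delegate every time; assumes the delegate never yields null
        while (next == null && delegate.hasNext()) {
            T candidate = delegate.next();
            if (predicate.test(candidate)) {
                next = candidate;
            }
        }
        return next != null;
    }

    @Override
    public T next() {
        if (!hasNext()) {
            throw new NoSuchElementException();
        }
        T result = next;
        // clear the cache so the following hasNext() polls the delegate again,
        // picking up any reset of the bottom iterator
        next = null;
        return result;
    }
}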
Also used : Document(datawave.query.attributes.Document) KeyToDocumentData(datawave.query.function.KeyToDocumentData) Aggregation(datawave.query.function.Aggregation) DocumentPermutation(datawave.query.function.DocumentPermutation) Entry(java.util.Map.Entry) TupleToEntry(datawave.query.util.TupleToEntry) EvaluationTrackingFunction(datawave.query.iterator.profile.EvaluationTrackingFunction) RemoveGroupingContext(datawave.query.function.RemoveGroupingContext) EmptyDocumentFilter(datawave.query.predicate.EmptyDocumentFilter) DataTypeAsField(datawave.query.function.DataTypeAsField) EvaluationTrackingIterator(datawave.query.iterator.profile.EvaluationTrackingIterator) PipelineQuerySpanCollectionIterator(datawave.query.iterator.profile.PipelineQuerySpanCollectionIterator) QuerySpan(datawave.query.iterator.profile.QuerySpan) MultiThreadedQuerySpan(datawave.query.iterator.profile.MultiThreadedQuerySpan) MaskedValueFilterInterface(datawave.query.function.MaskedValueFilterInterface) DocumentData(datawave.query.iterator.aggregation.DocumentData) KeyToDocumentData(datawave.query.function.KeyToDocumentData) EvaluationTrackingPredicate(datawave.query.iterator.profile.EvaluationTrackingPredicate) LimitFields(datawave.query.function.LimitFields) DocumentMetadata(datawave.query.function.DocumentMetadata) AttributeKeepFilter(datawave.query.attributes.AttributeKeepFilter) Nullable(javax.annotation.Nullable) Key(org.apache.accumulo.core.data.Key)
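
Every stage above is wrapped in an EvaluationTrackingFunction when gatherTimingDetails() is true. A hedged sketch of what such a tracking wrapper looks like: it delegates to the stage's real function and charges the elapsed time of each apply() call to that stage. The elapsedNanos field is a stand-in; how the real class reports into its QuerySpan is not shown here.

import com.google.common.base.Function;

public class TrackingFunctionSketch<F, T> implements Function<F, T> {

    private final String stageName;        // which pipeline stage this wraps
    private final Function<F, T> delegate; // the real work, e.g. Aggregation
    private long elapsedNanos;             // stand-in for reporting into a QuerySpan

    public TrackingFunctionSketch(String stageName, Function<F, T> delegate) {
        this.stageName = stageName;
        this.delegate = delegate;
    }

    @Override
    public T apply(F input) {
        long start = System.nanoTime();
        try {
            return delegate.apply(input);
        } finally {
            // charge the elapsed time to this stage even if the delegate throws
            elapsedNanos += System.nanoTime() - start;
        }
    }
}

Because the wrapper is itself a Function, it slots into Iterators.transform() unchanged, which is why each stage of the pipeline above differs only by the gatherTimingDetails() branch.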

Example 2 with EvaluationTrackingFunction

Use of datawave.query.iterator.profile.EvaluationTrackingFunction in the NationalSecurityAgency/datawave project.

From the class FieldIndexOnlyQueryIterator, the method seek:

@Override
public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException {
    if (log.isDebugEnabled()) {
        log.debug("Seek range: " + range);
    }
    this.range = range;
    Iterator<Entry<Key, Document>> fieldIndexDocuments = null;
    try {
        fieldIndexDocuments = getDocumentIterator(range, columnFamilies, inclusive);
    } catch (ConfigException | IllegalAccessException | InstantiationException e) {
        throw new IOException("Unable to create document iterator", e);
    }
    // Inject the data type as a field if the user requested it
    if (this.includeDatatype) {
        if (collectTimingDetails) {
            fieldIndexDocuments = Iterators.transform(fieldIndexDocuments, new EvaluationTrackingFunction<>(QuerySpan.Stage.DataTypeAsField, trackingSpan, new DataTypeAsField(this.datatypeKey)));
        } else {
            fieldIndexDocuments = Iterators.transform(fieldIndexDocuments, new DataTypeAsField(this.datatypeKey));
        }
    }
    // Filter out masked values if requested
    if (this.filterMaskedValues) {
    // Should we filter here, or not?
    }
    if (collectTimingDetails) {
        // store the timing metadata using the documentRange endKey
        if (!fieldIndexDocuments.hasNext()) {
            fieldIndexDocuments = Collections.singletonMap(this.range.getEndKey(), new Document()).entrySet().iterator();
        }
        fieldIndexDocuments = Iterators.transform(fieldIndexDocuments, new LogTiming(trackingSpan));
    }
    if (this.getReturnType() == ReturnType.kryo) {
        // Serialize the Document using Kryo
        this.serializedDocuments = Iterators.transform(fieldIndexDocuments, new KryoDocumentSerializer(isReducedResponse(), isCompressResults()));
    } else if (this.getReturnType() == ReturnType.writable) {
        // Use the Writable interface to serialize the Document
        this.serializedDocuments = Iterators.transform(fieldIndexDocuments, new WritableDocumentSerializer(isReducedResponse()));
    } else if (this.getReturnType() == ReturnType.tostring) {
        // Just return a toString() representation of the document
        this.serializedDocuments = Iterators.transform(fieldIndexDocuments, new ToStringDocumentSerializer(isReducedResponse()));
    } else {
        throw new IllegalArgumentException("Unknown return type of: " + this.getReturnType());
    }
    // Determine if we have items to return
    if (this.serializedDocuments.hasNext()) {
        Entry<Key, Value> entry = this.serializedDocuments.next();
        this.key = entry.getKey();
        this.value = entry.getValue();
        entry = null;
    } else {
        this.key = null;
        this.value = null;
    }
}
Also used : ToStringDocumentSerializer(datawave.query.function.serializer.ToStringDocumentSerializer) DataTypeAsField(datawave.query.function.DataTypeAsField) WritableDocumentSerializer(datawave.query.function.serializer.WritableDocumentSerializer) LogTiming(datawave.query.function.LogTiming) ConfigException(org.apache.zookeeper.server.quorum.QuorumPeerConfig.ConfigException) IOException(java.io.IOException) Document(datawave.query.attributes.Document) KryoDocumentSerializer(datawave.query.function.serializer.KryoDocumentSerializer) Entry(java.util.Map.Entry) EvaluationTrackingFunction(datawave.query.iterator.profile.EvaluationTrackingFunction) Value(org.apache.accumulo.core.data.Value) Key(org.apache.accumulo.core.data.Key) GetStartKey(datawave.query.function.GetStartKey) PartialKey(org.apache.accumulo.core.data.PartialKey)
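
The tail of seek() above caches the first serialized entry in this.key and this.value, which is what backs the standard Accumulo SortedKeyValueIterator contract (hasTop()/getTopKey()/getTopValue()/next()). A sketch of how a caller typically drains such an iterator, assuming that contract; drain() and the consumer comment are illustrative, not part of datawave.

import java.io.IOException;
import java.util.Collection;

import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;

public final class DrainExample {

    static void drain(SortedKeyValueIterator<Key, Value> skvi, Range range,
                    Collection<ByteSequence> families, boolean inclusive) throws IOException {
        skvi.seek(range, families, inclusive);
        while (skvi.hasTop()) {
            Key k = skvi.getTopKey();
            Value v = skvi.getTopValue(); // a Document serialized per the configured ReturnType
            // ... hand (k, v) to the framework or client here ...
            skvi.next(); // advances and re-populates the cached top key/value
        }
    }
}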

Example 3 with EvaluationTrackingFunction

Use of datawave.query.iterator.profile.EvaluationTrackingFunction in the NationalSecurityAgency/datawave project.

From the class QueryIterator, the method mapDocument:

protected Iterator<Entry<Key, Document>> mapDocument(SortedKeyValueIterator<Key, Value> deepSourceCopy, Iterator<Entry<Key, Document>> documents, CompositeMetadata compositeMetadata) {
    // now let's pull the data if we need to
    if (log.isTraceEnabled()) {
        log.trace("mapDocument " + fieldIndexSatisfiesQuery);
    }
    if (fieldIndexSatisfiesQuery) {
        final KeyToDocumentData docMapper = new KeyToDocumentData(deepSourceCopy, this.myEnvironment, this.documentOptions, super.equality, getEvaluationFilter(), this.includeHierarchyFields, this.includeHierarchyFields);
        Iterator<Tuple2<Key, Document>> mappedDocuments = Iterators.transform(documents, new GetDocument(docMapper, new Aggregation(this.getTimeFilter(), typeMetadataWithNonIndexed, compositeMetadata, this.isIncludeGroupingContext(), this.includeRecordId, this.disableIndexOnlyDocuments(), getEvaluationFilter(), isTrackSizes())));
        Iterator<Entry<Key, Document>> retDocuments = Iterators.transform(mappedDocuments, new TupleToEntry<>());
        // Inject the document permutations if required
        if (!this.getDocumentPermutations().isEmpty()) {
            if (gatherTimingDetails()) {
                retDocuments = Iterators.transform(retDocuments, new EvaluationTrackingFunction<>(QuerySpan.Stage.DocumentPermutation, trackingSpan, new DocumentPermutation.DocumentPermutationAggregation(this.getDocumentPermutations())));
            } else {
                retDocuments = Iterators.transform(retDocuments, new DocumentPermutation.DocumentPermutationAggregation(this.getDocumentPermutations()));
            }
        }
        return retDocuments;
    }
    return documents;
}
Also used : Aggregation(datawave.query.function.Aggregation) DocumentPermutation(datawave.query.function.DocumentPermutation) Entry(java.util.Map.Entry) TupleToEntry(datawave.query.util.TupleToEntry) EvaluationTrackingFunction(datawave.query.iterator.profile.EvaluationTrackingFunction) Tuple2(datawave.query.util.Tuple2) KeyToDocumentData(datawave.query.function.KeyToDocumentData)
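
mapDocument() above converts the Tuple2<Key, Document> pairs produced by GetDocument back into Map.Entry form with TupleToEntry, so the rest of the pipeline can stay uniform. A minimal sketch of such an adapter; the nested Pair type stands in for datawave.query.util.Tuple2, whose accessors are not reproduced here.

import java.util.Map.Entry;

import com.google.common.base.Function;
import com.google.common.collect.Maps;

public class TupleToEntrySketch<K, V> implements Function<TupleToEntrySketch.Pair<K, V>, Entry<K, V>> {

    // stand-in for datawave.query.util.Tuple2
    public static final class Pair<A, B> {
        final A first;
        final B second;

        public Pair(A first, B second) {
            this.first = first;
            this.second = second;
        }
    }

    @Override
    public Entry<K, V> apply(Pair<K, V> tuple) {
        // repackage the pair as an immutable Map.Entry
        return Maps.immutableEntry(tuple.first, tuple.second);
    }
}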

Aggregations

EvaluationTrackingFunction (datawave.query.iterator.profile.EvaluationTrackingFunction): 3
Entry (java.util.Map.Entry): 3
Document (datawave.query.attributes.Document): 2
Aggregation (datawave.query.function.Aggregation): 2
DataTypeAsField (datawave.query.function.DataTypeAsField): 2
DocumentPermutation (datawave.query.function.DocumentPermutation): 2
KeyToDocumentData (datawave.query.function.KeyToDocumentData): 2
TupleToEntry (datawave.query.util.TupleToEntry): 2
Key (org.apache.accumulo.core.data.Key): 2
AttributeKeepFilter (datawave.query.attributes.AttributeKeepFilter): 1
DocumentMetadata (datawave.query.function.DocumentMetadata): 1
GetStartKey (datawave.query.function.GetStartKey): 1
LimitFields (datawave.query.function.LimitFields): 1
LogTiming (datawave.query.function.LogTiming): 1
MaskedValueFilterInterface (datawave.query.function.MaskedValueFilterInterface): 1
RemoveGroupingContext (datawave.query.function.RemoveGroupingContext): 1
KryoDocumentSerializer (datawave.query.function.serializer.KryoDocumentSerializer): 1
ToStringDocumentSerializer (datawave.query.function.serializer.ToStringDocumentSerializer): 1
WritableDocumentSerializer (datawave.query.function.serializer.WritableDocumentSerializer): 1
DocumentData (datawave.query.iterator.aggregation.DocumentData): 1