Search in sources :

Example 1 with DataTypeRequirementsProcessor

Use of com.ibm.cohort.cql.spark.optimizer.DataTypeRequirementsProcessor in the project quality-measure-and-cohort-service by Alvearie.

From the class ColumnRuleCreator, the method getDataRequirementsForContext:

/**
 * Retrieve the merged set of data type and column filters for all CQL jobs that will
 * be evaluated for a given aggregation context. In addition to the columns referenced
 * directly by the CQL, this includes the primary key, join key, and bookkeeping columns
 * (ContextRetriever.JOIN_CONTEXT_VALUE_IDX / SOURCE_FACT_IDX) needed to assemble the
 * context rows.
 *
 * @param context ContextDefinition whose CQL jobs will be interrogated for data requirements
 * @return Map of data type to the fields in that datatype that are used by the CQL jobs
 */
public Map<String, Set<StringMatcher>> getDataRequirementsForContext(ContextDefinition context) {
    // Group the requested expression names by defining library so each library is
    // only analyzed once by the requirements processor.
    Map<CqlLibraryDescriptor, Set<String>> expressionsByLibrary = new HashMap<>();
    for (CqlEvaluationRequest request : requests) {
        Set<String> expressions = expressionsByLibrary.computeIfAbsent(request.getDescriptor(), desc -> new HashSet<>());
        // Iterable.forEach — no need to open a Stream just to iterate.
        request.getExpressions().forEach(exp -> expressions.add(exp.getName()));
    }

    DataTypeRequirementsProcessor requirementsProcessor = new DataTypeRequirementsProcessor(cqlTranslator);

    // Union of column matchers across all libraries, keyed by data type.
    Map<String, Set<StringMatcher>> pathsByDataType = new HashMap<>();
    for (Map.Entry<CqlLibraryDescriptor, Set<String>> entry : expressionsByLibrary.entrySet()) {
        LOG.debug("Extracting data requirements for {}", entry.getKey());
        DataTypeRequirementsProcessor.DataTypeRequirements requirements =
                requirementsProcessor.getDataRequirements(libraryProvider, entry.getKey(), entry.getValue());

        Map<String, Set<StringMatcher>> newPaths = requirements.allAsStringMatcher();
        // Merge this library's requirements into the accumulated map, unioning the
        // matcher sets when the same data type appears in multiple libraries.
        newPaths.forEach((key, value) -> pathsByDataType.merge(key, value, (prev, current) -> {
            prev.addAll(current);
            return prev;
        }));
    }

    // The primary key column of the context's primary data type is always required.
    Set<StringMatcher> contextFields = pathsByDataType.computeIfAbsent(context.getPrimaryDataType(), dt -> new HashSet<>());
    contextFields.add(new EqualsStringMatcher(context.getPrimaryKeyColumn()));

    if (context.getRelationships() != null) {
        for (Join join : context.getRelationships()) {
            // Only augment data types the CQL actually touches; an absent entry means
            // no job reads that related type, so its join columns are not needed.
            Set<StringMatcher> joinFields = pathsByDataType.get(join.getRelatedDataType());
            if (joinFields != null) {
                joinFields.add(new EqualsStringMatcher(join.getRelatedKeyColumn()));
                joinFields.add(new EqualsStringMatcher(ContextRetriever.JOIN_CONTEXT_VALUE_IDX));

                // if the join key is not the primary key of the primary data table, then we need to add in the alternate key
                if (join.getPrimaryDataTypeColumn() != null) {
                    contextFields.add(new EqualsStringMatcher(join.getPrimaryDataTypeColumn()));
                }

                if (join instanceof ManyToMany) {
                    // The association (link) table contributes both of its key columns.
                    ManyToMany manyToMany = (ManyToMany) join;
                    Set<StringMatcher> associationFields = pathsByDataType.computeIfAbsent(manyToMany.getAssociationDataType(), dt -> new HashSet<>());
                    associationFields.add(new EqualsStringMatcher(manyToMany.getAssociationOneKeyColumn()));
                    associationFields.add(new EqualsStringMatcher(manyToMany.getAssociationManyKeyColumn()));
                }

                if (join instanceof MultiManyToMany) {
                    // Walk the chain of nested many-to-many joins, adding each link's
                    // related key and join-context columns along the way.
                    ManyToMany with = ((MultiManyToMany) join).getWith();
                    while (with != null) {
                        Set<StringMatcher> relatedFields = pathsByDataType.computeIfAbsent(with.getRelatedDataType(), dt -> new HashSet<>());
                        relatedFields.add(new EqualsStringMatcher(with.getRelatedKeyColumn()));
                        relatedFields.add(new EqualsStringMatcher(ContextRetriever.JOIN_CONTEXT_VALUE_IDX));
                        with = (with instanceof MultiManyToMany) ? ((MultiManyToMany) with).getWith() : null;
                    }
                }
            }
        }
    }

    // Every data type that survives filtering needs the source-fact index column.
    pathsByDataType.values().forEach(matcherSet ->
            matcherSet.add(new EqualsStringMatcher(ContextRetriever.SOURCE_FACT_IDX)));

    return pathsByDataType;
}
Also used : EqualsStringMatcher(com.ibm.cohort.cql.util.EqualsStringMatcher) Logger(org.slf4j.Logger) CqlToElmTranslator(com.ibm.cohort.cql.translation.CqlToElmTranslator) DataTypeRequirementsProcessor(com.ibm.cohort.cql.spark.optimizer.DataTypeRequirementsProcessor) LoggerFactory(org.slf4j.LoggerFactory) Set(java.util.Set) CqlLibraryProvider(com.ibm.cohort.cql.library.CqlLibraryProvider) HashMap(java.util.HashMap) CqlLibraryDescriptor(com.ibm.cohort.cql.library.CqlLibraryDescriptor) HashSet(java.util.HashSet) StringMatcher(com.ibm.cohort.cql.util.StringMatcher) List(java.util.List) Map(java.util.Map) CqlEvaluationRequest(com.ibm.cohort.cql.evaluation.CqlEvaluationRequest) Set(java.util.Set) HashSet(java.util.HashSet) DataTypeRequirementsProcessor(com.ibm.cohort.cql.spark.optimizer.DataTypeRequirementsProcessor) HashMap(java.util.HashMap) CqlEvaluationRequest(com.ibm.cohort.cql.evaluation.CqlEvaluationRequest) EqualsStringMatcher(com.ibm.cohort.cql.util.EqualsStringMatcher) StringMatcher(com.ibm.cohort.cql.util.StringMatcher) EqualsStringMatcher(com.ibm.cohort.cql.util.EqualsStringMatcher) CqlLibraryDescriptor(com.ibm.cohort.cql.library.CqlLibraryDescriptor) HashMap(java.util.HashMap) Map(java.util.Map)

Aggregations

CqlEvaluationRequest (com.ibm.cohort.cql.evaluation.CqlEvaluationRequest)1 CqlLibraryDescriptor (com.ibm.cohort.cql.library.CqlLibraryDescriptor)1 CqlLibraryProvider (com.ibm.cohort.cql.library.CqlLibraryProvider)1 DataTypeRequirementsProcessor (com.ibm.cohort.cql.spark.optimizer.DataTypeRequirementsProcessor)1 CqlToElmTranslator (com.ibm.cohort.cql.translation.CqlToElmTranslator)1 EqualsStringMatcher (com.ibm.cohort.cql.util.EqualsStringMatcher)1 StringMatcher (com.ibm.cohort.cql.util.StringMatcher)1 HashMap (java.util.HashMap)1 HashSet (java.util.HashSet)1 List (java.util.List)1 Map (java.util.Map)1 Set (java.util.Set)1 Logger (org.slf4j.Logger)1 LoggerFactory (org.slf4j.LoggerFactory)1