Use of com.ibm.cohort.cql.evaluation.CqlEvaluationRequests in project quality-measure-and-cohort-service by Alvearie: class SparkSchemaCreatorTest, method testInvalidKeyColumn.
@Test(expected = IllegalArgumentException.class)
public void testInvalidKeyColumn() throws Exception {
    // Single context whose key column ("other") is not a valid key for the model type,
    // which should cause schema calculation to fail.
    ContextDefinitions contextDefinitions = makeContextDefinitions(
            Collections.singletonList(makeContextDefinition("Context1Id", "Type1", "other")));

    CqlEvaluationRequests cqlEvaluationRequests = makeEvaluationRequests(
            Collections.singletonList(makeEvaluationRequest(
                    new CqlLibraryDescriptor().setLibraryId("Context1Id").setVersion("1.0.0"),
                    new HashSet<>(Collections.singletonList("define_boolean")),
                    "Context1Id")));

    SparkSchemaCreator schemaCreator = new SparkSchemaCreator(
            cqlLibraryProvider, cqlEvaluationRequests, contextDefinitions,
            outputColumnNameFactory, cqlTranslator);

    // Expected to throw IllegalArgumentException (declared on the @Test annotation).
    schemaCreator.calculateSchemasForContexts(Collections.singletonList("Context1Id"));
}
Use of com.ibm.cohort.cql.evaluation.CqlEvaluationRequests in project quality-measure-and-cohort-service by Alvearie: class SparkSchemaCreatorTest, method singleContextSupportedDefineTypes.
@Test
public void singleContextSupportedDefineTypes() throws Exception {
    // One context "A" keyed on the "id" column.
    ContextDefinitions contextDefinitions = makeContextDefinitions(
            Collections.singletonList(makeContextDefinition("A", "Type1", "id")));

    // Two library requests against context "A", together covering every supported
    // define result type (integer, boolean, string, decimal, date, datetime).
    CqlEvaluationRequests cqlEvaluationRequests = makeEvaluationRequests(Arrays.asList(
            makeEvaluationRequest(
                    new CqlLibraryDescriptor().setLibraryId("Context1Id").setVersion("1.0.0"),
                    new HashSet<>(Arrays.asList("define_integer", "define_boolean", "define_string", "define_decimal")),
                    "A"),
            makeEvaluationRequest(
                    new CqlLibraryDescriptor().setLibraryId("Context2Id").setVersion("1.0.0"),
                    new HashSet<>(Arrays.asList("define_date", "define_datetime")),
                    "A")));

    SparkSchemaCreator schemaCreator = new SparkSchemaCreator(
            cqlLibraryProvider, cqlEvaluationRequests, contextDefinitions,
            outputColumnNameFactory, cqlTranslator);

    StructType actualSchema = schemaCreator
            .calculateSchemasForContexts(Collections.singletonList("A"))
            .get("A");

    // Key and parameters columns are non-nullable; every define output column is nullable.
    StructType expectedSchema = new StructType()
            .add("id", DataTypes.IntegerType, false)
            .add("parameters", DataTypes.StringType, false)
            .add("Context1Id.define_integer", DataTypes.IntegerType, true)
            .add("Context1Id.define_boolean", DataTypes.BooleanType, true)
            .add("Context1Id.define_string", DataTypes.StringType, true)
            .add("Context1Id.define_decimal", DataTypes.createDecimalType(28, 8), true)
            .add("Context2Id.define_date", DataTypes.DateType, true)
            .add("Context2Id.define_datetime", DataTypes.TimestampType, true);

    validateSchemas(expectedSchema, actualSchema, "id");
}
Use of com.ibm.cohort.cql.evaluation.CqlEvaluationRequests in project quality-measure-and-cohort-service by Alvearie: class SparkSchemaCreatorTest, method testMultipleContextDefinitionsForContext.
@Test(expected = IllegalArgumentException.class)
public void testMultipleContextDefinitionsForContext() throws Exception {
    // Two identical definitions registered for the same context name; the schema
    // creator should reject the ambiguity.
    ContextDefinitions contextDefinitions = makeContextDefinitions(Arrays.asList(
            makeContextDefinition("Context1Id", "Type1", "id"),
            makeContextDefinition("Context1Id", "Type1", "id")));

    CqlEvaluationRequests cqlEvaluationRequests = makeEvaluationRequests(
            Collections.singletonList(makeEvaluationRequest(
                    new CqlLibraryDescriptor().setLibraryId("Context1Id").setVersion("1.0.0"),
                    new HashSet<>(Collections.singletonList("define_boolean")),
                    "Context1Id")));

    SparkSchemaCreator schemaCreator = new SparkSchemaCreator(
            cqlLibraryProvider, cqlEvaluationRequests, contextDefinitions,
            outputColumnNameFactory, cqlTranslator);

    // Expected to throw IllegalArgumentException (declared on the @Test annotation).
    schemaCreator.calculateSchemasForContexts(Collections.singletonList("Context1Id"));
}
Use of com.ibm.cohort.cql.evaluation.CqlEvaluationRequests in project quality-measure-and-cohort-service by Alvearie: class SparkSchemaCreatorTest, method testDuplicateTypeInformation.
@Test(expected = IllegalArgumentException.class)
public void testDuplicateTypeInformation() throws Exception {
    // Model info containing duplicate type entries; schema creation should fail.
    cqlTranslator.registerModelInfo(
            new File("src/test/resources/schema-validation/duplicate-type-model-info.xml"));

    ContextDefinitions contextDefinitions = makeContextDefinitions(
            Collections.singletonList(makeContextDefinition("Context1Id", "Type1", "integer")));

    CqlEvaluationRequests cqlEvaluationRequests = makeEvaluationRequests(
            Collections.singletonList(makeEvaluationRequest(
                    new CqlLibraryDescriptor().setLibraryId("Context1Id").setVersion("1.0.0"),
                    new HashSet<>(Collections.singletonList("define_integer")),
                    "Context1Id")));

    SparkSchemaCreator schemaCreator = new SparkSchemaCreator(
            cqlLibraryProvider, cqlEvaluationRequests, contextDefinitions,
            outputColumnNameFactory, cqlTranslator);

    // Expected to throw IllegalArgumentException (declared on the @Test annotation).
    schemaCreator.calculateSchemasForContexts(Collections.singletonList("Context1Id"));
}
Use of com.ibm.cohort.cql.evaluation.CqlEvaluationRequests in project quality-measure-and-cohort-service by Alvearie: class SparkCqlEvaluatorTest, method testParameterMatrixOutputDisabledRowsGroupingSuccess.
@Test
public void testParameterMatrixOutputDisabledRowsGroupingSuccess() throws Exception {
    String outputLocation = "target/output/param-matrix-group-disabled/patient_cohort";

    // Template request cloned for each parameterized evaluation below.
    CqlEvaluationRequest template = new CqlEvaluationRequest();
    template.setDescriptor(new CqlLibraryDescriptor().setLibraryId("SampleLibrary").setVersion("1.0.0"));
    template.setExpressionsByNames(Collections.singleton("IsFemale"));
    template.setContextKey("Patient");
    template.setContextValue("NA");

    CqlEvaluationRequests requests = new CqlEvaluationRequests();
    requests.setEvaluations(new ArrayList<>());

    // One evaluation per age value; each run renames "IsFemale" to a unique
    // output column so the parameter matrix produces distinct columns.
    List<Integer> ages = Arrays.asList(15, 17, 18);
    for (Integer age : ages) {
        Map<String, Parameter> parameters = new HashMap<>();
        parameters.put("MinimumAge", new IntegerParameter(age));

        CqlExpressionConfiguration renamed = new CqlExpressionConfiguration();
        renamed.setName("IsFemale");
        renamed.setOutputColumn("IsFemale" + age);

        CqlEvaluationRequest request = new CqlEvaluationRequest(template);
        request.setExpressions(Collections.singleton(renamed));
        request.setParameters(parameters);
        requests.getEvaluations().add(request);
    }

    ObjectMapper om = new ObjectMapper();
    // FIX: write the jobs file under this test's own directory
    // ("param-matrix-group-disabled") instead of "param-matrix-simple",
    // which belongs to a different test — the old path risked clobbering
    // that test's fixture and deleting it in the finally block below.
    File jobsFile = new File("target/output/param-matrix-group-disabled/cql-jobs.json");
    // FIX: always ensure the parent directory exists; the previous guard
    // (!jobsFile.exists()) skipped mkdirs when a stale file was present,
    // and did nothing useful when only the directory was missing.
    jobsFile.getParentFile().mkdirs();
    FileUtils.write(jobsFile, om.writeValueAsString(requests), StandardCharsets.UTF_8);

    try {
        String[] args = new String[] { "-d", "src/test/resources/simple-job/context-definitions.json", "-j", jobsFile.getPath(), "-m", "src/test/resources/simple-job/modelinfo/simple-modelinfo-1.0.0.xml", "-c", "src/test/resources/simple-job/cql", "-i", "Patient=" + new File("src/test/resources/simple-job/testdata/patient").toURI().toString(), "-o", "Patient=" + new File(outputLocation).toURI().toString(), "--output-format", "delta", "--overwrite-output-for-contexts", "--metadata-output-path", outputLocation, "--disable-result-grouping" };
        SparkCqlEvaluator.main(args);

        // With result grouping disabled, all parameter runs land in one wide
        // table: one output column per renamed define, 10 patient rows.
        validateOutputCountsAndColumns(outputLocation,
                new HashSet<>(Arrays.asList("id", "parameters", "IsFemale15", "IsFemale17", "IsFemale18")),
                10, "delta");
    } finally {
        // Clean up the generated jobs file regardless of test outcome.
        jobsFile.delete();
    }
}
Aggregations