Usage example of au.csiro.pathling.test.builders.ResourceDatasetBuilder in the pathling project by aehrc.
Taken from the class CountFunctionTest, method countsByResourceIdentity.
/**
 * Verifies that invoking {@code count()} on a resource path grouped by resource
 * identity yields one count per resource, each equal to 1.
 */
@Test
void countsByResourceIdentity() {
  // Stub the database with three Patient rows, one per distinct resource ID.
  final Dataset<Row> sourceData = new ResourceDatasetBuilder(spark)
      .withIdColumn()
      .withColumn("gender", DataTypes.StringType)
      .withColumn("active", DataTypes.BooleanType)
      .withRow("patient-1", "female", true)
      .withRow("patient-2", "female", false)
      .withRow("patient-3", "male", true)
      .build();
  when(database.read(ResourceType.PATIENT)).thenReturn(sourceData);

  final ResourcePath input = ResourcePath.build(fhirContext, database,
      ResourceType.PATIENT, "Patient", false);
  // Group by the resource ID column so counts are computed per resource.
  final ParserContext context = new ParserContextBuilder(spark, fhirContext)
      .idColumn(input.getIdColumn())
      .groupingColumns(Collections.singletonList(input.getIdColumn()))
      .inputExpression("Patient")
      .build();

  final NamedFunctionInput functionInput =
      new NamedFunctionInput(context, input, Collections.emptyList());
  final FhirPath result = NamedFunction.getInstance("count").invoke(functionInput);

  // Each resource appears exactly once, so every count is 1.
  final Dataset<Row> expected = new DatasetBuilder(spark)
      .withIdColumn()
      .withColumn(DataTypes.LongType)
      .withRow("patient-1", 1L)
      .withRow("patient-2", 1L)
      .withRow("patient-3", 1L)
      .build();
  assertThat(result)
      .hasExpression("count()")
      .isSingular()
      .isElementPath(IntegerPath.class)
      .hasFhirType(FHIRDefinedType.UNSIGNEDINT)
      .selectOrderedResult()
      .hasRows(expected);
}
Usage example of au.csiro.pathling.test.builders.ResourceDatasetBuilder in the pathling project by aehrc.
Taken from the class CountFunctionTest, method countsByGrouping.
/**
 * Verifies that invoking {@code count()} with a non-identity grouping column
 * (gender) aggregates the counts within each group.
 */
@Test
void countsByGrouping() {
  // Three Patient rows: two "female" (distinct IDs) and one "male"
  // (which reuses the ID "patient-2", matching the original fixture).
  final Dataset<Row> sourceData = new ResourceDatasetBuilder(spark)
      .withIdColumn()
      .withColumn("gender", DataTypes.StringType)
      .withColumn("active", DataTypes.BooleanType)
      .withRow("patient-1", "female", true)
      .withRow("patient-2", "female", false)
      .withRow("patient-2", "male", true)
      .build();
  when(database.read(ResourceType.PATIENT)).thenReturn(sourceData);

  final ResourcePath input = new ResourcePathBuilder(spark)
      .database(database)
      .resourceType(ResourceType.PATIENT)
      .expression("Patient")
      .build();

  // Group by the gender element rather than by resource identity.
  final Column genderColumn = input.getElementColumn("gender");
  final ParserContext context = new ParserContextBuilder(spark, fhirContext)
      .groupingColumns(Collections.singletonList(genderColumn))
      .inputExpression("Patient")
      .build();

  final NamedFunctionInput functionInput =
      new NamedFunctionInput(context, input, Collections.emptyList());
  final FhirPath result = NamedFunction.getInstance("count").invoke(functionInput);

  final Dataset<Row> expected = new DatasetBuilder(spark)
      .withColumn(DataTypes.StringType)
      .withColumn(DataTypes.LongType)
      .withRow("female", 2L)
      .withRow("male", 1L)
      .build();
  assertThat(result)
      .hasExpression("count()")
      .isSingular()
      .isElementPath(IntegerPath.class)
      .hasFhirType(FHIRDefinedType.UNSIGNEDINT)
      .selectGroupingResult(Collections.singletonList(genderColumn))
      .hasRows(expected);
}
Usage example of au.csiro.pathling.test.builders.ResourceDatasetBuilder in the pathling project by aehrc.
Taken from the class ExtensionFunctionTest, method testExtensionOnElements.
/**
 * Verifies that {@code extension('uuid:myExtension')} applied to an element path
 * (Observation.code) resolves extensions from the _extension container keyed by
 * the element's _fid, producing one row per candidate extension with nulls where
 * the URL does not match.
 */
@Test
void testExtensionOnElements() {
  // Fix: dropped the `public` modifier — JUnit 5 test methods are
  // package-private by convention, consistent with the sibling tests in this file.
  final ParserContext parserContext = new ParserContextBuilder(spark, fhirContext).build();

  // Construct the element dataset from a resource-like dataset so that the
  // resource path can serve as the current resource for this element path.
  // Note: this resource path is not singular, as it is a base for elements.
  final Dataset<Row> resourceLikeDataset = new ResourceDatasetBuilder(spark)
      .withIdColumn()
      .withEidColumn()
      .withStructColumn("name", DataTypes.StringType)
      .withStructColumn("_fid", DataTypes.IntegerType)
      .withStructValueColumn()
      .withExtensionColumn()
      .withRow("observation-1", makeEid(0), RowFactory.create("name1", 0),
          oneEntryMap(0, MANY_MY_EXTENSIONS))
      .withRow("observation-2", makeEid(0), RowFactory.create("name2", 1),
          oneEntryMap(1, ONE_MY_EXTENSION))
      .withRow("observation-3", makeEid(0), RowFactory.create("name3", 2),
          oneEntryMap(2, NO_MY_EXTENSIONS))
      .withRow("observation-4", makeEid(0), RowFactory.create("name4", 3),
          oneEntryMap(3, ONE_MY_EXTENSION))
      .withRow("observation-4", makeEid(1), RowFactory.create("name5", 4),
          oneEntryMap(3, ONE_MY_EXTENSION))
      .withRow("observation-5", makeEid(0), null, null)
      .withRow("observation-5", makeEid(1), null, null)
      .build();
  when(database.read(ResourceType.OBSERVATION)).thenReturn(resourceLikeDataset);

  final ResourcePath baseResourcePath = ResourcePath.build(fhirContext, database,
      ResourceType.OBSERVATION, "Observation", false);
  final Dataset<Row> elementDataset = toElementDataset(resourceLikeDataset, baseResourcePath);
  final ElementDefinition codeDefinition =
      checkPresent(FhirHelpers.getChildOfResource(fhirContext, "Observation", "code"));

  final ElementPath inputPath = new ElementPathBuilder(spark)
      .fhirType(FHIRDefinedType.CODEABLECONCEPT)
      .definition(codeDefinition)
      .dataset(elementDataset)
      .idAndEidAndValueColumns()
      .expression("code")
      .singular(false)
      .currentResource(baseResourcePath)
      .buildDefined();
  final StringLiteralPath argumentExpression =
      StringLiteralPath.fromString("'" + "uuid:myExtension" + "'", inputPath);

  final NamedFunctionInput extensionInput = new NamedFunctionInput(parserContext, inputPath,
      Collections.singletonList(argumentExpression));
  final NamedFunction extension = NamedFunction.getInstance("extension");
  final FhirPath result = extension.invoke(extensionInput);

  // Non-matching extensions surface as null rows; matches carry the extension struct.
  final Dataset<Row> expectedResult = new DatasetBuilder(spark)
      .withIdColumn()
      .withEidColumn()
      .withStructTypeColumns(DatasetBuilder.SIMPLE_EXTENSION_TYPE)
      .withRow("observation-1", makeEid(0, 0), null)
      .withRow("observation-1", makeEid(0, 1), null)
      .withRow("observation-1", makeEid(0, 2), MANY_EXT_ROW_1)
      .withRow("observation-1", makeEid(0, 3), MANY_EXT_ROW_2)
      .withRow("observation-2", makeEid(0, 0), null)
      .withRow("observation-2", makeEid(0, 1), ONE_EXT_ROW_1)
      .withRow("observation-3", makeEid(0, 0), null)
      .withRow("observation-3", makeEid(0, 1), null)
      .withRow("observation-4", makeEid(0, 0), null)
      .withRow("observation-4", makeEid(0, 1), ONE_EXT_ROW_1)
      .withRow("observation-4", makeEid(1, 0), null)
      .withRow("observation-5", makeEid(0, 0), null)
      .withRow("observation-5", makeEid(1, 0), null)
      .buildWithStructValue();
  assertThat(result)
      .hasExpression("code.extension('uuid:myExtension')")
      .isNotSingular()
      .isElementPath(ElementPath.class)
      .hasFhirType(FHIRDefinedType.EXTENSION)
      .selectOrderedResultWithEid()
      .hasRows(expectedResult);
}
Usage example of au.csiro.pathling.test.builders.ResourceDatasetBuilder in the pathling project by aehrc.
Taken from the class FirstFunctionTest, method firstOfRootResources.
/**
 * Verifies that {@code first()} on a root resource path returns a singular
 * ResourcePath and, when grouped by resource identity, selects each resource's
 * own ID.
 */
@Test
void firstOfRootResources() {
  final Dataset<Row> sourceData = new ResourceDatasetBuilder(spark)
      .withIdColumn()
      .withColumn("gender", DataTypes.StringType)
      .withColumn("active", DataTypes.BooleanType)
      .withRow("patient-1", "female", true)
      .withRow("patient-2", "female", false)
      .withRow("patient-3", "male", true)
      .build();
  when(database.read(ResourceType.PATIENT)).thenReturn(sourceData);

  // Build the input path as singular (last argument true), matching the fixture.
  final ResourcePath input = ResourcePath.build(fhirContext, database,
      ResourceType.PATIENT, "Patient", true);
  final ParserContext context = new ParserContextBuilder(spark, fhirContext)
      .groupingColumns(Collections.singletonList(input.getIdColumn()))
      .build();

  final NamedFunctionInput functionInput =
      new NamedFunctionInput(context, input, Collections.emptyList());
  final FhirPath result = NamedFunction.getInstance("first").invoke(functionInput);

  // The result must itself be a ResourcePath with the same resource type.
  assertTrue(result instanceof ResourcePath);
  assertThat((ResourcePath) result)
      .hasExpression("Patient.first()")
      .isSingular()
      .hasResourceType(ResourceType.PATIENT);

  // Grouped by identity, first() yields each resource's own ID.
  final Dataset<Row> expected = new DatasetBuilder(spark)
      .withIdColumn()
      .withColumn(DataTypes.StringType)
      .withRow("patient-1", "patient-1")
      .withRow("patient-2", "patient-2")
      .withRow("patient-3", "patient-3")
      .build();
  assertThat(result).selectOrderedResult().hasRows(expected);
}
Usage example of au.csiro.pathling.test.builders.ResourceDatasetBuilder in the pathling project by aehrc.
Taken from the class IifFunctionTest, method returnsCorrectResultsForLiteralAndNonLiteral.
/**
 * Verifies {@code iif} with a mix of literal and non-literal result arguments,
 * in both argument orders: null or false conditions select the "otherwise"
 * branch, true conditions select the "ifTrue" branch.
 */
@Test
void returnsCorrectResultsForLiteralAndNonLiteral() {
  // Input context: five Observation resources, IDs only.
  final Dataset<Row> contextData = new ResourceDatasetBuilder(spark)
      .withIdColumn()
      .withRow("observation-1")
      .withRow("observation-2")
      .withRow("observation-3")
      .withRow("observation-4")
      .withRow("observation-5")
      .build();
  when(database.read(ResourceType.OBSERVATION)).thenReturn(contextData);
  final ResourcePath inputContext = new ResourcePathBuilder(spark)
      .expression("Observation")
      .resourceType(ResourceType.OBSERVATION)
      .database(database)
      .singular(true)
      .build();

  // Boolean condition values, including nulls and multi-element resources.
  final Dataset<Row> conditionData = new DatasetBuilder(spark)
      .withIdColumn(ID_ALIAS)
      .withEidColumn()
      .withColumn(DataTypes.BooleanType)
      .withRow("observation-1", makeEid(0), false)
      .withRow("observation-2", makeEid(0), true)
      .withRow("observation-3", makeEid(0), null)
      .withRow("observation-4", makeEid(0), true)
      .withRow("observation-4", makeEid(1), false)
      .withRow("observation-5", makeEid(0), null)
      .withRow("observation-5", makeEid(1), null)
      .build();
  final ElementPath inputPath = new ElementPathBuilder(spark)
      .fhirType(FHIRDefinedType.BOOLEAN)
      .dataset(conditionData)
      .idAndEidAndValueColumns()
      .expression("valueBoolean")
      .singular(false)
      .build();

  // Field assignment (not a local): the context is shared with the fixture.
  parserContext = new ParserContextBuilder(spark, fhirContext)
      .groupingColumns(Collections.singletonList(inputPath.getIdColumn()))
      .build();
  final NonLiteralPath condition = inputPath.toThisPath();

  // Non-literal result argument: one integer per resource.
  final Dataset<Row> integerData = new DatasetBuilder(spark)
      .withIdColumn(ID_ALIAS)
      .withEidColumn()
      .withColumn(DataTypes.IntegerType)
      .withRow("observation-1", makeEid(0), 1)
      .withRow("observation-2", makeEid(0), 2)
      .withRow("observation-3", makeEid(0), 3)
      .withRow("observation-4", makeEid(0), 4)
      .withRow("observation-5", makeEid(0), 5)
      .build();
  final ElementPath ifTrue = new ElementPathBuilder(spark)
      .fhirType(FHIRDefinedType.INTEGER)
      .dataset(integerData)
      .idAndEidAndValueColumns()
      .expression("someInteger")
      .singular(true)
      .build();
  // Literal result argument.
  final IntegerLiteralPath otherwise = IntegerLiteralPath.fromString("99", inputContext);

  // Case 1: iif($this, someInteger, 99) — true picks someInteger, else 99.
  final NamedFunctionInput firstInvocation = new NamedFunctionInput(parserContext, inputPath,
      Arrays.asList(condition, ifTrue, otherwise));
  final FhirPath result1 = NamedFunction.getInstance("iif").invoke(firstInvocation);
  final Dataset<Row> expected1 = new DatasetBuilder(spark)
      .withIdColumn(ID_ALIAS)
      .withEidColumn()
      .withColumn(DataTypes.IntegerType)
      .withRow("observation-1", makeEid(0), 99)
      .withRow("observation-2", makeEid(0), 2)
      .withRow("observation-3", makeEid(0), 99)
      .withRow("observation-4", makeEid(0), 4)
      .withRow("observation-4", makeEid(1), 99)
      .withRow("observation-5", makeEid(0), 99)
      .withRow("observation-5", makeEid(1), 99)
      .build();
  assertThat(result1)
      .hasExpression("valueBoolean.iif($this, someInteger, 99)")
      .isNotSingular()
      .isElementPath(IntegerPath.class)
      .selectOrderedResultWithEid()
      .hasRows(expected1);

  // Case 2: arguments swapped — iif($this, 99, someInteger).
  final NamedFunctionInput secondInvocation = new NamedFunctionInput(parserContext, inputPath,
      Arrays.asList(condition, otherwise, ifTrue));
  final FhirPath result2 = NamedFunction.getInstance("iif").invoke(secondInvocation);
  final Dataset<Row> expected2 = new DatasetBuilder(spark)
      .withIdColumn(ID_ALIAS)
      .withEidColumn()
      .withColumn(DataTypes.IntegerType)
      .withRow("observation-1", makeEid(0), 1)
      .withRow("observation-2", makeEid(0), 99)
      .withRow("observation-3", makeEid(0), 3)
      .withRow("observation-4", makeEid(0), 99)
      .withRow("observation-4", makeEid(1), 4)
      .withRow("observation-5", makeEid(0), 5)
      .withRow("observation-5", makeEid(1), 5)
      .build();
  assertThat(result2)
      .hasExpression("valueBoolean.iif($this, 99, someInteger)")
      .isNotSingular()
      .isElementPath(IntegerPath.class)
      .selectOrderedResultWithEid()
      .hasRows(expected2);
}
Aggregations