Use of org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions in the project OpenLineage by OpenLineage: the testApply method of the class LogicalRelationDatasetBuilderTest.
@ParameterizedTest
@ValueSource(
    strings = {
      "postgresql://postgreshost:5432/sparkdata",
      "jdbc:oracle:oci8:@sparkdata",
      "jdbc:oracle:thin@sparkdata:1521:orcl",
      "mysql://localhost/sparkdata"
    })
void testApply(String connectionUri) {
  OpenLineage openLineage = new OpenLineage(OpenLineageClient.OPEN_LINEAGE_CLIENT_URI);
  String jdbcUrl = "jdbc:" + connectionUri;
  String sparkTableName = "my_spark_table";

  // Build a JDBCRelation over a single-column schema, passing the JDBC driver
  // class to JDBCOptions through a Scala immutable Map of options.
  JDBCRelation relation =
      new JDBCRelation(
          new StructType(
              new StructField[] {new StructField("name", StringType$.MODULE$, false, null)}),
          new Partition[] {},
          new JDBCOptions(
              jdbcUrl,
              sparkTableName,
              Map$.MODULE$
                  .<String, String>newBuilder()
                  .$plus$eq(Tuple2.apply("driver", Driver.class.getName()))
                  .result()),
          session); // SparkSession provided by the enclosing test class

  // Mock the query execution so that its optimized plan is a LogicalRelation
  // wrapping the JDBC relation above.
  QueryExecution qe = mock(QueryExecution.class);
  when(qe.optimizedPlan())
      .thenReturn(
          new LogicalRelation(
              relation,
              Seq$.MODULE$
                  .<AttributeReference>newBuilder()
                  .$plus$eq(
                      new AttributeReference(
                          "name",
                          StringType$.MODULE$,
                          false,
                          null,
                          ExprId.apply(1L),
                          Seq$.MODULE$.<String>empty()))
                  .result(),
              Option.empty(),
              false));

  OpenLineageContext context =
      OpenLineageContext.builder()
          .sparkContext(mock(SparkContext.class))
          .openLineage(openLineage)
          .queryExecution(qe)
          .build();

  LogicalRelationDatasetBuilder visitor =
      new LogicalRelationDatasetBuilder<>(context, DatasetFactory.output(openLineage), false);

  // Apply the builder to a job-start event and verify that the resulting
  // output dataset carries the JDBC connection URI as namespace and
  // datasource facet, and the table name as dataset name.
  List<OutputDataset> datasets =
      visitor.apply(new SparkListenerJobStart(1, 1, Seq$.MODULE$.empty(), null));

  assertEquals(1, datasets.size());
  OutputDataset ds = datasets.get(0);
  assertEquals(connectionUri, ds.getNamespace());
  assertEquals(sparkTableName, ds.getName());
  assertEquals(URI.create(connectionUri), ds.getFacets().getDataSource().getUri());
  assertEquals(connectionUri, ds.getFacets().getDataSource().getName());
}
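For reference, a minimal sketch (not taken from the OpenLineage sources; the class and method names here are hypothetical) isolating the JDBCOptions construction used above. The three-argument JDBCOptions constructor takes a scala.collection.immutable.Map, so when calling it from Java the option map has to be assembled through Scala's builder API:

import org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions;
import scala.Tuple2;
import scala.collection.immutable.Map;
import scala.collection.immutable.Map$;

class JdbcOptionsSketch { // hypothetical helper, not part of OpenLineage
  // Builds JDBCOptions for a url/table pair, supplying the JDBC driver class
  // through the "driver" option. The Scala immutable Map is built from Java
  // via Map$.MODULE$.newBuilder(), exactly as in the test above.
  static JDBCOptions jdbcOptions(String url, String table, String driverClass) {
    Map<String, String> params =
        Map$.MODULE$
            .<String, String>newBuilder()
            .$plus$eq(Tuple2.apply("driver", driverClass))
            .result();
    return new JDBCOptions(url, table, params);
  }
}

With the test's values, the equivalent call would be jdbcOptions(jdbcUrl, sparkTableName, Driver.class.getName()); the constructor folds the url and table arguments into the option map alongside the driver entry.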