Use of com.amazonaws.athena.connectors.neptune.propertygraph.Enums.TableSchemaMetaType in the aws-athena-query-federation project by awslabs.
The example below is the executeQuery method of the PropertyGraphHandler class.
/**
* Used to read the row data associated with the provided Split.
*
* @param spiller A BlockSpiller that should be used to write the row
* data associated with this Split. The BlockSpiller
* automatically handles chunking the response,
* encrypting, and spilling to S3.
* @param recordsRequest Details of the read request, including: 1. The
* Split 2. The Catalog, Database, and Table the read
* request is for. 3. The filtering predicate (if any)
* 4. The columns required for projection.
* @param queryStatusChecker A QueryStatusChecker that you can use to stop doing
* work for a query that has already terminated
* @throws Exception
* @note Avoid writing >10 rows per-call to BlockSpiller.writeRow(...) because
* this will limit the BlockSpiller's ability to control Block size. The
* resulting increase in Block size may cause failures and reduced
* performance.
*/
public void executeQuery(ReadRecordsRequest recordsRequest, final QueryStatusChecker queryStatusChecker, final BlockSpiller spiller) throws Exception {
    logger.debug("readWithConstraint: enter - " + recordsRequest.getSplit());

    long numRows = 0;
    Client client = neptuneConnection.getNeptuneClientConnection();
    GraphTraversalSource graphTraversalSource = neptuneConnection.getTraversalSource(client);
    GraphTraversal graphTraversal = null;
    String labelName = recordsRequest.getTableName().getTableName();
    GeneratedRowWriter.RowWriterBuilder builder = GeneratedRowWriter.newBuilder(recordsRequest.getConstraints());
    String type = recordsRequest.getSchema().getCustomMetadata().get("componenttype");
    TableSchemaMetaType tableSchemaMetaType = TableSchemaMetaType.valueOf(type.toUpperCase());

    logger.debug("readWithConstraint: schema type is " + tableSchemaMetaType.toString());

    if (tableSchemaMetaType != null) {
        switch (tableSchemaMetaType) {
            case VERTEX:
                graphTraversal = graphTraversalSource.V().hasLabel(labelName);
                getQueryPartForContraintsMap(graphTraversal, recordsRequest);
                graphTraversal = graphTraversal.valueMap().with(WithOptions.tokens);

                for (final Field nextField : recordsRequest.getSchema().getFields()) {
                    VertexRowWriter.writeRowTemplate(builder, nextField);
                }
                break;

            case EDGE:
                graphTraversal = graphTraversalSource.E().hasLabel(labelName);
                getQueryPartForContraintsMap(graphTraversal, recordsRequest);
                graphTraversal = graphTraversal.elementMap();

                for (final Field nextField : recordsRequest.getSchema().getFields()) {
                    EdgeRowWriter.writeRowTemplate(builder, nextField);
                }
                break;
        }
    }

    // log string equivalent of gremlin query
    logger.debug("readWithConstraint: enter - " + GroovyTranslator.of("g").translate(graphTraversal.asAdmin().getBytecode()));

    final GraphTraversal graphTraversalFinal1 = graphTraversal;
    final GeneratedRowWriter rowWriter1 = builder.build();

    while (graphTraversalFinal1.hasNext() && queryStatusChecker.isQueryRunning()) {
        numRows++;
        spiller.writeRows((final Block block, final int rowNum) -> {
            final Map obj = (Map) graphTraversalFinal1.next();
            return (rowWriter1.writeRow(block, rowNum, (Object) obj) ? 1 : 0);
        });
    }

    logger.info("readWithConstraint: numRows[{}]", numRows);
}
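
For reference, the switch above is the only place in this snippet that constrains the shape of TableSchemaMetaType. A minimal sketch of the enum, assuming it declares nothing beyond the two constants used here (the real source may define more):

public class Enums {
    // Sketch only: nesting inferred from the import path
    // propertygraph.Enums.TableSchemaMetaType; constants inferred
    // from the VERTEX/EDGE cases in the switch above.
    public enum TableSchemaMetaType {
        VERTEX,
        EDGE
    }
}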
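The per-field writeRowTemplate calls register extractors on the GeneratedRowWriter builder, so the per-row work inside spiller.writeRows(...) reduces to one map lookup per projected column. Below is a hypothetical, simplified version of what VertexRowWriter.writeRowTemplate might register for a VARCHAR field; the extractor and holder types come from the athena-federation-sdk, but the package paths and the single-type handling are assumptions, not the connector's actual implementation:

import java.util.List;
import java.util.Map;

import org.apache.arrow.vector.types.pojo.Field;

import com.amazonaws.athena.connector.lambda.data.writers.GeneratedRowWriter;
import com.amazonaws.athena.connector.lambda.data.writers.extractors.VarCharExtractor;
import com.amazonaws.athena.connector.lambda.data.writers.holders.NullableVarCharHolder;

public final class VertexRowWriterSketch {
    // Hypothetical stand-in for VertexRowWriter.writeRowTemplate, covering only
    // VARCHAR fields; the real writer dispatches on the field's Arrow type.
    public static void writeRowTemplate(GeneratedRowWriter.RowWriterBuilder builder, Field field) {
        final String fieldName = field.getName();
        builder.withExtractor(fieldName, (VarCharExtractor) (Object context, NullableVarCharHolder value) -> {
            // context is the Map produced by the traversal; valueMap() wraps
            // each property value in a single-element List.
            Map<?, ?> row = (Map<?, ?>) context;
            Object raw = row.get(fieldName);
            if (raw instanceof List) {
                raw = ((List<?>) raw).get(0);
            }
            value.isSet = (raw == null) ? 0 : 1;
            value.value = (raw == null) ? null : raw.toString();
        });
    }
}

Registering extractors up front like this is what lets the writeRows callback in executeQuery stay minimal: it only pulls the next traversal result and hands it to rowWriter1.writeRow(...) as the extraction context.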