Example usage of org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder in the project incubator-rya by Apache,
taken from the class FluoQueryMetadataDAOIT, method statementPatternMetadataTest.
@Test
public void statementPatternMetadataTest() throws RepositoryException {
    final FluoQueryMetadataDAO metadataDao = new FluoQueryMetadataDAO();

    // Build the metadata instance that will be round-tripped through the Fluo table.
    final StatementPatternMetadata.Builder spBuilder = StatementPatternMetadata.builder("nodeId");
    spBuilder.setVarOrder(new VariableOrder("a;b;c"));
    spBuilder.setStatementPattern("statementPattern");
    spBuilder.setParentNodeId("parentNodeId");
    final StatementPatternMetadata written = spBuilder.build();

    try (FluoClient fluoClient = FluoFactory.newClient(super.getFluoConfiguration())) {
        // Serialize the metadata into Fluo.
        try (Transaction tx = fluoClient.newTransaction()) {
            metadataDao.write(tx, written);
            tx.commit();
        }

        // Deserialize it back out of Fluo.
        StatementPatternMetadata read = null;
        try (Snapshot sx = fluoClient.newSnapshot()) {
            read = metadataDao.readStatementPatternMetadata(sx, "nodeId");
        }

        // The value read back must equal the value that was written.
        assertEquals(written, read);
    }
}
Example usage of org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder in the project incubator-rya by Apache,
taken from the class FluoQueryMetadataDAOIT, method queryMetadataTest.
@Test
public void queryMetadataTest() {
    final FluoQueryMetadataDAO metadataDao = new FluoQueryMetadataDAO();

    // Build the query metadata that will be round-tripped through the Fluo table.
    final String queryId = NodeType.generateNewFluoIdForType(NodeType.QUERY);
    final QueryMetadata.Builder metadataBuilder = QueryMetadata.builder(queryId);
    metadataBuilder.setQueryType(QueryType.PROJECTION);
    metadataBuilder.setVarOrder(new VariableOrder("y;s;d"));
    metadataBuilder.setSparql("sparql string");
    metadataBuilder.setChildNodeId("childNodeId");
    metadataBuilder.setExportStrategies(new HashSet<>(Arrays.asList(ExportStrategy.KAFKA)));
    final QueryMetadata written = metadataBuilder.build();

    try (FluoClient fluoClient = FluoFactory.newClient(super.getFluoConfiguration())) {
        // Serialize the metadata into Fluo.
        try (Transaction tx = fluoClient.newTransaction()) {
            metadataDao.write(tx, written);
            tx.commit();
        }

        // Deserialize it back out of Fluo.
        QueryMetadata read = null;
        try (Snapshot sx = fluoClient.newSnapshot()) {
            read = metadataDao.readQueryMetadata(sx, queryId);
        }

        // The value read back must equal the value that was written.
        assertEquals(written, read);
    }
}
Example usage of org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder in the project incubator-rya by Apache,
taken from the class ParsedQueryRequestTest, method parseHasVarOrders.
@Test
public void parseHasVarOrders() throws IOException {
    // The SPARQL portion shared by the raw request text and the expected parse result.
    final String sparql = "SELECT * \n" + "WHERE { \n" + " ?a <http://talksTo> ?b. \n" + " ?b <http://talksTo> ?c. \n" + "}";

    // Two "#prefix" hint lines each declare a variable order for the query.
    final String requestText = "#prefix a, b,c\n" + "#prefix b, c, a\n" + sparql;
    final ParsedQueryRequest expected = new ParsedQueryRequest(
            sparql,
            Sets.newHashSet(new VariableOrder("a", "b", "c"), new VariableOrder("b", "c", "a")));

    // Parsing the raw text must recover both the query and its variable orders.
    assertEquals(expected, ParsedQueryRequest.parse(requestText));
}
Example usage of org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder in the project incubator-rya by Apache,
taken from the class ListFluoQueriesIT, method queryMetadataTest.
@Test
public void queryMetadataTest() throws Exception {
    final FluoQueryMetadataDAO metadataDao = new FluoQueryMetadataDAO();

    final String sparql1 = "select ?x ?y ?z where {?x <uri:p1> ?y; <uri:p2> 'literal1'. ?y <uri:p3> ?z }";
    final String sparql2 = "select ?x ?y ?z where {{select ?x ?y ?z {?x <uri:p1> ?y; <uri:p2> ?z. ?y <uri:p3> ?z }}}";

    // Build the first query's metadata, exported via Kafka.
    final String queryId1 = NodeType.generateNewFluoIdForType(NodeType.QUERY);
    final QueryMetadata.Builder firstBuilder = QueryMetadata.builder(queryId1);
    firstBuilder.setQueryType(QueryType.PROJECTION);
    firstBuilder.setVarOrder(new VariableOrder("y;s;d"));
    firstBuilder.setSparql(sparql1);
    firstBuilder.setChildNodeId("childNodeId");
    firstBuilder.setExportStrategies(Sets.newHashSet(ExportStrategy.KAFKA));
    final QueryMetadata firstMetadata = firstBuilder.build();

    // Build the second query's metadata, exported via Rya.
    final String queryId2 = NodeType.generateNewFluoIdForType(NodeType.QUERY);
    final QueryMetadata.Builder secondBuilder = QueryMetadata.builder(queryId2);
    secondBuilder.setQueryType(QueryType.PROJECTION);
    secondBuilder.setVarOrder(new VariableOrder("y;s;d"));
    secondBuilder.setSparql(sparql2);
    secondBuilder.setChildNodeId("childNodeId");
    secondBuilder.setExportStrategies(Sets.newHashSet(ExportStrategy.RYA));
    final QueryMetadata secondMetadata = secondBuilder.build();

    try (FluoClient fluoClient = FluoFactory.newClient(super.getFluoConfiguration())) {
        // Serialize both metadata objects into Fluo in a single transaction.
        try (Transaction tx = fluoClient.newTransaction()) {
            metadataDao.write(tx, firstMetadata);
            metadataDao.write(tx, secondMetadata);
            tx.commit();
        }

        // List the queries stored in Fluo.
        final ListFluoQueries lister = new ListFluoQueries();
        final List<String> listed = lister.listFluoQueries(fluoClient);

        // Build the two descriptions the listing is expected to contain.
        final String expected1 = new FluoQueryStringBuilder()
                .setQueryId(queryId1)
                .setQueryType(QueryType.PROJECTION)
                .setQuery(sparql1)
                .setExportStrategies(Sets.newHashSet(ExportStrategy.KAFKA))
                .build();
        final String expected2 = new FluoQueryStringBuilder()
                .setQueryId(queryId2)
                .setQueryType(QueryType.PROJECTION)
                .setQuery(sparql2)
                .setExportStrategies(Sets.newHashSet(ExportStrategy.RYA))
                .build();

        // Compare as sets since the listing order is not significant here.
        final Set<String> expected = new HashSet<>();
        expected.add(expected1);
        expected.add(expected2);
        Assert.assertEquals(expected, Sets.newHashSet(listed));
    }
}
Example usage of org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder in the project incubator-rya by Apache,
taken from the class BindingHashShardingFunctionTest, method bindingSetRowTest.
@Test
public void bindingSetRowTest() {
    // Build a visibility binding set over two bindings.
    final QueryBindingSet bindings = new QueryBindingSet();
    bindings.addBinding("entity", vf.createURI("urn:entity"));
    bindings.addBinding("location", vf.createLiteral("location_1"));
    final VisibilityBindingSet visibilityBindings = new VisibilityBindingSet(bindings);

    final String nodeId = NodeType.generateNewFluoIdForType(NodeType.STATEMENT_PATTERN);
    final VariableOrder order = new VariableOrder("entity", "location");

    // An unsharded row key and a sharded one built from the same inputs...
    final Bytes plainRow = RowKeyUtil.makeRowKey(nodeId, order, visibilityBindings);
    final Bytes shardedRow = BindingHashShardingFunction.addShard(nodeId, order, visibilityBindings);

    // ...must parse to the same BindingSetRow once the shard is stripped.
    final BindingSetRow fromPlain = BindingSetRow.make(plainRow);
    final BindingSetRow fromSharded = BindingSetRow.makeFromShardedRow(Bytes.of(SP_PREFIX), shardedRow);
    Assert.assertEquals(fromPlain, fromSharded);
}
Aggregations