Search in sources :

Example 1 with Column

use of org.apache.fluo.api.data.Column in project incubator-rya by apache.

the class FluoBinPruner method pruneBindingSetBin.

/**
 * Deletes the BindingSets in the specified bin from the BindingSet
 * Column of the indicated Fluo nodeId.
 *
 * @param nodeBin
 *            - wrapper containing the Fluo nodeId and the id of the bin to prune
 */
@Override
public void pruneBindingSetBin(final NodeBin nodeBin) {
    final String id = nodeBin.getNodeId();
    final long bin = nodeBin.getBin();
    try (Transaction tx = client.newTransaction()) {
        final Optional<NodeType> type = NodeType.fromNodeId(id);
        if (!type.isPresent()) {
            log.trace("Unable to determine NodeType from id: " + id);
            throw new RuntimeException("Unable to determine NodeType from id: " + id);
        }
        final Column batchInfoColumn = type.get().getResultColumn();
        final Bytes batchInfoSpanPrefix = BindingHashShardingFunction.getShardedScanPrefix(id, vf.createLiteral(bin));
        final SpanBatchDeleteInformation batchInfo = SpanBatchDeleteInformation.builder().setColumn(batchInfoColumn).setSpan(Span.prefix(batchInfoSpanPrefix)).build();
        BatchInformationDAO.addBatch(tx, id, batchInfo);
        tx.commit();
    }
}
Also used : Bytes(org.apache.fluo.api.data.Bytes) Transaction(org.apache.fluo.api.client.Transaction) Column(org.apache.fluo.api.data.Column) NodeType(org.apache.rya.indexing.pcj.fluo.app.NodeType) SpanBatchDeleteInformation(org.apache.rya.indexing.pcj.fluo.app.batch.SpanBatchDeleteInformation)
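
Below is a minimal, self-contained sketch (not part of the Rya source) showing how the core Fluo pieces used above fit together: a Column is an immutable family/qualifier pair, and a Span built from a row prefix covers every row starting with that prefix, which is how the pruner scopes a delete to a single bin. The family, qualifier, and prefix strings here are illustrative assumptions, not Rya's real constants.

import org.apache.fluo.api.data.Bytes;
import org.apache.fluo.api.data.Column;
import org.apache.fluo.api.data.Span;

public class ColumnSpanSketch {
    public static void main(String[] args) {
        // A Fluo Column is an immutable (family, qualifier) pair.
        // "binding" / "set" are illustrative names, not Rya's column constants.
        Column resultColumn = new Column("binding", "set");

        // A Span over a row prefix covers every row starting with that prefix;
        // the pruner above uses the same idea to scope a delete to one bin.
        Bytes shardedPrefix = Bytes.of("QUERY_nodeId:bin42");
        Span binSpan = Span.prefix(shardedPrefix);

        System.out.println("Column: " + resultColumn + ", Span: " + binSpan);
    }
}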

Example 2 with Column

use of org.apache.fluo.api.data.Column in project incubator-rya by apache.

the class JoinBatchBindingSetUpdater method processBatch.

/**
 * Processes a {@link JoinBatchInformation}. Updates the BindingSets
 * associated with the specified nodeId. The BindingSets are processed in
 * batch fashion, where the number of results per batch is indicated by
 * {@link JoinBatchInformation#getBatchSize()}. BindingSets are either
 * Added, Deleted, or Updated according to
 * {@link JoinBatchInformation#getTask()}. In the event that the number of
 * entries that need to be updated exceeds the batch size, the row of the
 * first unprocessed BindingSet is used to create a new JoinBatch job to
 * process the remaining BindingSets.
 * @throws Exception if the batch cannot be processed
 */
@Override
public void processBatch(final TransactionBase tx, final Bytes row, final BatchInformation batch) throws Exception {
    super.processBatch(tx, row, batch);
    final String nodeId = BatchRowKeyUtil.getNodeId(row);
    Preconditions.checkArgument(batch instanceof JoinBatchInformation);
    final JoinBatchInformation joinBatch = (JoinBatchInformation) batch;
    final Task task = joinBatch.getTask();
    // Figure out which join algorithm we are going to use.
    final IterativeJoin joinAlgorithm;
    switch(joinBatch.getJoinType()) {
        case NATURAL_JOIN:
            joinAlgorithm = new NaturalJoin();
            break;
        case LEFT_OUTER_JOIN:
            joinAlgorithm = new LeftOuterJoin();
            break;
        default:
            throw new RuntimeException("Unsupported JoinType: " + joinBatch.getJoinType());
    }
    final Set<VisibilityBindingSet> bsSet = new HashSet<>();
    final Optional<RowColumn> rowCol = fillSiblingBatch(tx, joinBatch, bsSet);
    // Iterates over the resulting BindingSets from the join.
    final Iterator<VisibilityBindingSet> newJoinResults;
    final VisibilityBindingSet bs = joinBatch.getBs();
    if (joinBatch.getSide() == Side.LEFT) {
        newJoinResults = joinAlgorithm.newLeftResult(bs, bsSet.iterator());
    } else {
        newJoinResults = joinAlgorithm.newRightResult(bsSet.iterator(), bs);
    }
    // Read join metadata, create new join BindingSets and insert them into the Fluo table.
    final JoinMetadata joinMetadata = CACHE.readJoinMetadata(tx, nodeId);
    final VariableOrder joinVarOrder = joinMetadata.getVariableOrder();
    while (newJoinResults.hasNext()) {
        final VisibilityBindingSet newJoinResult = newJoinResults.next();
        // create BindingSet value
        final Bytes bsBytes = BS_SERDE.serialize(newJoinResult);
        // make rowId
        Bytes rowKey = BindingHashShardingFunction.addShard(nodeId, joinVarOrder, newJoinResult);
        final Column col = FluoQueryColumns.JOIN_BINDING_SET;
        processTask(tx, task, rowKey, col, bsBytes);
    }
    // update the span and register updated batch job
    if (rowCol.isPresent()) {
        final Span newSpan = getNewSpan(rowCol.get(), joinBatch.getSpan());
        joinBatch.setSpan(newSpan);
        BatchInformationDAO.addBatch(tx, nodeId, joinBatch);
    }
}
Also used : Task(org.apache.rya.indexing.pcj.fluo.app.batch.BatchInformation.Task) VisibilityBindingSet(org.apache.rya.api.model.VisibilityBindingSet) RowColumn(org.apache.fluo.api.data.RowColumn) VariableOrder(org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder) IterativeJoin(org.apache.rya.api.function.join.IterativeJoin) JoinMetadata(org.apache.rya.indexing.pcj.fluo.app.query.JoinMetadata) Span(org.apache.fluo.api.data.Span) Bytes(org.apache.fluo.api.data.Bytes) RowColumn(org.apache.fluo.api.data.RowColumn) Column(org.apache.fluo.api.data.Column) NaturalJoin(org.apache.rya.api.function.join.NaturalJoin) LeftOuterJoin(org.apache.rya.api.function.join.LeftOuterJoin) HashSet(java.util.HashSet)
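
The processTask call above dispatches on the batch Task. The snippet below is a hypothetical sketch of that dispatch, using only the TransactionBase primitives set and delete; the real implementation lives in the updater's parent class and may differ.

import org.apache.fluo.api.client.TransactionBase;
import org.apache.fluo.api.data.Bytes;
import org.apache.fluo.api.data.Column;
import org.apache.rya.indexing.pcj.fluo.app.batch.BatchInformation.Task;

public class TaskDispatchSketch {

    // Hypothetical stand-in for processTask(...): Add and Update both write the
    // serialized BindingSet under the given column, while Delete removes the cell.
    static void applyTask(TransactionBase tx, Task task, Bytes row, Column col, Bytes value) {
        switch (task) {
            case Add:
            case Update:
                tx.set(row, col, value);
                break;
            case Delete:
                tx.delete(row, col);
                break;
            default:
                throw new IllegalArgumentException("Unsupported Task: " + task);
        }
    }
}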

Example 3 with Column

use of org.apache.fluo.api.data.Column in project incubator-rya by apache.

the class SpanBatchBindingSetUpdater method processBatch.

/**
 * Processes a SpanBatchDeleteInformation object by deleting all entries within its Span until the batch limit is met.
 *
 * @param tx - Fluo Transaction
 * @param row - Byte row identifying BatchInformation
 * @param batch - SpanBatchDeleteInformation object to be processed
 */
@Override
public void processBatch(TransactionBase tx, Bytes row, BatchInformation batch) throws Exception {
    super.processBatch(tx, row, batch);
    Preconditions.checkArgument(batch instanceof SpanBatchDeleteInformation);
    SpanBatchDeleteInformation spanBatch = (SpanBatchDeleteInformation) batch;
    Optional<String> nodeId = spanBatch.getNodeId();
    Task task = spanBatch.getTask();
    int batchSize = spanBatch.getBatchSize();
    Span span = spanBatch.getSpan();
    Column column = batch.getColumn();
    Optional<RowColumn> rowCol = Optional.empty();
    switch(task) {
        case Add:
            log.trace("The Task Add is not supported for SpanBatchBindingSetUpdater.  Batch " + batch + " will not be processed.");
            break;
        case Delete:
            rowCol = deleteBatch(tx, nodeId, span, column, batchSize);
            break;
        case Update:
            log.trace("The Task Update is not supported for SpanBatchBindingSetUpdater.  Batch " + batch + " will not be processed.");
            break;
        default:
            log.trace("Invalid Task type.  Aborting batch operation.");
            break;
    }
    if (rowCol.isPresent()) {
        Span newSpan = getNewSpan(rowCol.get(), spanBatch.getSpan());
        log.trace("Batch size met.  There are remaining results that need to be deleted.  Creating a new batch of size: " + spanBatch.getBatchSize() + " with Span: " + newSpan + " and Column: " + column);
        spanBatch.setSpan(newSpan);
        BatchInformationDAO.addBatch(tx, BatchRowKeyUtil.getNodeId(row), spanBatch);
    }
}
Also used : Task(org.apache.rya.indexing.pcj.fluo.app.batch.BatchInformation.Task) RowColumn(org.apache.fluo.api.data.RowColumn) Column(org.apache.fluo.api.data.Column) RowColumn(org.apache.fluo.api.data.RowColumn) Span(org.apache.fluo.api.data.Span)
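
The deleteBatch call above is what enforces the batch limit. A simplified, hypothetical version of it is sketched below: scan the Span for cells under the given Column, delete at most batchSize of them, and report the first unprocessed RowColumn so a follow-up batch can resume there. The real Rya method also applies the optional nodeId filter, which is omitted here.

import java.util.Optional;

import org.apache.fluo.api.client.TransactionBase;
import org.apache.fluo.api.data.Column;
import org.apache.fluo.api.data.RowColumn;
import org.apache.fluo.api.data.RowColumnValue;
import org.apache.fluo.api.data.Span;

public class BatchDeleteSketch {

    // Simplified, hypothetical deleteBatch: delete at most batchSize cells under
    // the given Column within the Span, and return the first RowColumn that was
    // NOT processed so the caller can register a follow-up batch starting there.
    static Optional<RowColumn> deleteBatch(TransactionBase tx, Span span, Column column, int batchSize) {
        int deleted = 0;
        for (RowColumnValue rcv : tx.scanner().over(span).fetch(column).build()) {
            if (deleted >= batchSize) {
                return Optional.of(new RowColumn(rcv.getRow(), rcv.getColumn()));
            }
            tx.delete(rcv.getRow(), rcv.getColumn());
            deleted++;
        }
        return Optional.empty();
    }
}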

Example 4 with Column

use of org.apache.fluo.api.data.Column in project incubator-rya by apache.

the class ConstructQueryResultUpdater method updateConstructQueryResults.

/**
 * Updates the Construct Query results by applying the {@link ConstructGraph} to
 * create a {@link RyaSubGraph} and then writing the subgraph to {@link FluoQueryColumns#CONSTRUCT_STATEMENTS}.
 * @param tx - transaction used to write the subgraph
 * @param bs - BindingSet that the ConstructProjection expands into a subgraph
 * @param metadata - metadata that the ConstructProjection is extracted from
 */
public void updateConstructQueryResults(TransactionBase tx, VisibilityBindingSet bs, ConstructQueryMetadata metadata) {
    String nodeId = metadata.getNodeId();
    VariableOrder varOrder = metadata.getVariableOrder();
    Column column = FluoQueryColumns.CONSTRUCT_STATEMENTS;
    ConstructGraph graph = metadata.getConstructGraph();
    String parentId = metadata.getParentNodeId();
    // Create the Row Key for the emitted binding set. It does not contain visibilities.
    final Bytes resultRow = makeRowKey(nodeId, varOrder, bs);
    // If this is a new binding set, then emit it.
    if (tx.get(resultRow, column) == null || varOrder.getVariableOrders().size() < bs.size()) {
        Set<RyaStatement> statements = graph.createGraphFromBindingSet(bs);
        RyaSubGraph subgraph = new RyaSubGraph(parentId, statements);
        final Bytes nodeValueBytes = Bytes.of(serializer.toBytes(subgraph));
        log.trace("Transaction ID: " + tx.getStartTimestamp() + "\n" + "New Binding Set: " + subgraph + "\n");
        tx.set(resultRow, column, nodeValueBytes);
    }
}
Also used : Bytes(org.apache.fluo.api.data.Bytes) RyaSubGraph(org.apache.rya.api.domain.RyaSubGraph) Column(org.apache.fluo.api.data.Column) VariableOrder(org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder) RyaStatement(org.apache.rya.api.domain.RyaStatement)
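
The updater writes the subgraph to FluoQueryColumns.CONSTRUCT_STATEMENTS, so a reader can check whether a result has already been emitted for a row key with a snapshot read. The sketch below is a hypothetical read-side check, assuming resultRow is the same sharded key produced by makeRowKey above.

import org.apache.fluo.api.client.FluoClient;
import org.apache.fluo.api.client.Snapshot;
import org.apache.fluo.api.data.Bytes;
import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;

public class ConstructResultCheckSketch {

    // Hypothetical read-side check: has a subgraph already been emitted for this
    // row key? 'resultRow' is assumed to be the key produced by makeRowKey(...).
    static boolean subgraphEmitted(FluoClient client, Bytes resultRow) {
        try (Snapshot sx = client.newSnapshot()) {
            return sx.get(resultRow, FluoQueryColumns.CONSTRUCT_STATEMENTS) != null;
        }
    }
}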

Example 5 with Column

use of org.apache.fluo.api.data.Column in project incubator-rya by apache.

the class BatchIT method createSpanBatches.

private void createSpanBatches(FluoClient fluoClient, List<String> ids, List<String> prefixes, int batchSize) {
    Preconditions.checkArgument(ids.size() == prefixes.size());
    try (Transaction tx = fluoClient.newTransaction()) {
        for (int i = 0; i < ids.size(); i++) {
            String id = ids.get(i);
            String bsPrefix = prefixes.get(i);
            URI uri = vf.createURI(bsPrefix);
            Bytes prefixBytes = BindingHashShardingFunction.getShardedScanPrefix(id, uri);
            NodeType type = NodeType.fromNodeId(id).get();
            Column bsCol = type.getResultColumn();
            SpanBatchDeleteInformation.Builder builder = SpanBatchDeleteInformation.builder().setBatchSize(batchSize).setColumn(bsCol);
            if (type == NodeType.JOIN) {
                builder.setSpan(Span.prefix(type.getNodeTypePrefix()));
                builder.setNodeId(java.util.Optional.of(id));
            } else {
                builder.setSpan(Span.prefix(prefixBytes));
            }
            BatchInformationDAO.addBatch(tx, id, builder.build());
        }
        tx.commit();
    }
}
Also used : Bytes(org.apache.fluo.api.data.Bytes) Transaction(org.apache.fluo.api.client.Transaction) Column(org.apache.fluo.api.data.Column) NodeType(org.apache.rya.indexing.pcj.fluo.app.NodeType) URI(org.openrdf.model.URI) RyaURI(org.apache.rya.api.domain.RyaURI) SpanBatchDeleteInformation(org.apache.rya.indexing.pcj.fluo.app.batch.SpanBatchDeleteInformation)
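
The helper above opens one Transaction, registers every batch, and commits once. The sketch below isolates that write pattern using only core Fluo API; the column name and rows are illustrative placeholders rather than Rya's real batch-information column.

import org.apache.fluo.api.client.FluoClient;
import org.apache.fluo.api.client.Transaction;
import org.apache.fluo.api.data.Bytes;
import org.apache.fluo.api.data.Column;

public class MultiWriteSketch {

    // Minimal sketch of the transaction pattern used by createSpanBatches:
    // open a single Transaction, write several rows, then commit once.
    static void writeAll(FluoClient client, Iterable<Bytes> rows, Bytes value) {
        // Illustrative column, not Rya's real batch-information column.
        Column batchCol = new Column("batch", "info");
        try (Transaction tx = client.newTransaction()) {
            for (Bytes row : rows) {
                tx.set(row, batchCol, value);
            }
            tx.commit();
        }
    }
}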

Aggregations

Column (org.apache.fluo.api.data.Column) 22
VariableOrder (org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder) 13
Bytes (org.apache.fluo.api.data.Bytes) 9
RowColumn (org.apache.fluo.api.data.RowColumn) 9
Span (org.apache.fluo.api.data.Span) 8
JsonObject (com.google.gson.JsonObject) 4
VisibilityBindingSet (org.apache.rya.api.model.VisibilityBindingSet) 4
HashSet (java.util.HashSet) 3
Task (org.apache.rya.indexing.pcj.fluo.app.batch.BatchInformation.Task) 3
JsonPrimitive (com.google.gson.JsonPrimitive) 2
Transaction (org.apache.fluo.api.client.Transaction) 2
IterativeJoin (org.apache.rya.api.function.join.IterativeJoin) 2
Side (org.apache.rya.api.function.join.LazyJoiningIterator.Side) 2
LeftOuterJoin (org.apache.rya.api.function.join.LeftOuterJoin) 2
NaturalJoin (org.apache.rya.api.function.join.NaturalJoin) 2
NodeType (org.apache.rya.indexing.pcj.fluo.app.NodeType) 2
SpanBatchDeleteInformation (org.apache.rya.indexing.pcj.fluo.app.batch.SpanBatchDeleteInformation) 2
JoinType (org.apache.rya.indexing.pcj.fluo.app.query.JoinMetadata.JoinType) 2
IOException (java.io.IOException) 1
InvalidClassException (java.io.InvalidClassException) 1