
Example 31 with RyaDAOException

use of org.apache.rya.api.persist.RyaDAOException in project incubator-rya by apache.

the class MongoLoadStatements method loadStatements.

@Override
public void loadStatements(final String ryaInstanceName, final Iterable<? extends Statement> statements) throws InstanceDoesNotExistException, RyaClientException {
    requireNonNull(ryaInstanceName);
    requireNonNull(statements);
    // Ensure the Rya Instance exists.
    if (!instanceExists.exists(ryaInstanceName)) {
        throw new InstanceDoesNotExistException(String.format("There is no Rya instance named '%s'.", ryaInstanceName));
    }
    Sail sail = null;
    SailRepositoryConnection sailRepoConn = null;
    try {
        // Get a Sail object that is connected to the Rya instance.
        final MongoDBRdfConfiguration ryaConf = connectionDetails.build(ryaInstanceName);
        sail = RyaSailFactory.getInstance(ryaConf);
        final SailRepository sailRepo = new SailRepository(sail);
        sailRepoConn = sailRepo.getConnection();
        // Load the statements.
        sailRepoConn.add(statements);
    } catch (SailException | RyaDAOException | InferenceEngineException | AccumuloException | AccumuloSecurityException e) {
        throw new RyaClientException("Could not load statements into Rya because of a problem while creating the Sail object.", e);
    } catch (final RepositoryException e) {
        throw new RyaClientException("Could not load the statements into Rya.", e);
    } finally {
        // Close the resources that were opened.
        if (sailRepoConn != null) {
            try {
                sailRepoConn.close();
            } catch (final RepositoryException e) {
                log.error("Couldn't close the SailRepositoryConnection object.", e);
            }
        }
        if (sail != null) {
            try {
                sail.shutDown();
            } catch (final SailException e) {
                log.error("Couldn't close the Sail object.", e);
            }
        }
    }
}
Also used : AccumuloException(org.apache.accumulo.core.client.AccumuloException) RyaClientException(org.apache.rya.api.client.RyaClientException) SailRepository(org.openrdf.repository.sail.SailRepository) InferenceEngineException(org.apache.rya.rdftriplestore.inference.InferenceEngineException) RepositoryException(org.openrdf.repository.RepositoryException) InstanceDoesNotExistException(org.apache.rya.api.client.InstanceDoesNotExistException) SailException(org.openrdf.sail.SailException) SailRepositoryConnection(org.openrdf.repository.sail.SailRepositoryConnection) Sail(org.openrdf.sail.Sail) RyaDAOException(org.apache.rya.api.persist.RyaDAOException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) MongoDBRdfConfiguration(org.apache.rya.mongodb.MongoDBRdfConfiguration)
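
For orientation, here is a minimal caller-side sketch of this API. It is an illustration, not project code: the loadStatements variable, the "demo_" instance name, and the URIs are made up, and it assumes org.openrdf.model.ValueFactory, ValueFactoryImpl, and Statement plus java.util.Arrays and java.util.List in addition to the imports listed above.

final ValueFactory vf = ValueFactoryImpl.getInstance();
final List<Statement> statements = Arrays.asList(
        vf.createStatement(vf.createURI("http://Alice"), vf.createURI("http://talksTo"), vf.createURI("http://Bob")),
        vf.createStatement(vf.createURI("http://Bob"), vf.createURI("http://worksAt"), vf.createURI("http://CoffeeShop")));
try {
    // "demo_" is a hypothetical Rya instance name.
    loadStatements.loadStatements("demo_", statements);
} catch (final InstanceDoesNotExistException e) {
    // The named Rya instance has not been installed.
} catch (final RyaClientException e) {
    // The Sail could not be created or the statements could not be added.
}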

Example 32 with RyaDAOException

use of org.apache.rya.api.persist.RyaDAOException in project incubator-rya by apache.

the class FluoAndHistoricPcjsDemo method execute.

@Override
public void execute(final MiniAccumuloCluster accumulo, final Connector accumuloConn, final String ryaTablePrefix, final RyaSailRepository ryaRepo, final RepositoryConnection ryaConn, final MiniFluo fluo, final FluoClient fluoClient) throws DemoExecutionException {
    log.setLevel(Level.INFO);
    // 1. Introduce some RDF Statements that we are going to start with and
    // pause so the presenter can introduce this information to the audience.
    final Set<RyaStatement> relevantHistoricStatements = Sets.newHashSet(new RyaStatement(eve, talksTo, charlie), new RyaStatement(david, talksTo, alice), new RyaStatement(alice, worksAt, coffeeShop), new RyaStatement(bob, worksAt, coffeeShop));
    log.info("We add some Statements that are relevant to the query we will compute:");
    prettyLogStatements(relevantHistoricStatements);
    waitForEnter();
    log.info("We also some more Satements that aren't realted to the query we will compute");
    final Set<RyaStatement> otherHistoricStatements = Sets.newHashSet(new RyaStatement(henry, worksAt, burgerShop), new RyaStatement(irene, worksAt, burgerShop), new RyaStatement(justin, worksAt, burgerShop), new RyaStatement(kristi, worksAt, burgerShop), new RyaStatement(luke, worksAt, burgerShop), new RyaStatement(manny, worksAt, cupcakeShop), new RyaStatement(nate, worksAt, cupcakeShop), new RyaStatement(olivia, worksAt, cupcakeShop), new RyaStatement(paul, worksAt, cupcakeShop), new RyaStatement(ross, worksAt, cupcakeShop), new RyaStatement(henry, talksTo, irene), new RyaStatement(henry, talksTo, justin), new RyaStatement(kristi, talksTo, irene), new RyaStatement(luke, talksTo, irene), new RyaStatement(sally, talksTo, paul), new RyaStatement(sally, talksTo, ross), new RyaStatement(sally, talksTo, kristi), new RyaStatement(tim, talksTo, nate), new RyaStatement(tim, talksTo, paul), new RyaStatement(tim, talksTo, kristi));
    log.info("Theese statements will also be inserted into the core Rya tables:");
    prettyLogStatements(otherHistoricStatements);
    waitForEnter();
    // 2. Load the statements into the core Rya tables.
    log.info("Loading the historic statements into Rya...");
    loadDataIntoRya(ryaConn, relevantHistoricStatements);
    loadDataIntoRya(ryaConn, otherHistoricStatements);
    log.info("");
    // 3. Introduce the query that we're going to load into Fluo and pause so that the
    // presenter may show what they believe the expected output should be.
    final String sparql = "SELECT ?patron ?employee " + "WHERE { " + "?patron <http://talksTo> ?employee. " + "?employee <http://worksAt> <http://CoffeeShop>. " + "}";
    log.info("The following SPARQL query will be loaded into the Fluo application for incremental updates:");
    prettyLogSparql(sparql);
    waitForEnter();
    // 4. Write the query to Fluo and import the historic matches. Wait for the app to finish exporting results.
    log.info("Telling Fluo to maintain the query and import the historic Statement Pattern matches.");
    final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, ryaTablePrefix);
    final String pcjId;
    try {
        // Create the PCJ Index in Rya.
        pcjId = pcjStorage.createPcj(sparql);
        // Tell the Fluo app to maintain it.
        new CreateFluoPcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, ryaTablePrefix);
    } catch (MalformedQueryException | PcjException | RyaDAOException | UnsupportedQueryException e) {
        throw new DemoExecutionException("Error while using Fluo to compute and export historic matches, so the demo can not continue. Exiting.", e);
    }
    log.info("Waiting for the fluo application to finish exporting the initial results...");
    fluo.waitForObservers();
    log.info("Historic result exporting finished.");
    log.info("");
    // 5. Show that the Fluo app exported the results to the PCJ table in Accumulo.
    log.info("The following Binding Sets were exported to the PCJ with ID '" + pcjId + "' in Rya:");
    try (CloseableIterator<BindingSet> resultsIt = pcjStorage.listResults(pcjId)) {
        while (resultsIt.hasNext()) {
            log.info("    " + resultsIt.next());
        }
    } catch (final Exception e) {
        throw new DemoExecutionException("Could not fetch the PCJ's reuslts from Accumulo. Exiting.", e);
    }
    waitForEnter();
    // 6. Introduce some new Statements that we will stream into the Fluo app.
    final RyaStatement newLeft = new RyaStatement(george, talksTo, frank);
    final RyaStatement newRight = new RyaStatement(frank, worksAt, coffeeShop);
    final RyaStatement joinLeft = new RyaStatement(eve, talksTo, bob);
    final RyaStatement joinRight = new RyaStatement(charlie, worksAt, coffeeShop);
    final Set<RyaStatement> relevantstreamedStatements = Sets.newHashSet(newLeft, newRight, joinLeft, joinRight);
    log.info("We stream these relevant Statements into Fluo and the core Rya tables:");
    log.info(prettyFormat(newLeft) + "          - Part of a new result");
    log.info(prettyFormat(newRight) + "      - Other part of a new result");
    log.info(prettyFormat(joinLeft) + "               - Joins with a historic <http://talksTo> statement");
    log.info(prettyFormat(joinRight) + "    - Joins with a historic <http://worksAt> statement");
    waitForEnter();
    final Set<RyaStatement> otherStreamedStatements = Sets.newHashSet(new RyaStatement(alice, talksTo, tim), new RyaStatement(bob, talksTo, tim), new RyaStatement(charlie, talksTo, tim), new RyaStatement(frank, talksTo, tim), new RyaStatement(david, talksTo, tim), new RyaStatement(eve, talksTo, sally), new RyaStatement(george, talksTo, sally), new RyaStatement(henry, talksTo, sally), new RyaStatement(irene, talksTo, sally), new RyaStatement(justin, talksTo, sally), new RyaStatement(kristi, talksTo, manny), new RyaStatement(luke, talksTo, manny), new RyaStatement(manny, talksTo, paul), new RyaStatement(nate, talksTo, manny), new RyaStatement(olivia, talksTo, manny), new RyaStatement(paul, talksTo, kristi), new RyaStatement(ross, talksTo, kristi), new RyaStatement(sally, talksTo, kristi), new RyaStatement(olivia, talksTo, kristi), new RyaStatement(olivia, talksTo, kristi));
    log.info("We also stream these irrelevant Statements into Fluo and the core Rya tables:");
    prettyLogStatements(otherStreamedStatements);
    waitForEnter();
    // 7. Insert the new triples into the core Rya tables and the Fluo app.
    loadDataIntoRya(ryaConn, relevantstreamedStatements);
    loadDataIntoFluo(fluoClient, relevantstreamedStatements);
    log.info("Waiting for the fluo application to finish exporting the newly streamed results...");
    fluo.waitForObservers();
    log.info("Streamed result exporting finished.");
    log.info("");
    // 8. Show the new results have been exported to the PCJ table in Accumulo.
    log.info("The following Binding Sets were exported to the PCJ with ID '" + pcjId + "' in Rya:");
    try (CloseableIterator<BindingSet> resultsIt = pcjStorage.listResults(pcjId)) {
        while (resultsIt.hasNext()) {
            log.info("    " + resultsIt.next());
        }
    } catch (final Exception e) {
        throw new DemoExecutionException("Could not fetch the PCJ's reuslts from Accumulo. Exiting.", e);
    }
    log.info("");
}
Also used : BindingSet(org.openrdf.query.BindingSet) AccumuloPcjStorage(org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage) PcjException(org.apache.rya.indexing.pcj.storage.PcjException) UnsupportedQueryException(org.apache.rya.indexing.pcj.fluo.app.query.UnsupportedQueryException) RyaStatement(org.apache.rya.api.domain.RyaStatement) CreateFluoPcj(org.apache.rya.indexing.pcj.fluo.api.CreateFluoPcj) RepositoryException(org.openrdf.repository.RepositoryException) MalformedQueryException(org.openrdf.query.MalformedQueryException) RyaDAOException(org.apache.rya.api.persist.RyaDAOException) PrecomputedJoinStorage(org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage)
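
Stripped of the demo narration, the incremental-PCJ setup above reduces to a few calls. The following is a sketch under the same assumptions as the method signature (accumuloConn, ryaTablePrefix, fluoClient, and log already available); it is not a drop-in replacement for the demo.

final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, ryaTablePrefix);
final String sparql = "SELECT ?patron ?employee WHERE { "
        + "?patron <http://talksTo> ?employee. "
        + "?employee <http://worksAt> <http://CoffeeShop>. }";
try {
    // Create the PCJ index in Rya, then tell the Fluo application to maintain it incrementally.
    final String pcjId = pcjStorage.createPcj(sparql);
    new CreateFluoPcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, ryaTablePrefix);
    // Read whatever results have been exported so far.
    try (CloseableIterator<BindingSet> results = pcjStorage.listResults(pcjId)) {
        while (results.hasNext()) {
            log.info("    " + results.next());
        }
    }
} catch (final Exception e) {
    // MalformedQueryException, PcjException, RyaDAOException, UnsupportedQueryException, etc.
}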

Example 33 with RyaDAOException

use of org.apache.rya.api.persist.RyaDAOException in project incubator-rya by apache.

the class AccumuloRyaDAO method delete.

@Override
public void delete(final Iterator<RyaStatement> statements, final AccumuloRdfConfiguration conf) throws RyaDAOException {
    try {
        while (statements.hasNext()) {
            final RyaStatement stmt = statements.next();
            // query first
            final CloseableIteration<RyaStatement, RyaDAOException> query = this.queryEngine.query(stmt, conf);
            while (query.hasNext()) {
                deleteSingleRyaStatement(query.next());
            }
            for (final AccumuloIndexer index : secondaryIndexers) {
                index.deleteStatement(stmt);
            }
        }
        if (flushEachUpdate.get()) {
            mt_bw.flush();
        }
    } catch (final Exception e) {
        throw new RyaDAOException(e);
    }
}
Also used : AccumuloIndexer(org.apache.rya.accumulo.experimental.AccumuloIndexer) RyaStatement(org.apache.rya.api.domain.RyaStatement) RyaDAOException(org.apache.rya.api.persist.RyaDAOException) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) MutationsRejectedException(org.apache.accumulo.core.client.MutationsRejectedException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) IOException(java.io.IOException) AccumuloException(org.apache.accumulo.core.client.AccumuloException)
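
A caller-side sketch of the delete path, assuming an already-initialized AccumuloRyaDAO named dao; the URIs are illustrative and dao.getConf() is used only for brevity.

final List<RyaStatement> toDelete = Arrays.asList(
        new RyaStatement(new RyaURI("http://Alice"), new RyaURI("http://talksTo"), new RyaURI("http://Bob")));
try {
    // Each statement is resolved via the query engine first, then its rows and index entries are removed.
    dao.delete(toDelete.iterator(), dao.getConf());
} catch (final RyaDAOException e) {
    // Wraps the underlying Accumulo or table errors.
}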

Example 34 with RyaDAOException

use of org.apache.rya.api.persist.RyaDAOException in project incubator-rya by apache.

the class AccumuloRyaDAO method dropGraph.

@Override
public void dropGraph(final AccumuloRdfConfiguration conf, final RyaURI... graphs) throws RyaDAOException {
    BatchDeleter bd_spo = null;
    BatchDeleter bd_po = null;
    BatchDeleter bd_osp = null;
    try {
        bd_spo = createBatchDeleter(tableLayoutStrategy.getSpo(), conf.getAuthorizations());
        bd_po = createBatchDeleter(tableLayoutStrategy.getPo(), conf.getAuthorizations());
        bd_osp = createBatchDeleter(tableLayoutStrategy.getOsp(), conf.getAuthorizations());
        bd_spo.setRanges(Collections.singleton(new Range()));
        bd_po.setRanges(Collections.singleton(new Range()));
        bd_osp.setRanges(Collections.singleton(new Range()));
        for (final RyaURI graph : graphs) {
            bd_spo.fetchColumnFamily(new Text(graph.getData()));
            bd_po.fetchColumnFamily(new Text(graph.getData()));
            bd_osp.fetchColumnFamily(new Text(graph.getData()));
        }
        bd_spo.delete();
        bd_po.delete();
        bd_osp.delete();
    // TODO indexers do not support delete-UnsupportedOperation Exception will be thrown
    // for (AccumuloIndex index : secondaryIndexers) {
    // index.dropGraph(graphs);
    // }
    } catch (final Exception e) {
        throw new RyaDAOException(e);
    } finally {
        if (bd_spo != null) {
            bd_spo.close();
        }
        if (bd_po != null) {
            bd_po.close();
        }
        if (bd_osp != null) {
            bd_osp.close();
        }
    }
}
Also used : RyaURI(org.apache.rya.api.domain.RyaURI) BatchDeleter(org.apache.accumulo.core.client.BatchDeleter) RyaDAOException(org.apache.rya.api.persist.RyaDAOException) Text(org.apache.hadoop.io.Text) Range(org.apache.accumulo.core.data.Range) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) MutationsRejectedException(org.apache.accumulo.core.client.MutationsRejectedException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) IOException(java.io.IOException) AccumuloException(org.apache.accumulo.core.client.AccumuloException)
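
From the caller's side, dropping one or more named graphs is a single call. A sketch, assuming an initialized dao; the graph URI is illustrative.

try {
    // Deletes every SPO/PO/OSP row whose column family matches the graph's URI.
    dao.dropGraph(dao.getConf(), new RyaURI("http://graphs/demo"));
} catch (final RyaDAOException e) {
    // Wraps BatchDeleter and other Accumulo errors.
}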

Example 35 with RyaDAOException

use of org.apache.rya.api.persist.RyaDAOException in project incubator-rya by apache.

the class AccumuloRyaDAO method commit.

protected void commit(final Iterator<RyaStatement> commitStatements) throws RyaDAOException {
    try {
        // TODO: Should have a lock here in case we are adding and committing at the same time
        while (commitStatements.hasNext()) {
            final RyaStatement stmt = commitStatements.next();
            final Map<TABLE_LAYOUT, Collection<Mutation>> mutationMap = ryaTableMutationsFactory.serialize(stmt);
            final Collection<Mutation> spo = mutationMap.get(TABLE_LAYOUT.SPO);
            final Collection<Mutation> po = mutationMap.get(TABLE_LAYOUT.PO);
            final Collection<Mutation> osp = mutationMap.get(TABLE_LAYOUT.OSP);
            bw_spo.addMutations(spo);
            bw_po.addMutations(po);
            bw_osp.addMutations(osp);
            for (final AccumuloIndexer index : secondaryIndexers) {
                index.storeStatement(stmt);
            }
        }
        if (flushEachUpdate.get()) {
            mt_bw.flush();
        }
    } catch (final Exception e) {
        throw new RyaDAOException(e);
    }
}
Also used : TABLE_LAYOUT(org.apache.rya.api.RdfCloudTripleStoreConstants.TABLE_LAYOUT) AccumuloIndexer(org.apache.rya.accumulo.experimental.AccumuloIndexer) RyaStatement(org.apache.rya.api.domain.RyaStatement) Collection(java.util.Collection) RyaDAOException(org.apache.rya.api.persist.RyaDAOException) Mutation(org.apache.accumulo.core.data.Mutation) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) MutationsRejectedException(org.apache.accumulo.core.client.MutationsRejectedException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) IOException(java.io.IOException) AccumuloException(org.apache.accumulo.core.client.AccumuloException)
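
commit is protected, so callers normally reach it through the DAO's public add methods; in AccumuloRyaDAO the add(Iterator<RyaStatement>) overload is expected to funnel into commit. A sketch under that assumption, with an initialized dao and illustrative URIs.

final List<RyaStatement> toAdd = Arrays.asList(
        new RyaStatement(new RyaURI("http://Alice"), new RyaURI("http://worksAt"), new RyaURI("http://CoffeeShop")));
try {
    // Serializes each statement into SPO/PO/OSP mutations and hands them to the batch writers.
    dao.add(toAdd.iterator());
} catch (final RyaDAOException e) {
    // Wraps MutationsRejectedException and other Accumulo errors.
}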

Aggregations

RyaDAOException (org.apache.rya.api.persist.RyaDAOException): 100
RyaStatement (org.apache.rya.api.domain.RyaStatement): 61
RyaURI (org.apache.rya.api.domain.RyaURI): 45
Test (org.junit.Test): 39
RyaType (org.apache.rya.api.domain.RyaType): 28
IOException (java.io.IOException): 26
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException): 23
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 22
SailException (org.openrdf.sail.SailException): 15
HashSet (java.util.HashSet): 12
AccumuloRyaQueryEngine (org.apache.rya.accumulo.query.AccumuloRyaQueryEngine): 12
RdfCloudTripleStoreUtils (org.apache.rya.api.RdfCloudTripleStoreUtils): 12
Map (java.util.Map): 11
AccumuloRdfConfiguration (org.apache.rya.accumulo.AccumuloRdfConfiguration): 11
RyaClientException (org.apache.rya.api.client.RyaClientException): 11
MutationsRejectedException (org.apache.accumulo.core.client.MutationsRejectedException): 10
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 10
ArrayList (java.util.ArrayList): 9
Scanner (org.apache.accumulo.core.client.Scanner): 8
Text (org.apache.hadoop.io.Text): 8