Use of org.apache.rya.mongodb.batch.MongoDbBatchWriterException in project incubator-rya by apache.
The class AbstractMongoIndexer, method initCore:
protected void initCore() {
    dbName = conf.getMongoDBName();
    this.mongoClient = conf.getMongoClient();
    db = this.mongoClient.getDB(dbName);
    final String collectionName = conf.get(MongoDBRdfConfiguration.MONGO_COLLECTION_PREFIX, "rya") + getCollectionName();
    collection = db.getCollection(collectionName);
    flushEachUpdate = ((MongoDBRdfConfiguration) conf).flushEachUpdate();
    final MongoDbBatchWriterConfig mongoDbBatchWriterConfig = MongoDbBatchWriterUtils.getMongoDbBatchWriterConfig(conf);
    mongoDbBatchWriter = new MongoDbBatchWriter<>(new DbCollectionType(collection), mongoDbBatchWriterConfig);
    try {
        mongoDbBatchWriter.start();
    } catch (final MongoDbBatchWriterException e) {
        LOG.error("Error starting MongoDB batch writer", e);
    }
}
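Taken together, these excerpts cover the whole MongoDbBatchWriter lifecycle: start() here, addObjectsToQueue() and flush() in the DAO's add(), and shutdown() in destroy(). Below is a minimal sketch of that lifecycle in one place, using only the calls visible in these excerpts; the import paths and the writeBatch() wrapper itself are assumptions, not project code:

import java.util.List;

import com.mongodb.DBCollection;
import com.mongodb.DBObject;

import org.apache.rya.mongodb.batch.MongoDbBatchWriter;
import org.apache.rya.mongodb.batch.MongoDbBatchWriterConfig;
import org.apache.rya.mongodb.batch.MongoDbBatchWriterException;
import org.apache.rya.mongodb.batch.collection.DbCollectionType;

void writeBatch(final DBCollection collection, final MongoDbBatchWriterConfig config,
        final List<DBObject> inserts) throws MongoDbBatchWriterException {
    // Wrap the target collection so the writer knows where to insert.
    final MongoDbBatchWriter<DBObject> writer =
            new MongoDbBatchWriter<>(new DbCollectionType(collection), config);
    try {
        writer.start();                    // must be called before queueing anything
        writer.addObjectsToQueue(inserts); // queue documents; inserts happen in batches
        writer.flush();                    // force out whatever is still queued
    } finally {
        writer.shutdown();                 // every lifecycle call may throw the checked exception
    }
}

The point of the batching is to amortize round trips to MongoDB: callers queue freely, and the writer decides when a batch is large (or old) enough to send.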
Use of org.apache.rya.mongodb.batch.MongoDbBatchWriterException in project incubator-rya by apache.
The class MongoDBRyaDAO, method add:
@Override
public void add(final Iterator<RyaStatement> statementIter) throws RyaDAOException {
    final List<DBObject> dbInserts = new ArrayList<>();
    while (statementIter.hasNext()) {
        final RyaStatement ryaStatement = statementIter.next();
        final boolean canAdd = DocumentVisibilityUtil.doesUserHaveDocumentAccess(auths, ryaStatement.getColumnVisibility());
        if (canAdd) {
            final DBObject insert = storageStrategy.serialize(ryaStatement);
            dbInserts.add(insert);
            try {
                for (final RyaSecondaryIndexer index : secondaryIndexers) {
                    index.storeStatement(ryaStatement);
                }
            } catch (final IOException e) {
                log.error("Failed to add: " + ryaStatement.toString() + " to the indexer", e);
            }
        } else {
            throw new RyaDAOException("User does not have the required authorizations to add statement");
        }
    }
    try {
        mongoDbBatchWriter.addObjectsToQueue(dbInserts);
        if (flushEachUpdate.get()) {
            flush();
        }
    } catch (final MongoDbBatchWriterException e) {
        throw new RyaDAOException("Error adding statements", e);
    }
}
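From the caller's side, the checked MongoDbBatchWriterException never escapes this method: a failed batch write and a missing authorization both surface as RyaDAOException. A hypothetical caller (the dao and statements parameters, the wrapper method, and the import paths are stand-ins):

import java.util.List;

import org.apache.rya.api.domain.RyaStatement;
import org.apache.rya.api.persist.RyaDAOException;
import org.apache.rya.mongodb.MongoDBRyaDAO;

void addStatements(final MongoDBRyaDAO dao, final List<RyaStatement> statements) {
    try {
        dao.add(statements.iterator());
    } catch (final RyaDAOException e) {
        // A wrapped MongoDbBatchWriterException is available via e.getCause();
        // an authorization failure carries no cause.
        throw new IllegalStateException("Failed to add statements", e);
    }
}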
Use of org.apache.rya.mongodb.batch.MongoDbBatchWriterException in project incubator-rya by apache.
The class MongoDBRyaDAO, method destroy:
@Override
public void destroy() throws RyaDAOException {
    if (!isInitialized.get()) {
        return;
    }
    isInitialized.set(false);
    flush();
    try {
        mongoDbBatchWriter.shutdown();
    } catch (final MongoDbBatchWriterException e) {
        throw new RyaDAOException("Error shutting down MongoDB batch writer", e);
    }
    for (final MongoSecondaryIndex indexer : secondaryIndexers) {
        try {
            indexer.close();
        } catch (final IOException e) {
            log.error("Error closing indexer: " + indexer.getClass().getSimpleName(), e);
        }
    }
    IOUtils.closeQuietly(queryEngine);
}
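One subtlety: the guard reads isInitialized and clears it in a separate step, so two threads calling destroy() at the same time could both pass the check and run the teardown twice. A sketch of an atomic variant of the guard using AtomicBoolean.compareAndSet, offered as a suggested alternative rather than the project's code:

import java.util.concurrent.atomic.AtomicBoolean;

private final AtomicBoolean isInitialized = new AtomicBoolean(true);

public void destroy() throws RyaDAOException {
    // compareAndSet(true, false) flips the flag and returns true for
    // exactly one caller; everyone else returns immediately.
    if (!isInitialized.compareAndSet(true, false)) {
        return;
    }
    flush();
    // ... remainder of the teardown exactly as in the excerpt above ...
}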
Use of org.apache.rya.mongodb.batch.MongoDbBatchWriterException in project incubator-rya by apache.
The class MongoDBRyaDAO, method flush:
@Override
public void flush() throws RyaDAOException {
    try {
        mongoDbBatchWriter.flush();
        flushIndexers();
    } catch (final MongoDbBatchWriterException e) {
        throw new RyaDAOException("Error flushing data.", e);
    }
}
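Because add() calls flush() only when flushEachUpdate is set (see initCore above), a bulk loader can leave that flag off, queue everything, and pay for a single flush at the end. A hypothetical sketch, assuming flushEachUpdate is disabled in the configuration (imports as in the caller sketch above):

void bulkLoad(final MongoDBRyaDAO dao, final List<RyaStatement> statements) throws RyaDAOException {
    dao.add(statements.iterator()); // only queues documents via the batch writer
    dao.flush();                    // one flush for the whole load, not one per update
}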
Use of org.apache.rya.mongodb.batch.MongoDbBatchWriterException in project incubator-rya by apache.
The class MongoPipelineStrategy, method executeConstructRule:
/**
 * Execute a CONSTRUCT rule by converting it into a pipeline, iterating
 * through the resulting documents, and inserting them back into the data
 * store as new triples. If pipeline conversion fails, falls back on the
 * default execution strategy.
 * @param rule A construct query rule; not null.
 * @param metadata StatementMetadata to attach to new triples; not null.
 * @return The number of new triples inferred.
 * @throws ForwardChainException if execution fails.
 */
@Override
public long executeConstructRule(AbstractConstructRule rule, StatementMetadata metadata) throws ForwardChainException {
    Preconditions.checkNotNull(rule);
    logger.info("Applying inference rule " + rule + "...");
    long timestamp = System.currentTimeMillis();
    // Get a pipeline that turns individual matches into triples
    List<Bson> pipeline = null;
    try {
        int requireSourceLevel = 0;
        if (!usedBackup) {
            // If we can assume derivation levels are set properly, we can optimize by
            // pruning any derived fact whose sources are all old information. (i.e. we can
            // infer that the pruned fact would have already been derived in a previous
            // step.) But if the backup strategy has ever been used, the source triples aren't
            // guaranteed to have derivation level set.
            requireSourceLevel = requiredLevel;
        }
        pipeline = toPipeline(rule, requireSourceLevel, timestamp);
    } catch (ForwardChainException e) {
        logger.error(e);
    }
    if (pipeline == null) {
        if (backup == null) {
            logger.error("Couldn't convert " + rule + " to pipeline:");
            for (String line : rule.getQuery().toString().split("\n")) {
                logger.error("\t" + line);
            }
            throw new UnsupportedOperationException("Couldn't convert query to pipeline.");
        } else {
            logger.debug("Couldn't convert " + rule + " to pipeline:");
            for (String line : rule.getQuery().toString().split("\n")) {
                logger.debug("\t" + line);
            }
            logger.debug("Using fallback strategy.");
            usedBackup = true;
            return backup.executeConstructRule(rule, metadata);
        }
    }
    // Execute the pipeline
    for (Bson step : pipeline) {
        logger.debug("\t" + step.toString());
    }
    LongAdder count = new LongAdder();
    baseCollection.aggregate(pipeline).allowDiskUse(true).batchSize(PIPELINE_BATCH_SIZE).forEach(new Block<Document>() {
        @Override
        public void apply(Document doc) {
            final DBObject dbo = (DBObject) JSON.parse(doc.toJson());
            RyaStatement rstmt = storageStrategy.deserializeDBObject(dbo);
            if (!statementExists(rstmt)) {
                count.increment();
                doc.replace(SimpleMongoDBStorageStrategy.STATEMENT_METADATA, metadata.toString());
                try {
                    batchWriter.addObjectToQueue(doc);
                } catch (MongoDbBatchWriterException e) {
                    logger.error("Couldn't insert " + rstmt, e);
                }
            }
        }
    });
    try {
        batchWriter.flush();
    } catch (MongoDbBatchWriterException e) {
        throw new ForwardChainException("Error writing to Mongo", e);
    }
    logger.info("Added " + count + " new statements.");
    executionTimes.compute(rule, (r, previous) -> {
        if (previous != null && previous > timestamp) {
            return previous;
        } else {
            return timestamp;
        }
    });
    return count.longValue();
}
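The heart of this method is a stream-and-queue pattern: iterate the aggregation cursor, hand each surviving document to the batch writer, and flush once after the cursor is exhausted so inserts stay batched. A reduced sketch of just that pattern follows; the collection, pipeline, wrapper method, error handling, and import paths are stand-ins, and only the MongoDbBatchWriter calls (addObjectToQueue, flush) come from the excerpt:

import java.util.List;
import java.util.concurrent.atomic.LongAdder;

import org.bson.Document;
import org.bson.conversions.Bson;

import com.mongodb.client.MongoCollection;

import org.apache.rya.mongodb.batch.MongoDbBatchWriter;
import org.apache.rya.mongodb.batch.MongoDbBatchWriterException;

long streamToBatchWriter(final MongoCollection<Document> collection, final List<Bson> pipeline,
        final MongoDbBatchWriter<Document> writer) throws MongoDbBatchWriterException {
    final LongAdder count = new LongAdder(); // safe to increment from the cursor callback
    collection.aggregate(pipeline).allowDiskUse(true).forEach((Document doc) -> {
        try {
            writer.addObjectToQueue(doc); // queue only; inserts are deferred and batched
            count.increment();
        } catch (final MongoDbBatchWriterException e) {
            // one bad document shouldn't abort the whole pipeline
            System.err.println("Couldn't queue document: " + e);
        }
    });
    writer.flush(); // a single flush after the cursor is drained
    return count.longValue();
}

LongAdder is used instead of a plain long because the cursor callback cannot mutate a local primitive and the counter may be hit from whichever thread drains the cursor.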