Use of com.mongodb.DBCursor in project mongomvcc by igd-geo.
From class MongoDBVCollection, method findOne:
@SuppressWarnings("unchecked")
@Override
public Map<String, Object> findOne(Map<String, Object> example) {
    //combine the branch's query object with the given example document
    DBObject o = new BasicDBObject();
    o.putAll(_branch.getQueryObject());
    o.putAll(example);
    OIDInIndexFilter filter = new OIDInIndexFilter();
    DBCursor c = _delegate.find(o);
    //return the first document whose OID passes the index filter
    for (DBObject obj : c) {
        if (filter.filter(obj)) {
            if (obj instanceof Map) {
                return (Map<String, Object>) obj;
            }
            return obj.toMap();
        }
    }
    return null;
}
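A minimal usage sketch of this method (the branch variable, the "persons" collection, and the "name" field are assumptions for illustration, not part of the project code):

import java.util.HashMap;
import java.util.Map;

//hypothetical caller of findOne()
VCollection persons = branch.getCollection("persons");
Map<String, Object> example = new HashMap<String, Object>();
example.put("name", "Elvis");
Map<String, Object> doc = persons.findOne(example);
if (doc != null) {
    System.out.println("found: " + doc.get("name"));
}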
Use of com.mongodb.DBCursor in project mongomvcc by igd-geo.
From class MongoDBVMaintenance, method doFindUnreferencedDocuments:
private long[] doFindUnreferencedDocuments(String collection, long expiry, TimeUnit unit) {
    long maxTime = getMaxTime(expiry, unit);

    //fetch the OIDs of all documents older than the expiry time
    //(also include docs without a timestamp)
    DBCollection collDocs = _db.getDB().getCollection(collection);
    DBCursor docs = collDocs.find(new BasicDBObject(MongoDBConstants.TIMESTAMP,
            new BasicDBObject("$not", new BasicDBObject("$gte", maxTime))),
            new BasicDBObject(MongoDBConstants.ID, 1));
    IdSet oids = new IdHashSet(docs.count());
    for (DBObject o : docs) {
        oids.add((Long) o.get(MongoDBConstants.ID));
    }

    //iterate through all commits and eliminate referenced documents
    DBCollection collCommits = _db.getDB().getCollection(MongoDBConstants.COLLECTION_COMMITS);
    for (DBObject o : collCommits.find()) {
        Commit c = Tree.deserializeCommit(o);
        Map<String, IdMap> allObjs = c.getObjects();
        IdMap objs = allObjs.get(collection);
        if (objs != null) {
            //eliminate OIDs referenced by this commit
            IdMapIterator mi = objs.iterator();
            while (mi.hasNext()) {
                mi.advance();
                oids.remove(mi.value());
            }
        }
    }

    //the remaining OIDs must be the unreferenced ones
    return oids.toArray();
}
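The $not wrapper is what makes the expiry query also match documents that lack a timestamp field entirely: a plain $lt would skip them, because comparison operators never match documents where the field is absent. A sketch of the same query shape against the raw 2.x driver (the collection variable and the literal field names stand in for the MongoDBConstants values):

//matches timestamp < maxTime as well as documents without a timestamp
DBObject expired = new BasicDBObject("timestamp",
        new BasicDBObject("$not", new BasicDBObject("$gte", maxTime)));
//projection: fetch only the _id field
DBObject onlyIds = new BasicDBObject("_id", 1);
DBCursor docs = collection.find(expired, onlyIds);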
Use of com.mongodb.DBCursor in project mongomvcc by igd-geo.
From class MongoDBVCollectionTest, method lifetimeInsertedLaterOptimization:
/**
 * Tests if the lifetime optimization takes effect. Objects that have
 * been inserted in a later commit should not be loaded but filtered
 * out on the database level already.
 */
@Test
@Ignore("Not ready yet. We need to implement full branch history.")
public void lifetimeInsertedLaterOptimization() {
    //ignore this test if we're on MongoDB 1.x
    assumeNotNull(((MongoDBVDatabase) _db).getBuildInfo());
    assumeTrue(((MongoDBVDatabase) _db).getBuildInfo().getMajorVersion() >= 2);

    //insert two documents to skip the in-index shortcut
    putPerson("Max", 6);
    putPerson("Pax", 8);
    long firstCID = _master.commit();
    putPerson("Elvis", 3);
    _master.commit();

    VBranch oldMaster = _db.checkout(firstCID);
    VCollection persons = oldMaster.getCollection("persons");
    VCursor cursor = persons.find();
    DBCursor dbcursor = extractDBCursor(cursor);
    assertEquals(2, cursor.size());
    assertTrue(hasAttachedFilter(cursor));
    assertEquals(2, dbcursor.size());
}
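putPerson, extractDBCursor, and hasAttachedFilter are helpers from the surrounding test class. A plausible sketch of putPerson, assuming a simple name/age schema (hypothetical, not the project's actual implementation):

//hypothetical test helper: inserts a person document into the master branch
private void putPerson(String name, int age) {
    Map<String, Object> person = new HashMap<String, Object>();
    person.put("name", name);
    person.put("age", age);
    _master.getCollection("persons").insert(person);
}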
Use of com.mongodb.DBCursor in project openhab1-addons by openhab.
From class MongoDBPersistenceService, method query:
@Override
public Iterable<HistoricItem> query(FilterCriteria filter) {
    if (!initialized) {
        return Collections.emptyList();
    }
    if (!isConnected()) {
        connectToDatabase();
    }
    if (!isConnected()) {
        return Collections.emptyList();
    }

    String name = filter.getItemName();
    Item item = getItem(name);
    List<HistoricItem> items = new ArrayList<HistoricItem>();

    DBObject query = new BasicDBObject();
    if (filter.getItemName() != null) {
        query.put(FIELD_ITEM, filter.getItemName());
    }
    if (filter.getState() != null && filter.getOperator() != null) {
        String op = convertOperator(filter.getOperator());
        Object value = convertValue(filter.getState());
        query.put(FIELD_VALUE, new BasicDBObject(op, value));
    }
    //combine both date bounds into a single range object; putting them
    //into the query separately would let the $lte bound overwrite the
    //$gte bound when both are set
    if (filter.getBeginDate() != null || filter.getEndDate() != null) {
        BasicDBObject range = new BasicDBObject();
        if (filter.getBeginDate() != null) {
            range.put("$gte", filter.getBeginDate());
        }
        if (filter.getEndDate() != null) {
            range.put("$lte", filter.getEndDate());
        }
        query.put(FIELD_TIMESTAMP, range);
    }

    Integer sortDir = (filter.getOrdering() == Ordering.ASCENDING) ? 1 : -1;
    DBCursor cursor = this.mongoCollection.find(query)
            .sort(new BasicDBObject(FIELD_TIMESTAMP, sortDir))
            .skip(filter.getPageNumber() * filter.getPageSize())
            .limit(filter.getPageSize());

    while (cursor.hasNext()) {
        BasicDBObject obj = (BasicDBObject) cursor.next();
        final State state;
        if (item instanceof NumberItem) {
            state = new DecimalType(obj.getDouble(FIELD_VALUE));
        } else if (item instanceof DimmerItem) {
            state = new PercentType(obj.getInt(FIELD_VALUE));
        } else if (item instanceof SwitchItem) {
            state = OnOffType.valueOf(obj.getString(FIELD_VALUE));
        } else if (item instanceof ContactItem) {
            state = OpenClosedType.valueOf(obj.getString(FIELD_VALUE));
        } else if (item instanceof RollershutterItem) {
            state = new PercentType(obj.getInt(FIELD_VALUE));
        } else if (item instanceof ColorItem) {
            state = new HSBType(obj.getString(FIELD_VALUE));
        } else if (item instanceof DateTimeItem) {
            Calendar cal = Calendar.getInstance();
            cal.setTime(obj.getDate(FIELD_VALUE));
            state = new DateTimeType(cal);
        } else {
            state = new StringType(obj.getString(FIELD_VALUE));
        }
        items.add(new MongoDBItem(name, state, obj.getDate(FIELD_TIMESTAMP)));
    }
    return items;
}
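A usage sketch from the caller's side, using the openHAB 1.x FilterCriteria API (the item name, dates, and page size are illustrative assumptions):

//hypothetical caller of query()
FilterCriteria filter = new FilterCriteria();
filter.setItemName("Temperature_Livingroom");
filter.setBeginDate(begin); //java.util.Date, becomes the $gte bound
filter.setEndDate(end);     //java.util.Date, becomes the $lte bound
filter.setOrdering(Ordering.DESCENDING);
filter.setPageSize(50);
filter.setPageNumber(0);    //skip = pageNumber * pageSize
for (HistoricItem hi : service.query(filter)) {
    System.out.println(hi.getTimestamp() + " -> " + hi.getState());
}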
Use of com.mongodb.DBCursor in project camel by apache.
From class GridFsProducer, method process:
public void process(Exchange exchange) throws Exception {
    String operation = endpoint.getOperation();
    if (operation == null) {
        operation = exchange.getIn().getHeader(GridFsEndpoint.GRIDFS_OPERATION, String.class);
    }
    if (operation == null || "create".equals(operation)) {
        final String filename = exchange.getIn().getHeader(Exchange.FILE_NAME, String.class);
        Long chunkSize = exchange.getIn().getHeader(GridFsEndpoint.GRIDFS_CHUNKSIZE, Long.class);
        InputStream ins = exchange.getIn().getMandatoryBody(InputStream.class);
        GridFSInputFile gfsFile = endpoint.getGridFs().createFile(ins, filename, true);
        if (chunkSize != null && chunkSize > 0) {
            gfsFile.setChunkSize(chunkSize);
        }
        final String ct = exchange.getIn().getHeader(Exchange.CONTENT_TYPE, String.class);
        if (ct != null) {
            gfsFile.setContentType(ct);
        }
        String metaData = exchange.getIn().getHeader(GridFsEndpoint.GRIDFS_METADATA, String.class);
        DBObject dbObject = (DBObject) JSON.parse(metaData);
        gfsFile.setMetaData(dbObject);
        gfsFile.save();
        //add headers with the id and file name produced by the driver
        exchange.getIn().setHeader(Exchange.FILE_NAME_PRODUCED, gfsFile.getFilename());
        exchange.getIn().setHeader(GridFsEndpoint.GRIDFS_FILE_ID_PRODUCED, gfsFile.getId());
    } else if ("remove".equals(operation)) {
        final String filename = exchange.getIn().getHeader(Exchange.FILE_NAME, String.class);
        endpoint.getGridFs().remove(filename);
    } else if ("findOne".equals(operation)) {
        final String filename = exchange.getIn().getHeader(Exchange.FILE_NAME, String.class);
        GridFSDBFile file = endpoint.getGridFs().findOne(filename);
        if (file != null) {
            exchange.getIn().setHeader(GridFsEndpoint.GRIDFS_METADATA, JSON.serialize(file.getMetaData()));
            exchange.getIn().setHeader(Exchange.FILE_CONTENT_TYPE, file.getContentType());
            exchange.getIn().setHeader(Exchange.FILE_LENGTH, file.getLength());
            exchange.getIn().setHeader(Exchange.FILE_LAST_MODIFIED, file.getUploadDate());
            exchange.getIn().setBody(file.getInputStream(), InputStream.class);
        } else {
            throw new FileNotFoundException("No GridFS file for " + filename);
        }
    } else if ("listAll".equals(operation)) {
        final String filename = exchange.getIn().getHeader(Exchange.FILE_NAME, String.class);
        DBCursor cursor;
        if (filename == null) {
            cursor = endpoint.getGridFs().getFileList();
        } else {
            cursor = endpoint.getGridFs().getFileList(new BasicDBObject("filename", filename));
        }
        exchange.getIn().setBody(new DBCursorFilenameReader(cursor), Reader.class);
    } else if ("count".equals(operation)) {
        final String filename = exchange.getIn().getHeader(Exchange.FILE_NAME, String.class);
        DBCursor cursor;
        if (filename == null) {
            cursor = endpoint.getGridFs().getFileList();
        } else {
            cursor = endpoint.getGridFs().getFileList(new BasicDBObject("filename", filename));
        }
        exchange.getIn().setBody(cursor.count(), Integer.class);
    }
}
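A sketch of how this producer might be driven from a Camel ProducerTemplate; the endpoint URI, connection bean name, and file name are illustrative assumptions:

//hypothetical usage via a ProducerTemplate
ProducerTemplate template = camelContext.createProducerTemplate();

//"create": store the message body as a GridFS file
template.sendBodyAndHeader("gridfs:myConnection?database=mydb&operation=create",
        new FileInputStream("report.pdf"), Exchange.FILE_NAME, "report.pdf");

//"count": ask how many stored files carry that name
Integer n = template.requestBodyAndHeader(
        "gridfs:myConnection?database=mydb&operation=count",
        null, Exchange.FILE_NAME, "report.pdf", Integer.class);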