Use of com.mongodb.DBObject in project jetty.project by Eclipse — class MongoSessionDataStore, method exists(String).
/**
 * Check whether a session with the given id exists, is valid and has not expired.
 *
 * @param id the session id
 * @return true if the session document exists, is marked valid, and either never
 *         expires or expires in the future; false otherwise
 * @throws Exception if the database query fails
 * @see org.eclipse.jetty.server.session.SessionDataStore#exists(java.lang.String)
 */
@Override
public boolean exists(String id) throws Exception {
    // Project only the expiry and validity fields; the session blob is not needed.
    DBObject fields = new BasicDBObject();
    fields.put(__EXPIRY, 1);
    fields.put(__VALID, 1);
    DBObject sessionDocument = _dbSessions.findOne(new BasicDBObject(__ID, id), fields);
    if (sessionDocument == null)
        //doesn't exist
        return false;
    Boolean valid = (Boolean) sessionDocument.get(__VALID);
    // Null-safe: a missing validity field would previously NPE on unboxing.
    if (valid == null || !valid.booleanValue())
        //invalid - nb should not happen
        return false;
    Long expiry = (Long) sessionDocument.get(__EXPIRY);
    // Null-safe: treat a missing expiry the same as expiry<=0 (never expires).
    if (expiry == null || expiry.longValue() <= 0)
        //never expires, its good
        return true;
    //expires later
    return (expiry.longValue() > System.currentTimeMillis());
}
Use of com.mongodb.DBObject in project jetty.project by Eclipse — class MongoSessionDataStore, method delete(String).
/**
 * Delete the session data for the given id. If other contexts still reference
 * the session document, only this context's entry is removed; the whole
 * document is removed when this context is the sole (or no) referent.
 *
 * @param id the session id
 * @return true if this context's session data was deleted; false if the session
 *         did not exist or carried no per-context data for any context
 * @throws Exception if the database operation fails
 * @see org.eclipse.jetty.server.session.SessionDataStore#delete(String)
 */
@Override
public boolean delete(String id) throws Exception {
    if (LOG.isDebugEnabled())
        // Fixed: second placeholder was missing, so _context was silently dropped.
        LOG.debug("Remove:session {} for context {}", id, _context);
    /*
     * Check if the session exists and if it does remove the context
     * associated with this session
     */
    BasicDBObject mongoKey = new BasicDBObject(__ID, id);
    DBObject sessionDocument = _dbSessions.findOne(mongoKey);
    if (sessionDocument != null) {
        DBObject c = (DBObject) getNestedValue(sessionDocument, __CONTEXT);
        if (c == null) {
            //delete whole doc: no context data at all, nothing to preserve
            _dbSessions.remove(mongoKey, WriteConcern.SAFE);
            return false;
        }
        Set<String> contexts = c.keySet();
        if (contexts.isEmpty()) {
            //delete whole doc: context map present but empty
            _dbSessions.remove(mongoKey, WriteConcern.SAFE);
            return false;
        }
        if (contexts.size() == 1 && contexts.iterator().next().equals(getCanonicalContextId())) {
            //we are the only context referencing this session: delete whole doc
            _dbSessions.remove(mongoKey, WriteConcern.SAFE);
            return true;
        }
        //other contexts still use this session: just remove entry for my context
        BasicDBObject remove = new BasicDBObject();
        BasicDBObject unsets = new BasicDBObject();
        unsets.put(getContextField(), 1);
        remove.put("$unset", unsets);
        // Result intentionally unused (was an unused WriteResult local).
        _dbSessions.update(mongoKey, remove, false, false, WriteConcern.SAFE);
        return true;
    } else {
        return false;
    }
}
Use of com.mongodb.DBObject in project jetty.project by Eclipse — class MongoSessionDataStore, method doGetExpired(Set).
/**
 * Find expired sessions: first verify the candidate ids this node already
 * suspects, then sweep for any sessions whose expiry predates the previous
 * scavenge cycle (or, on the first run, 3 grace periods ago).
 *
 * @param candidates session ids suspected of being expired on this node
 * @return the set of session ids confirmed expired
 * @see org.eclipse.jetty.server.session.SessionDataStore#getExpired(Set)
 */
@Override
public Set<String> doGetExpired(Set<String> candidates) {
    long now = System.currentTimeMillis();
    long upperBound = now;
    Set<String> expiredSessions = new HashSet<>();
    //firstly ask mongo to verify if these candidate ids have expired - all of
    //these candidates will be for our node
    BasicDBObject query = new BasicDBObject();
    query.append(__ID, new BasicDBObject("$in", candidates));
    query.append(__EXPIRY, new BasicDBObject("$gt", 0).append("$lt", upperBound));
    DBCursor verifiedExpiredSessions = null;
    try {
        verifiedExpiredSessions = _dbSessions.find(query, new BasicDBObject(__ID, 1));
        for (DBObject session : verifiedExpiredSessions) {
            String id = (String) session.get(__ID);
            if (LOG.isDebugEnabled())
                LOG.debug("{} Mongo confirmed expired session {}", _context, id);
            expiredSessions.add(id);
        }
    } finally {
        if (verifiedExpiredSessions != null)
            verifiedExpiredSessions.close();
    }
    //if this is our first expiry check, make sure that we only grab really old sessions
    if (_lastExpiryCheckTime <= 0)
        upperBound = (now - (3 * (1000L * _gracePeriodSec)));
    else
        upperBound = _lastExpiryCheckTime - (1000L * _gracePeriodSec);
    query = new BasicDBObject();
    BasicDBObject gt = new BasicDBObject(__EXPIRY, new BasicDBObject("$gt", 0));
    BasicDBObject lt = new BasicDBObject(__EXPIRY, new BasicDBObject("$lt", upperBound));
    BasicDBList list = new BasicDBList();
    list.add(gt);
    list.add(lt);
    // Fixed: the Mongo conjunction operator is "$and"; the bare key "and"
    // matched a literal field named "and" and so never found anything.
    query.append("$and", list);
    DBCursor oldExpiredSessions = null;
    try {
        BasicDBObject bo = new BasicDBObject(__ID, 1);
        bo.append(__EXPIRY, 1);
        oldExpiredSessions = _dbSessions.find(query, bo);
        for (DBObject session : oldExpiredSessions) {
            String id = (String) session.get(__ID);
            if (LOG.isDebugEnabled())
                LOG.debug("{} Mongo found old expired session {} exp={}", _context, id, session.get(__EXPIRY));
            expiredSessions.add(id);
        }
    } finally {
        // Fixed: guard against NPE if find() threw before assignment,
        // matching the first cursor's cleanup above.
        if (oldExpiredSessions != null)
            oldExpiredSessions.close();
    }
    return expiredSessions;
}
Use of com.mongodb.DBObject in project mongo-hadoop by MongoDB — class GridFSInputFormat, method getSplits(JobContext).
/**
 * Build the input splits for all GridFS files matching the configured query.
 * Depending on configuration, each file yields either a single split for the
 * whole file or one split per file chunk.
 *
 * @param context the Hadoop job context supplying the configuration
 * @return the list of GridFS input splits
 * @throws IOException if reading configuration or GridFS fails
 * @throws InterruptedException if the operation is interrupted
 */
@Override
public List<InputSplit> getSplits(final JobContext context) throws IOException, InterruptedException {
    final Configuration conf = context.getConfiguration();
    final DBCollection inputCollection = MongoConfigUtil.getInputCollection(conf);
    final MongoClientURI inputURI = MongoConfigUtil.getInputURI(conf);
    final GridFS gridFS = new GridFS(inputCollection.getDB(), inputCollection.getName());
    final DBObject query = MongoConfigUtil.getQuery(conf);
    final boolean wholeFileSplits = MongoConfigUtil.isGridFSWholeFileSplit(conf);
    final List<InputSplit> splits = new LinkedList<InputSplit>();
    for (final GridFSDBFile file : gridFS.find(query)) {
        final ObjectId fileId = (ObjectId) file.getId();
        final int chunkSize = (int) file.getChunkSize();
        final long fileLength = file.getLength();
        if (wholeFileSplits) {
            // One split covering the entire file.
            splits.add(new GridFSSplit(inputURI, fileId, chunkSize, fileLength));
        } else {
            // One split per chunk of the file.
            for (int chunk = 0; chunk < file.numChunks(); ++chunk) {
                splits.add(new GridFSSplit(inputURI, fileId, chunkSize, fileLength, chunk));
            }
        }
    }
    LOG.debug("Found GridFS splits: " + splits);
    return splits;
}
Use of com.mongodb.DBObject in project mongo-hadoop by MongoDB — class StandaloneMongoSplitter, method calculateSplits().
/**
 * Calculate input splits by running the MongoDB {@code splitVector} command
 * on the input collection.
 *
 * <p>Flow: run {@code splitVector} against the input namespace; if the server
 * rejects or fails the command, retry it directly against the collection's
 * primary shard (looked up in {@code config.shards}); then turn the returned
 * boundary keys into ranged splits, honouring any configured min/max split
 * keys; finally, optionally filter out empty splits.
 *
 * @return the list of computed input splits
 * @throws SplitFailedException if the server reports an error calculating splits
 */
@Override
public List<InputSplit> calculateSplits() throws SplitFailedException {
    // Split parameters taken from the job configuration.
    final DBObject splitKey = MongoConfigUtil.getInputSplitKey(getConfiguration());
    final DBObject splitKeyMax = MongoConfigUtil.getMaxSplitKey(getConfiguration());
    final DBObject splitKeyMin = MongoConfigUtil.getMinSplitKey(getConfiguration());
    final int splitSize = MongoConfigUtil.getSplitSize(getConfiguration());
    final MongoClientURI inputURI;
    DBCollection inputCollection = null;
    final ArrayList<InputSplit> returnVal;
    try {
        inputURI = MongoConfigUtil.getInputURI(getConfiguration());
        // A separate auth URI, if configured, supplies credentials.
        MongoClientURI authURI = MongoConfigUtil.getAuthURI(getConfiguration());
        if (authURI != null) {
            inputCollection = MongoConfigUtil.getCollectionWithAuth(inputURI, authURI);
        } else {
            inputCollection = MongoConfigUtil.getCollection(inputURI);
        }
        returnVal = new ArrayList<InputSplit>();
        final String ns = inputCollection.getFullName();
        if (LOG.isDebugEnabled()) {
            LOG.debug(String.format("Running splitVector on namespace: %s.%s; hosts: %s", inputURI.getDatabase(), inputURI.getCollection(), inputURI.getHosts()));
        }
        // splitVector returns boundary keys that partition the collection into
        // chunks of at most maxChunkSize megabytes along keyPattern.
        final DBObject cmd = BasicDBObjectBuilder.start("splitVector", ns).add("keyPattern", splitKey).add("min", splitKeyMin).add("max", splitKeyMax).add("force", false).add("maxChunkSize", splitSize).get();
        CommandResult data;
        boolean ok = true;
        try {
            data = inputCollection.getDB().getSisterDB(inputURI.getDatabase()).command(cmd, ReadPreference.primary());
        } catch (final MongoException e) {
            // 2.0 servers throw exceptions rather than info in a CommandResult
            data = null;
            LOG.info(e.getMessage(), e);
            if (e.getMessage().contains("unrecognized command: splitVector")) {
                // Command unsupported here; fall through to the shard retry below.
                ok = false;
            } else {
                throw e;
            }
        }
        if (data != null) {
            if (data.containsField("$err")) {
                throw new SplitFailedException("Error calculating splits: " + data);
            } else if (!data.get("ok").equals(1.0)) {
                // Command ran but reported failure; trigger the shard retry.
                ok = false;
            }
        }
        if (!ok) {
            // splitVector was refused: retry against the primary shard directly.
            final CommandResult stats = inputCollection.getStats();
            if (stats.containsField("primary")) {
                final DBCursor shards = inputCollection.getDB().getSisterDB("config").getCollection("shards").find(new BasicDBObject("_id", stats.getString("primary")));
                try {
                    if (shards.hasNext()) {
                        final DBObject shard = shards.next();
                        // "host" looks like "<setName>/<host:port>"; strip the set name prefix.
                        final String host = ((String) shard.get("host")).replace(shard.get("_id") + "/", "");
                        final MongoClientURI shardHost;
                        if (authURI != null) {
                            shardHost = new MongoClientURIBuilder(authURI).host(host).build();
                        } else {
                            shardHost = new MongoClientURIBuilder(inputURI).host(host).build();
                        }
                        MongoClient shardClient = null;
                        try {
                            shardClient = new MongoClient(shardHost);
                            data = shardClient.getDB(shardHost.getDatabase()).command(cmd, ReadPreference.primary());
                        } catch (final Exception e) {
                            // NOTE(review): failure here is only logged and 'data' keeps its
                            // previous value (possibly null) — confirm best-effort is intended.
                            LOG.error(e.getMessage(), e);
                        } finally {
                            if (shardClient != null) {
                                shardClient.close();
                            }
                        }
                    }
                } finally {
                    shards.close();
                }
            }
            if (data != null && !data.get("ok").equals(1.0)) {
                throw new SplitFailedException("Unable to calculate input splits: " + data.get("errmsg"));
            }
        }
        // Comes in a format where "min" and "max" are implicit
        // and each entry is just a boundary key; not ranged
        // NOTE(review): if every attempt above failed, 'data' may still be null
        // here and this dereference would NPE — verify upstream guarantees.
        final BasicDBList splitData = (BasicDBList) data.get("splitKeys");
        if (splitData.size() == 0) {
            LOG.warn("WARNING: No Input Splits were calculated by the split code. Proceeding with a *single* split. Data may be too" + " small, try lowering 'mongo.input.split_size' if this is undesirable.");
        }
        // Lower boundary of the first min split
        BasicDBObject lastKey = null;
        // If splitKeyMin was given, use it as first boundary.
        if (!splitKeyMin.toMap().isEmpty()) {
            lastKey = new BasicDBObject(splitKeyMin.toMap());
        }
        // Each boundary key closes the previous range and opens the next.
        for (final Object aSplitData : splitData) {
            final BasicDBObject currentKey = (BasicDBObject) aSplitData;
            returnVal.add(createSplitFromBounds(lastKey, currentKey));
            lastKey = currentKey;
        }
        BasicDBObject maxKey = null;
        // If splitKeyMax was given, use it as last boundary.
        if (!splitKeyMax.toMap().isEmpty()) {
            maxKey = new BasicDBObject(splitKeyMax.toMap());
        }
        // Last max split
        final MongoInputSplit lastSplit = createSplitFromBounds(lastKey, maxKey);
        returnVal.add(lastSplit);
    } finally {
        if (inputCollection != null) {
            // Release the underlying MongoClient connection.
            MongoConfigUtil.close(inputCollection.getDB().getMongo());
        }
    }
    if (MongoConfigUtil.isFilterEmptySplitsEnabled(getConfiguration())) {
        return filterEmptySplits(returnVal);
    }
    return returnVal;
}
Aggregations