Use of com.mongodb.MongoException in project camel by apache.
The class GridFsConsumer, method run.
@Override
public void run() {
    DBCursor c = null;
    java.util.Date fromDate = null;
    QueryStrategy s = endpoint.getQueryStrategy();
    boolean usesTimestamp = s != QueryStrategy.FileAttribute;
    boolean persistsTimestamp = s == QueryStrategy.PersistentTimestamp || s == QueryStrategy.PersistentTimestampAndFileAttribute;
    boolean usesAttribute = s == QueryStrategy.FileAttribute || s == QueryStrategy.TimeStampAndFileAttribute || s == QueryStrategy.PersistentTimestampAndFileAttribute;
    DBCollection ptsCollection = null;
    DBObject persistentTimestamp = null;
    if (persistsTimestamp) {
        ptsCollection = endpoint.getDB().getCollection(endpoint.getPersistentTSCollection());
        // ensure standard indexes as long as collections are small
        try {
            if (ptsCollection.count() < 1000) {
                ptsCollection.createIndex(new BasicDBObject("id", 1));
            }
        } catch (MongoException e) {
            // TODO: Logging
        }
        persistentTimestamp = ptsCollection.findOne(new BasicDBObject("id", endpoint.getPersistentTSObject()));
        if (persistentTimestamp == null) {
            persistentTimestamp = new BasicDBObject("id", endpoint.getPersistentTSObject());
            fromDate = new java.util.Date();
            persistentTimestamp.put("timestamp", fromDate);
            ptsCollection.save(persistentTimestamp);
        }
        fromDate = (java.util.Date) persistentTimestamp.get("timestamp");
    } else if (usesTimestamp) {
        fromDate = new java.util.Date();
    }
    try {
        Thread.sleep(endpoint.getInitialDelay());
        while (isStarted()) {
            if (c == null || c.getCursorId() == 0) {
                if (c != null) {
                    c.close();
                }
                String queryString = endpoint.getQuery();
                DBObject query;
                if (queryString == null) {
                    query = new BasicDBObject();
                } else {
                    query = (DBObject) JSON.parse(queryString);
                }
                if (usesTimestamp) {
                    query.put("uploadDate", new BasicDBObject("$gt", fromDate));
                }
                if (usesAttribute) {
                    query.put(endpoint.getFileAttributeName(), null);
                }
                c = endpoint.getFilesCollection().find(query);
            }
            boolean dateModified = false;
            while (c.hasNext() && isStarted()) {
                GridFSDBFile file = (GridFSDBFile) c.next();
                GridFSDBFile forig = file;
                if (usesAttribute) {
                    file.put(endpoint.getFileAttributeName(), "processing");
                    DBObject q = BasicDBObjectBuilder.start("_id", file.getId()).append("camel-processed", null).get();
                    forig = (GridFSDBFile) endpoint.getFilesCollection().findAndModify(q, null, null, false, file, true, false);
                }
                if (forig != null) {
                    file = endpoint.getGridFs().findOne(new BasicDBObject("_id", file.getId()));
                    Exchange exchange = endpoint.createExchange();
                    exchange.getIn().setHeader(GridFsEndpoint.GRIDFS_METADATA, JSON.serialize(file.getMetaData()));
                    exchange.getIn().setHeader(Exchange.FILE_CONTENT_TYPE, file.getContentType());
                    exchange.getIn().setHeader(Exchange.FILE_LENGTH, file.getLength());
                    exchange.getIn().setHeader(Exchange.FILE_LAST_MODIFIED, file.getUploadDate());
                    exchange.getIn().setBody(file.getInputStream(), InputStream.class);
                    try {
                        getProcessor().process(exchange);
                        if (usesAttribute) {
                            forig.put(endpoint.getFileAttributeName(), "done");
                            endpoint.getFilesCollection().save(forig);
                        }
                        if (usesTimestamp) {
                            if (file.getUploadDate().compareTo(fromDate) > 0) {
                                fromDate = file.getUploadDate();
                                dateModified = true;
                            }
                        }
                    } catch (Exception e) {
                        // TODO Auto-generated catch block
                        e.printStackTrace();
                    }
                }
            }
            if (persistsTimestamp && dateModified) {
                persistentTimestamp.put("timestamp", fromDate);
                ptsCollection.save(persistentTimestamp);
            }
            Thread.sleep(endpoint.getDelay());
        }
    } catch (Throwable e1) {
        // TODO Auto-generated catch block
        e1.printStackTrace();
    }
    if (c != null) {
        c.close();
    }
}
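The MongoException handling in this consumer is the guarded createIndex call: index creation is treated as a best-effort optimization, so a driver error must not kill the polling thread. Below is a minimal sketch of that pattern in isolation, assuming the legacy driver's DBCollection and an SLF4J logger; TimestampCollectionHelper and ensureIdIndex are illustrative names, not part of Camel.

import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.MongoException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public final class TimestampCollectionHelper {

    private static final Logger LOG = LoggerFactory.getLogger(TimestampCollectionHelper.class);

    private TimestampCollectionHelper() {
    }

    /**
     * Ensures an index on "id" for small timestamp collections, swallowing
     * MongoException the way the consumer above does, but logging it instead
     * of leaving a TODO.
     */
    public static void ensureIdIndex(DBCollection ptsCollection) {
        try {
            // Only build the index while the collection is small; on larger
            // collections a foreground index build could stall the server.
            if (ptsCollection.count() < 1000) {
                ptsCollection.createIndex(new BasicDBObject("id", 1));
            }
        } catch (MongoException e) {
            // The index is an optimization; a failure should not stop the
            // polling thread, so record it and carry on.
            LOG.warn("Could not ensure index on persistent timestamp collection", e);
        }
    }
}

A caller would invoke TimestampCollectionHelper.ensureIdIndex(ptsCollection) once at startup, before entering the polling loop.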
Use of com.mongodb.MongoException in project mongo-hadoop by mongodb.
The class StandaloneMongoSplitter, method calculateSplits.
@Override
public List<InputSplit> calculateSplits() throws SplitFailedException {
    final DBObject splitKey = MongoConfigUtil.getInputSplitKey(getConfiguration());
    final DBObject splitKeyMax = MongoConfigUtil.getMaxSplitKey(getConfiguration());
    final DBObject splitKeyMin = MongoConfigUtil.getMinSplitKey(getConfiguration());
    final int splitSize = MongoConfigUtil.getSplitSize(getConfiguration());
    final MongoClientURI inputURI;
    DBCollection inputCollection = null;
    final ArrayList<InputSplit> returnVal;
    try {
        inputURI = MongoConfigUtil.getInputURI(getConfiguration());
        MongoClientURI authURI = MongoConfigUtil.getAuthURI(getConfiguration());
        if (authURI != null) {
            inputCollection = MongoConfigUtil.getCollectionWithAuth(inputURI, authURI);
        } else {
            inputCollection = MongoConfigUtil.getCollection(inputURI);
        }
        returnVal = new ArrayList<InputSplit>();
        final String ns = inputCollection.getFullName();
        if (LOG.isDebugEnabled()) {
            LOG.debug(String.format("Running splitVector on namespace: %s.%s; hosts: %s",
                    inputURI.getDatabase(), inputURI.getCollection(), inputURI.getHosts()));
        }
        final DBObject cmd = BasicDBObjectBuilder.start("splitVector", ns)
                .add("keyPattern", splitKey)
                .add("min", splitKeyMin)
                .add("max", splitKeyMax)
                .add("force", false)
                .add("maxChunkSize", splitSize)
                .get();
        CommandResult data;
        boolean ok = true;
        try {
            data = inputCollection.getDB().getSisterDB(inputURI.getDatabase()).command(cmd, ReadPreference.primary());
        } catch (final MongoException e) {
            // 2.0 servers throw exceptions rather than info in a CommandResult
            data = null;
            LOG.info(e.getMessage(), e);
            if (e.getMessage().contains("unrecognized command: splitVector")) {
                ok = false;
            } else {
                throw e;
            }
        }
        if (data != null) {
            if (data.containsField("$err")) {
                throw new SplitFailedException("Error calculating splits: " + data);
            } else if (!data.get("ok").equals(1.0)) {
                ok = false;
            }
        }
        if (!ok) {
            final CommandResult stats = inputCollection.getStats();
            if (stats.containsField("primary")) {
                final DBCursor shards = inputCollection.getDB().getSisterDB("config")
                        .getCollection("shards")
                        .find(new BasicDBObject("_id", stats.getString("primary")));
                try {
                    if (shards.hasNext()) {
                        final DBObject shard = shards.next();
                        final String host = ((String) shard.get("host")).replace(shard.get("_id") + "/", "");
                        final MongoClientURI shardHost;
                        if (authURI != null) {
                            shardHost = new MongoClientURIBuilder(authURI).host(host).build();
                        } else {
                            shardHost = new MongoClientURIBuilder(inputURI).host(host).build();
                        }
                        MongoClient shardClient = null;
                        try {
                            shardClient = new MongoClient(shardHost);
                            data = shardClient.getDB(shardHost.getDatabase()).command(cmd, ReadPreference.primary());
                        } catch (final Exception e) {
                            LOG.error(e.getMessage(), e);
                        } finally {
                            if (shardClient != null) {
                                shardClient.close();
                            }
                        }
                    }
                } finally {
                    shards.close();
                }
            }
            if (data != null && !data.get("ok").equals(1.0)) {
                throw new SplitFailedException("Unable to calculate input splits: " + data.get("errmsg"));
            }
        }
        // Comes in a format where "min" and "max" are implicit
        // and each entry is just a boundary key; not ranged
        final BasicDBList splitData = (BasicDBList) data.get("splitKeys");
        if (splitData.size() == 0) {
            LOG.warn("WARNING: No Input Splits were calculated by the split code. Proceeding with a *single* split. Data may be too"
                    + " small, try lowering 'mongo.input.split_size' if this is undesirable.");
        }
        // Lower boundary of the first min split
        BasicDBObject lastKey = null;
        // If splitKeyMin was given, use it as first boundary.
        if (!splitKeyMin.toMap().isEmpty()) {
            lastKey = new BasicDBObject(splitKeyMin.toMap());
        }
        for (final Object aSplitData : splitData) {
            final BasicDBObject currentKey = (BasicDBObject) aSplitData;
            returnVal.add(createSplitFromBounds(lastKey, currentKey));
            lastKey = currentKey;
        }
        BasicDBObject maxKey = null;
        // If splitKeyMax was given, use it as last boundary.
        if (!splitKeyMax.toMap().isEmpty()) {
            maxKey = new BasicDBObject(splitKeyMax.toMap());
        }
        // Last max split
        final MongoInputSplit lastSplit = createSplitFromBounds(lastKey, maxKey);
        returnVal.add(lastSplit);
    } finally {
        if (inputCollection != null) {
            MongoConfigUtil.close(inputCollection.getDB().getMongo());
        }
    }
    if (MongoConfigUtil.isFilterEmptySplitsEnabled(getConfiguration())) {
        return filterEmptySplits(returnVal);
    }
    return returnVal;
}
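The core MongoException pattern here is the splitVector probe: old (2.0-era) servers throw a MongoException for an unknown command rather than returning an error CommandResult, so the splitter inspects the message and falls back instead of failing outright. Below is a minimal sketch of just that probe, assuming the legacy driver; SplitVectorProbe and trySplitVector are hypothetical names for illustration.

import com.mongodb.BasicDBObjectBuilder;
import com.mongodb.CommandResult;
import com.mongodb.DB;
import com.mongodb.DBObject;
import com.mongodb.MongoException;
import com.mongodb.ReadPreference;

public final class SplitVectorProbe {

    private SplitVectorProbe() {
    }

    /**
     * Runs splitVector against the given database and returns the result,
     * or null when the server does not support the command (old servers
     * throw MongoException instead of returning an error CommandResult).
     */
    public static CommandResult trySplitVector(DB db, String namespace, DBObject keyPattern, int maxChunkSizeMB) {
        DBObject cmd = BasicDBObjectBuilder.start("splitVector", namespace)
                .add("keyPattern", keyPattern)
                .add("force", false)
                .add("maxChunkSize", maxChunkSizeMB)
                .get();
        try {
            // splitVector must run on the primary to see authoritative data.
            return db.command(cmd, ReadPreference.primary());
        } catch (MongoException e) {
            if (e.getMessage() != null && e.getMessage().contains("unrecognized command: splitVector")) {
                return null; // caller falls back to another splitting strategy
            }
            throw e; // anything else is a real failure
        }
    }
}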
Use of com.mongodb.MongoException in project mongo-java-driver by mongodb.
The class DefaultConnectionPool, method get.
@Override
public InternalConnection get(final long timeout, final TimeUnit timeUnit) {
    try {
        if (waitQueueSize.incrementAndGet() > settings.getMaxWaitQueueSize()) {
            throw createWaitQueueFullException();
        }
        try {
            connectionPoolListener.waitQueueEntered(new ConnectionPoolWaitQueueEnteredEvent(serverId, currentThread().getId()));
            PooledConnection pooledConnection = getPooledConnection(timeout, timeUnit);
            if (!pooledConnection.opened()) {
                try {
                    pooledConnection.open();
                } catch (Throwable t) {
                    pool.release(pooledConnection.wrapped, true);
                    if (t instanceof MongoException) {
                        throw (MongoException) t;
                    } else {
                        throw new MongoInternalException(t.toString(), t);
                    }
                }
            }
            return pooledConnection;
        } finally {
            connectionPoolListener.waitQueueExited(new ConnectionPoolWaitQueueExitedEvent(serverId, currentThread().getId()));
        }
    } finally {
        waitQueueSize.decrementAndGet();
    }
}
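The open() failure handling shows the driver's rethrow policy: release the pooled resource first, then let a MongoException propagate unchanged and wrap everything else in MongoInternalException, so callers only ever see the driver's exception hierarchy. Here is that policy as a standalone helper sketch; MongoExceptions and asMongoException are assumed names, not driver API.

import com.mongodb.MongoException;
import com.mongodb.MongoInternalException;

public final class MongoExceptions {

    private MongoExceptions() {
    }

    /**
     * Mirrors the pool's rethrow policy: a MongoException propagates
     * unchanged, anything else is wrapped so callers only ever deal with
     * the driver's exception hierarchy.
     */
    public static MongoException asMongoException(Throwable t) {
        if (t instanceof MongoException) {
            return (MongoException) t;
        }
        return new MongoInternalException(t.toString(), t);
    }
}

With such a helper, the catch block above would reduce to: release the connection, then throw MongoExceptions.asMongoException(t).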
Use of com.mongodb.MongoException in project mongo-java-driver by mongodb.
The class JsonPoweredCrudTestHelper, method getOperationResults.
BsonDocument getOperationResults(final BsonDocument operation) {
    String name = operation.getString("name").getValue();
    BsonDocument arguments = operation.getDocument("arguments");
    String methodName = "get" + name.substring(0, 1).toUpperCase() + name.substring(1) + "Result";
    try {
        Method method = getClass().getDeclaredMethod(methodName, BsonDocument.class);
        return (BsonDocument) method.invoke(this, arguments);
    } catch (NoSuchMethodException e) {
        throw new UnsupportedOperationException("No handler for operation " + methodName);
    } catch (InvocationTargetException e) {
        if (e.getTargetException() instanceof AssumptionViolatedException) {
            throw (AssumptionViolatedException) e.getTargetException();
        }
        if (e.getTargetException() instanceof MongoException) {
            throw (MongoException) e.getTargetException();
        }
        throw (RuntimeException) e.getTargetException();
    } catch (IllegalAccessException e) {
        throw new UnsupportedOperationException("Invalid handler access for operation " + methodName);
    }
}
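Reflective dispatch hides the real failure inside InvocationTargetException, so the helper unwraps getTargetException() and rethrows a MongoException as itself before falling back to RuntimeException. Below is a condensed sketch of just the unwrapping step; ReflectiveDispatch and invokeUnwrapped are illustrative names.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import com.mongodb.MongoException;

public final class ReflectiveDispatch {

    private ReflectiveDispatch() {
    }

    /**
     * Invokes a handler method and unwraps InvocationTargetException so the
     * caller sees the original MongoException (or RuntimeException) that the
     * handler threw, following the test helper above.
     */
    public static Object invokeUnwrapped(Object target, Method method, Object... args) {
        try {
            return method.invoke(target, args);
        } catch (InvocationTargetException e) {
            // Rethrow driver exceptions with their type intact so tests can
            // assert on them; everything else is assumed to be unchecked.
            if (e.getTargetException() instanceof MongoException) {
                throw (MongoException) e.getTargetException();
            }
            throw (RuntimeException) e.getTargetException();
        } catch (IllegalAccessException e) {
            throw new UnsupportedOperationException("Handler not accessible: " + method.getName(), e);
        }
    }
}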
Use of com.mongodb.MongoException in project morphia by mongodb.
The class TestQuery, method testWhereWithInvalidStringQuery.
@Test
public void testWhereWithInvalidStringQuery() {
    getDs().save(new PhotoWithKeywords());
    final CodeWScope hasKeyword = new CodeWScope("keywords != null", new BasicDBObject());
    try {
        // must fail
        assertNotNull(getDs().find(PhotoWithKeywords.class).where(hasKeyword.getCode()).get());
        fail("Invalid javascript magically isn't invalid anymore?");
    } catch (MongoInternalException e) {
        // fine
    } catch (MongoException e) {
        // fine
    }
}
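The test relies on the server rejecting a $where expression that references an unbound symbol (the $where scope exposes the document as this, so plain keywords is undefined), which surfaces on the client as a MongoException. A sketch of the same check against the legacy driver directly, without Morphia; InvalidWhereQueryTest and the collection field are assumed to be wired up by a surrounding test harness.

import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.MongoException;

import org.junit.Test;

import static org.junit.Assert.fail;

public class InvalidWhereQueryTest {

    // Assumed to be initialized by the surrounding test harness.
    private DBCollection collection;

    @Test
    public void invalidWhereClauseShouldFail() {
        try {
            // "keywords" is not bound in the $where scope (the document is
            // "this"), so evaluating the cursor must raise a server-side
            // JavaScript error, reported as a MongoException.
            collection.find(new BasicDBObject("$where", "keywords != null")).one();
            fail("Invalid javascript magically isn't invalid anymore?");
        } catch (MongoException e) {
            // expected: the server rejects the $where expression
        }
    }
}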