Use of com.mongodb.MongoInterruptedException in the mongo-java-driver project (by MongoDB):
the hasNext method of the SyncMongoCursor class.
/**
 * Returns whether another result is available, blocking until a result arrives from the
 * underlying reactive subscription, the stream completes, or the poll times out.
 *
 * <p>If a previously received error is pending it is rethrown. A cached {@code current}
 * element (from a prior call) short-circuits to {@code true} without polling.
 *
 * @return {@code true} if a next element is available, {@code false} once the stream completed
 * @throws MongoTimeoutException if no result arrives within {@code TIMEOUT} seconds
 * @throws MongoInterruptedException if the polling thread is interrupted
 */
@Override
@SuppressWarnings("unchecked")
public boolean hasNext() {
    if (error != null) {
        throw error;
    }
    if (completed) {
        return false;
    }
    if (current != null) {
        return true;
    }
    try {
        Object next;
        // Once a full batch has been consumed, request the next batch from the publisher.
        if (batchSize != null && batchSize != 0 && countToBatchSize == batchSize) {
            subscription.request(batchSize);
            countToBatchSize = 0;
        }
        next = results.pollFirst(TIMEOUT, TimeUnit.SECONDS);
        if (next == null) {
            throw new MongoTimeoutException("Time out waiting for result from cursor");
        } else if (next instanceof Throwable) {
            // Errors are delivered through the same queue; remember and rethrow.
            error = translateError((Throwable) next);
            throw error;
        } else if (next == COMPLETED) {
            completed = true;
            return false;
        } else {
            current = (T) next;
            countToBatchSize++;
            return true;
        }
    } catch (InterruptedException e) {
        // Restore the interrupt status so callers further up the stack can observe it.
        Thread.currentThread().interrupt();
        throw new MongoInterruptedException("Interrupted waiting for next result", e);
    }
}
Use of com.mongodb.MongoInterruptedException in the mongo-java-driver project (by MongoDB):
the selectServer method of the BaseCluster class.
/**
 * Selects a server satisfying the given selector, waiting for the cluster description to
 * change (signalled through the phase latch) until a match appears or the maximum wait
 * time is exhausted.
 *
 * @param serverSelector the selector the returned server must satisfy
 * @return the selected server together with its description
 * @throws MongoTimeoutException if no matching server appears within the maximum wait time
 * @throws MongoInterruptedException if the waiting thread is interrupted
 */
@Override
public ServerTuple selectServer(final ServerSelector serverSelector) {
    isTrue("open", !isClosed());
    try {
        CountDownLatch currentPhase = phase.get();
        ClusterDescription curDescription = description;
        ServerSelector compositeServerSelector = getCompositeServerSelector(serverSelector);
        ServerTuple serverTuple = selectServer(compositeServerSelector, curDescription);
        boolean selectionFailureLogged = false;
        long startTimeNanos = System.nanoTime();
        long curTimeNanos = startTimeNanos;
        long maxWaitTimeNanos = getMaxWaitTimeNanos();
        while (true) {
            throwIfIncompatible(curDescription);
            if (serverTuple != null) {
                return serverTuple;
            }
            if (curTimeNanos - startTimeNanos > maxWaitTimeNanos) {
                throw createTimeoutException(serverSelector, curDescription);
            }
            // Log the selection failure only once per call, not once per wait iteration.
            if (!selectionFailureLogged) {
                logServerSelectionFailure(serverSelector, curDescription);
                selectionFailureLogged = true;
            }
            connect();
            // Wait for the next topology change, capped by both the remaining time budget
            // and the minimum polling interval.
            currentPhase.await(Math.min(maxWaitTimeNanos - (curTimeNanos - startTimeNanos), getMinWaitTimeNanos()), NANOSECONDS);
            curTimeNanos = System.nanoTime();
            currentPhase = phase.get();
            curDescription = description;
            serverTuple = selectServer(compositeServerSelector, curDescription);
        }
    } catch (InterruptedException e) {
        // Restore the interrupt status so callers further up the stack can observe it.
        Thread.currentThread().interrupt();
        throw new MongoInterruptedException(format("Interrupted while waiting for a server that matches %s", serverSelector), e);
    }
}
Use of com.mongodb.MongoInterruptedException in the mongo-java-driver project (by MongoDB):
the getDescription method of the BaseCluster class.
/**
 * Returns the current cluster description, waiting (via the phase latch) for the cluster
 * type to become known when it is still {@code UNKNOWN}, up to the maximum wait time.
 *
 * @return the current, non-{@code UNKNOWN} cluster description
 * @throws MongoTimeoutException if the cluster type remains unknown past the maximum wait time
 * @throws MongoInterruptedException if the waiting thread is interrupted
 */
@Override
public ClusterDescription getDescription() {
    isTrue("open", !isClosed());
    try {
        CountDownLatch currentPhase = phase.get();
        ClusterDescription curDescription = description;
        boolean selectionFailureLogged = false;
        long startTimeNanos = System.nanoTime();
        long curTimeNanos = startTimeNanos;
        long maxWaitTimeNanos = getMaxWaitTimeNanos();
        while (curDescription.getType() == ClusterType.UNKNOWN) {
            if (curTimeNanos - startTimeNanos > maxWaitTimeNanos) {
                throw new MongoTimeoutException(format("Timed out after %d ms while waiting to connect. Client view of cluster state " + "is %s", settings.getServerSelectionTimeout(MILLISECONDS), curDescription.getShortDescription()));
            }
            // Log the wait message only once per call, not once per iteration.
            if (!selectionFailureLogged) {
                if (LOGGER.isInfoEnabled()) {
                    // A negative server-selection timeout means wait indefinitely.
                    if (settings.getServerSelectionTimeout(MILLISECONDS) < 0) {
                        LOGGER.info("Cluster description not yet available. Waiting indefinitely.");
                    } else {
                        LOGGER.info(format("Cluster description not yet available. Waiting for %d ms before timing out", settings.getServerSelectionTimeout(MILLISECONDS)));
                    }
                }
                selectionFailureLogged = true;
            }
            connect();
            // Wait for the next topology change, capped by both the remaining time budget
            // and the minimum polling interval.
            currentPhase.await(Math.min(maxWaitTimeNanos - (curTimeNanos - startTimeNanos), getMinWaitTimeNanos()), NANOSECONDS);
            curTimeNanos = System.nanoTime();
            currentPhase = phase.get();
            curDescription = description;
        }
        return curDescription;
    } catch (InterruptedException e) {
        // Restore the interrupt status so callers further up the stack can observe it.
        Thread.currentThread().interrupt();
        throw new MongoInterruptedException("Interrupted while waiting to connect", e);
    }
}
Use of com.mongodb.MongoInterruptedException in the mongo-java-driver project (by MongoDB):
the waitForLastServerSessionPoolRelease method of the Fixture class.
/**
 * Blocks until the server session pool's in-use count drops to zero, polling every 10 ms.
 * A no-op when no client has been created.
 *
 * @throws MongoTimeoutException if the count has not reached zero within {@code TIMEOUT_DURATION}
 * @throws MongoInterruptedException if the polling thread is interrupted while sleeping
 */
public static synchronized void waitForLastServerSessionPoolRelease() {
    if (mongoClient != null) {
        long startTime = System.currentTimeMillis();
        int sessionInUseCount = getSessionInUseCount();
        while (sessionInUseCount > 0) {
            try {
                if (System.currentTimeMillis() > startTime + TIMEOUT_DURATION.toMillis()) {
                    throw new MongoTimeoutException("Timed out waiting for server session pool in use count to drop to 0. Now at: " + sessionInUseCount);
                }
                sleep(10);
                sessionInUseCount = getSessionInUseCount();
            } catch (InterruptedException e) {
                // Restore the interrupt status before converting to the driver's unchecked type.
                Thread.currentThread().interrupt();
                throw new MongoInterruptedException("Interrupted", e);
            }
        }
    }
}
Use of com.mongodb.MongoInterruptedException in the metis-framework project (by Europeana):
the publish method of the FullBeanPublisher class.
/**
 * Publishes an RDF record: converts it to a Full Bean, optionally applies record redirects,
 * persists the bean to Mongo, and finally publishes it to Solr (with network-failure retries).
 *
 * @param rdf RDF to publish.
 * @param recordDate The date that would represent the created/updated date of a record
 * @param datasetIdsToRedirectFrom The dataset ids that their records need to be redirected
 * @param performRedirects flag that indicates if redirect should be performed
 * @throws IndexingException which can be one of:
 * <ul>
 * <li>{@link IndexerRelatedIndexingException} In case an error occurred during publication.</li>
 * <li>{@link SetupRelatedIndexingException} in case an error occurred during indexing setup</li>
 * <li>{@link RecordRelatedIndexingException} in case an error occurred related to record
 * contents</li>
 * </ul>
 */
private void publish(RdfWrapper rdf, Date recordDate, List<String> datasetIdsToRedirectFrom, boolean performRedirects) throws IndexingException {
    // Convert RDF to Full Bean.
    final RdfToFullBeanConverter fullBeanConverter = fullBeanConverterSupplier.get();
    final FullBeanImpl fullBean = fullBeanConverter.convertRdfToFullBean(rdf);
    // Provide the preprocessor: this will set the created and updated timestamps as needed.
    // When timestamps from the RDF are to be preserved, no preprocessing is applied at all.
    final TriConsumer<FullBeanImpl, FullBeanImpl, Pair<Date, Date>> fullBeanPreprocessor = preserveUpdateAndCreateTimesFromRdf ? EMPTY_PREPROCESSOR : (FullBeanPublisher::setUpdateAndCreateTime);
    // Perform redirection
    final List<Pair<String, Date>> recordsForRedirection;
    try {
        recordsForRedirection = RecordRedirectsUtil.checkAndApplyRedirects(recordRedirectDao, rdf, recordDate, datasetIdsToRedirectFrom, performRedirects, this::getSolrDocuments);
    } catch (RuntimeException e) {
        throw new RecordRelatedIndexingException(REDIRECT_PUBLISH_ERROR, e);
    }
    // Publish to Mongo
    final FullBeanImpl savedFullBean;
    try {
        // The earliest redirect date (if any) is passed along to the updater; null when
        // there are no redirections.
        savedFullBean = new FullBeanUpdater(fullBeanPreprocessor).update(fullBean, recordDate, recordsForRedirection.stream().map(Pair::getValue).min(Comparator.naturalOrder()).orElse(null), edmMongoClient);
    } catch (MongoIncompatibleDriverException | MongoConfigurationException | MongoSecurityException e) {
        // Driver/configuration/authentication problems are reported as setup-related.
        throw new SetupRelatedIndexingException(MONGO_SERVER_PUBLISH_ERROR, e);
    } catch (MongoSocketException | MongoClientException | MongoInternalException | MongoInterruptedException e) {
        // Connection/server-side problems are reported as indexer-related.
        throw new IndexerRelatedIndexingException(MONGO_SERVER_PUBLISH_ERROR, e);
    } catch (RuntimeException e) {
        // Anything else is assumed to stem from the record's contents.
        throw new RecordRelatedIndexingException(MONGO_SERVER_PUBLISH_ERROR, e);
    }
    // Publish to Solr
    try {
        retryableExternalRequestForNetworkExceptions(() -> {
            try {
                publishToSolr(rdf, savedFullBean);
            } catch (IndexingException e) {
                // Wrap the checked exception so it can cross the lambda boundary; it is
                // converted to RecordRelatedIndexingException by the catch below.
                throw new RuntimeException(e);
            }
            return null;
        });
    } catch (Exception e) {
        throw new RecordRelatedIndexingException(SOLR_SERVER_PUBLISH_ERROR, e);
    }
}
Aggregations