Use of com.mongodb.reactivestreams.client.ClientSession in project spring-data-mongodb by spring-projects.
The class ReactiveClientSessionTests, method shouldApplyClientSession.
@Test // DATAMONGO-1880
public void shouldApplyClientSession() {

    ClientSession session = Mono
            .from(client.startSession(ClientSessionOptions.builder().causallyConsistent(true).build())).block();

    assertThat(session.getOperationTime()).isNull();

    template.withSession(() -> session)
            .execute(action -> action.findAll(Document.class, COLLECTION_NAME))
            .as(StepVerifier::create)
            .expectNextCount(1)
            .verifyComplete();

    assertThat(session.getOperationTime()).isNotNull();
    assertThat(session.getServerSession().isClosed()).isFalse();

    session.close();
}
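For reference, a minimal non-test sketch of the same session-scoped read, assuming a configured ReactiveMongoTemplate and a reactive MongoClient are available. The class and method names below are illustrative, and the two-argument execute(action, doFinally) overload of ReactiveSessionScoped is assumed:

import com.mongodb.ClientSessionOptions;
import com.mongodb.reactivestreams.client.ClientSession;
import com.mongodb.reactivestreams.client.MongoClient;
import org.bson.Document;
import org.springframework.data.mongodb.core.ReactiveMongoTemplate;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

class SessionScopedReadSketch {

    private final MongoClient client;
    private final ReactiveMongoTemplate template;

    SessionScopedReadSketch(MongoClient client, ReactiveMongoTemplate template) {
        this.client = client;
        this.template = template;
    }

    Flux<Document> readAllInSession(String collectionName) {
        // Start a causally consistent session and scope all callback operations to it.
        Mono<ClientSession> session = Mono.from(
                client.startSession(ClientSessionOptions.builder().causallyConsistent(true).build()));

        // Close the session once the returned Flux terminates (assumed doFinally overload).
        return template.withSession(session)
                .execute(action -> action.findAll(Document.class, collectionName), ClientSession::close);
    }
}

Unlike the test above, the session here is started lazily and closed by the doFinally consumer rather than an explicit session.close().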
Use of com.mongodb.reactivestreams.client.ClientSession in project spring-data-mongodb by spring-projects.
The class ReactiveMongoTemplateTransactionTests, method reactiveTransactionWithExplicitTransactionStart.
@Test // DATAMONGO-1970
public void reactiveTransactionWithExplicitTransactionStart() {

    Publisher<ClientSession> sessionPublisher = client
            .startSession(ClientSessionOptions.builder().causallyConsistent(true).build());

    ClientSession clientSession = Mono.from(sessionPublisher).block();

    template.withSession(Mono.just(clientSession))
            .execute(action -> ReactiveMongoContext.getSession().flatMap(session -> {
                session.startTransaction();
                return action.remove(ID_QUERY, Document.class, COLLECTION_NAME);
            }))
            .as(StepVerifier::create)
            .expectNextCount(1)
            .verifyComplete();

    template.exists(ID_QUERY, COLLECTION_NAME)
            .as(StepVerifier::create)
            .expectNext(true)
            .verifyComplete();

    assertThat(clientSession.hasActiveTransaction()).isTrue();

    StepVerifier.create(clientSession.commitTransaction()).verifyComplete();

    template.exists(ID_QUERY, COLLECTION_NAME)
            .as(StepVerifier::create)
            .expectNext(false)
            .verifyComplete();
}
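A driver-only sketch of the same explicit start/commit flow, tying commit, abort and session cleanup into the pipeline with Reactor's Mono.usingWhen; the class name, method name and filter value are illustrative:

import com.mongodb.ClientSessionOptions;
import com.mongodb.client.model.Filters;
import com.mongodb.client.result.DeleteResult;
import com.mongodb.reactivestreams.client.MongoClient;
import com.mongodb.reactivestreams.client.MongoCollection;
import org.bson.Document;
import reactor.core.publisher.Mono;

class ExplicitTransactionSketch {

    Mono<DeleteResult> deleteInTransaction(MongoClient client, MongoCollection<Document> collection, Object id) {
        return Mono.usingWhen(
                // Resource: a causally consistent session.
                client.startSession(ClientSessionOptions.builder().causallyConsistent(true).build()),
                // Work: start the transaction and run the write against it.
                session -> {
                    session.startTransaction();
                    return Mono.from(collection.deleteMany(session, Filters.eq("_id", id)));
                },
                // On complete: commit, then close the session.
                session -> Mono.from(session.commitTransaction()).doFinally(signal -> session.close()),
                // On error: abort, then close the session.
                (session, error) -> Mono.from(session.abortTransaction()).doFinally(signal -> session.close()),
                // On cancel: abort, then close the session.
                session -> Mono.from(session.abortTransaction()).doFinally(signal -> session.close()));
    }
}

The test above instead drives commitTransaction() through StepVerifier explicitly so it can assert the pre- and post-commit state of the collection.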
Use of com.mongodb.reactivestreams.client.ClientSession in project spring-data-mongodb by spring-projects.
The class ReactiveSessionBoundMongoTemplateUnitTests, method setUp.
@Before
public void setUp() {

    when(client.getDatabase(anyString())).thenReturn(database);

    when(codecRegistry.get(any(Class.class))).thenReturn(new BsonValueCodec());

    when(database.getCodecRegistry()).thenReturn(codecRegistry);
    when(database.getCollection(anyString())).thenReturn(collection);
    when(database.getCollection(anyString(), any())).thenReturn(collection);
    when(database.listCollectionNames(any(ClientSession.class))).thenReturn(findPublisher);
    when(database.createCollection(any(ClientSession.class), any(), any())).thenReturn(resultPublisher);
    when(database.runCommand(any(ClientSession.class), any(), any(Class.class))).thenReturn(resultPublisher);

    when(collection.find(any(ClientSession.class))).thenReturn(findPublisher);
    when(collection.find(any(ClientSession.class), any(Document.class))).thenReturn(findPublisher);
    when(collection.find(any(ClientSession.class), any(Class.class))).thenReturn(findPublisher);
    when(collection.find(any(ClientSession.class), any(), any())).thenReturn(findPublisher);
    when(collection.deleteMany(any(ClientSession.class), any(), any())).thenReturn(resultPublisher);
    when(collection.insertOne(any(ClientSession.class), any(Document.class))).thenReturn(resultPublisher);
    when(collection.aggregate(any(ClientSession.class), anyList(), any(Class.class))).thenReturn(aggregatePublisher);
    when(collection.countDocuments(any(ClientSession.class), any(), any(CountOptions.class))).thenReturn(resultPublisher);
    when(collection.drop(any(ClientSession.class))).thenReturn(resultPublisher);
    when(collection.findOneAndUpdate(any(ClientSession.class), any(), any(Bson.class), any())).thenReturn(resultPublisher);
    when(collection.distinct(any(ClientSession.class), any(), any(Bson.class), any())).thenReturn(distinctPublisher);
    when(collection.updateOne(any(ClientSession.class), any(), any(Bson.class), any(UpdateOptions.class))).thenReturn(resultPublisher);
    when(collection.updateMany(any(ClientSession.class), any(), any(Bson.class), any(UpdateOptions.class))).thenReturn(resultPublisher);
    when(collection.dropIndex(any(ClientSession.class), anyString())).thenReturn(resultPublisher);
    when(collection.mapReduce(any(ClientSession.class), any(), any(), any())).thenReturn(mapReducePublisher);

    when(findPublisher.projection(any())).thenReturn(findPublisher);
    when(findPublisher.limit(anyInt())).thenReturn(findPublisher);
    when(findPublisher.collation(any())).thenReturn(findPublisher);
    when(findPublisher.first()).thenReturn(resultPublisher);

    when(aggregatePublisher.allowDiskUse(anyBoolean())).thenReturn(aggregatePublisher);

    factory = new SimpleReactiveMongoDatabaseFactory(client, "foo");

    this.mappingContext = new MongoMappingContext();
    this.converter = new MappingMongoConverter(NoOpDbRefResolver.INSTANCE, mappingContext);
    this.template = new ReactiveSessionBoundMongoTemplate(clientSession, new ReactiveMongoTemplate(factory, converter));
}
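With these stubs in place, the tests can drive the session-bound template and verify that every delegated driver call carries the bound ClientSession. A minimal sketch of that pattern, intended to sit in the same test class (verify and eq are the usual Mockito static imports; the test name and the verified overload are illustrative assumptions):

@Test // illustrative sketch
public void executeCallbackShouldUseSessionBoundCollection() {
    // The callback receives a session-proxied collection, so a plain find() ...
    template.execute("collection", MongoCollection::find).subscribe();

    // ... is expected to reach the driver as find(ClientSession), matching the stub above.
    verify(collection).find(eq(clientSession));
}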
Use of com.mongodb.reactivestreams.client.ClientSession in project mongo-java-driver by mongodb.
The class GridFSUploadPublisherImpl, method createCheckAndCreateIndexesMono.
private Mono<Void> createCheckAndCreateIndexesMono() {
    MongoCollection<Document> collection = filesCollection.withDocumentClass(Document.class).withReadPreference(primary());
    FindPublisher<Document> findPublisher;
    if (clientSession != null) {
        findPublisher = collection.find(clientSession);
    } else {
        findPublisher = collection.find();
    }
    AtomicBoolean collectionExists = new AtomicBoolean(false);

    return Mono.create(sink -> Mono.from(findPublisher.projection(PROJECTION).first())
            .subscribe(d -> collectionExists.set(true), sink::error, () -> {
                if (collectionExists.get()) {
                    sink.success();
                } else {
                    checkAndCreateIndex(filesCollection.withReadPreference(primary()), FILES_INDEX)
                            .doOnError(sink::error)
                            .doOnSuccess(i -> {
                                checkAndCreateIndex(chunksCollection.withReadPreference(primary()), CHUNKS_INDEX)
                                        .doOnError(sink::error)
                                        .doOnSuccess(sink::success)
                                        .subscribe();
                            })
                            .subscribe();
                }
            }));
}
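The clientSession branch above is taken when the GridFS upload was created with a session. A caller-side sketch of such an upload, assuming the reactive GridFSBucket.uploadFromPublisher(ClientSession, String, Publisher&lt;ByteBuffer&gt;) overload; the bucket, file name and content are illustrative:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import com.mongodb.reactivestreams.client.ClientSession;
import com.mongodb.reactivestreams.client.MongoDatabase;
import com.mongodb.reactivestreams.client.gridfs.GridFSBucket;
import com.mongodb.reactivestreams.client.gridfs.GridFSBuckets;
import org.bson.types.ObjectId;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

class GridFsSessionUploadSketch {

    Mono<ObjectId> upload(MongoDatabase database, ClientSession session) {
        GridFSBucket bucket = GridFSBuckets.create(database);
        Flux<ByteBuffer> content = Flux.just(ByteBuffer.wrap("hello".getBytes(StandardCharsets.UTF_8)));

        // Passing the session is what routes the files/chunks existence check above through collection.find(clientSession).
        return Mono.from(bucket.uploadFromPublisher(session, "example.txt", content));
    }
}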
Use of com.mongodb.reactivestreams.client.ClientSession in project mongo-java-driver by mongodb.
The class OperationExecutorImpl, method execute.
@Override
public <T> Mono<T> execute(final AsyncReadOperation<T> operation, final ReadPreference readPreference,
        final ReadConcern readConcern, @Nullable final ClientSession session) {
    notNull("operation", operation);
    notNull("readPreference", readPreference);
    notNull("readConcern", readConcern);

    if (session != null) {
        session.notifyOperationInitiated(operation);
    }

    return Mono.from(subscriber ->
            clientSessionHelper.withClientSession(session, OperationExecutorImpl.this)
                    .map(clientSession -> getReadWriteBinding(getContext(subscriber), readPreference, readConcern,
                            clientSession, session == null && clientSession != null))
                    .switchIfEmpty(Mono.fromCallable(() ->
                            getReadWriteBinding(getContext(subscriber), readPreference, readConcern, session, false)))
                    .flatMap(binding -> {
                        if (session != null && session.hasActiveTransaction()
                                && !binding.getReadPreference().equals(primary())) {
                            binding.release();
                            return Mono.error(new MongoClientException("Read preference in a transaction must be primary"));
                        } else {
                            return Mono.<T>create(sink -> operation.executeAsync(binding, (result, t) -> {
                                try {
                                    binding.release();
                                } finally {
                                    sinkToCallback(sink).onResult(result, t);
                                }
                            })).doOnError((t) -> {
                                labelException(session, t);
                                unpinServerAddressOnTransientTransactionError(session, t);
                            });
                        }
                    })
                    .subscribe(subscriber));
}
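The guard above fails any in-transaction read whose effective read preference is not primary. A caller-side sketch of avoiding that error by fixing the transaction's read preference when starting it (class and method names are illustrative):

import com.mongodb.ClientSessionOptions;
import com.mongodb.ReadPreference;
import com.mongodb.TransactionOptions;
import com.mongodb.reactivestreams.client.ClientSession;
import com.mongodb.reactivestreams.client.MongoClient;
import reactor.core.publisher.Mono;

class TransactionReadPreferenceSketch {

    Mono<ClientSession> startPrimaryReadTransaction(MongoClient client) {
        return Mono.from(client.startSession(ClientSessionOptions.builder().build()))
                .doOnNext(session -> session.startTransaction(TransactionOptions.builder()
                        // Reads inside the transaction must target the primary, matching the check in execute(...).
                        .readPreference(ReadPreference.primary())
                        .build()));
    }
}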