Use of com.mongodb.reactivestreams.client.FindPublisher in project mongo-java-driver by mongodb.
The class GridFSUploadPublisherImpl, method createCheckAndCreateIndexesMono.
private Mono<Void> createCheckAndCreateIndexesMono() {
    MongoCollection<Document> collection = filesCollection.withDocumentClass(Document.class).withReadPreference(primary());
    FindPublisher<Document> findPublisher;
    if (clientSession != null) {
        findPublisher = collection.find(clientSession);
    } else {
        findPublisher = collection.find();
    }
    AtomicBoolean collectionExists = new AtomicBoolean(false);
    // Probe the files collection: if any document exists, assume the indexes are already in place.
    return Mono.create(sink -> Mono.from(findPublisher.projection(PROJECTION).first())
            .subscribe(d -> collectionExists.set(true), sink::error, () -> {
                if (collectionExists.get()) {
                    sink.success();
                } else {
                    // Otherwise create the files index first, then the chunks index.
                    checkAndCreateIndex(filesCollection.withReadPreference(primary()), FILES_INDEX)
                            .doOnError(sink::error)
                            .doOnSuccess(i -> checkAndCreateIndex(chunksCollection.withReadPreference(primary()), CHUNKS_INDEX)
                                    .doOnError(sink::error)
                                    .doOnSuccess(sink::success)
                                    .subscribe())
                            .subscribe();
                }
            }));
}
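The nested subscribe callbacks above can be hard to follow. As a rough sketch only (not the driver's code), the same "files index first, then chunks index" sequencing could be written with plain Reactor composition, assuming checkAndCreateIndex returns a Mono as it does above:

// Sketch: sequential index creation via Mono.then() instead of nested subscribe() calls.
// checkAndCreateIndex, filesCollection, chunksCollection, FILES_INDEX and CHUNKS_INDEX
// are the same members used in the method above; the method name here is hypothetical.
private Mono<Void> createIndexesSequentially() {
    return checkAndCreateIndex(filesCollection.withReadPreference(primary()), FILES_INDEX)
            .then(checkAndCreateIndex(chunksCollection.withReadPreference(primary()), CHUNKS_INDEX))
            .then();
}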
Use of com.mongodb.reactivestreams.client.FindPublisher in project mongo-java-driver by mongodb.
The class GridFSDownloadPublisherImpl, method getChunkPublisher.
private Flux<ByteBuffer> getChunkPublisher(final GridFSFile gridFSFile) {
    Document filter = new Document("files_id", gridFSFile.getId());
    FindPublisher<Document> chunkPublisher;
    if (clientSession != null) {
        chunkPublisher = chunksCollection.find(clientSession, filter);
    } else {
        chunkPublisher = chunksCollection.find(filter);
    }
    AtomicInteger chunkCounter = new AtomicInteger(0);
    int numberOfChunks = (int) Math.ceil((double) gridFSFile.getLength() / gridFSFile.getChunkSize());
    Flux<ByteBuffer> byteBufferFlux = Flux.from(chunkPublisher.sort(new Document("n", 1))).map(chunk -> {
        int expectedChunkIndex = chunkCounter.getAndAdd(1);
        if (chunk == null || chunk.getInteger("n") != expectedChunkIndex) {
            throw new MongoGridFSException(format("Could not find file chunk for files_id: %s at chunk index %s.",
                    gridFSFile.getId(), expectedChunkIndex));
        } else if (!(chunk.get("data") instanceof Binary)) {
            throw new MongoGridFSException("Unexpected data format for the chunk");
        }
        byte[] data = chunk.get("data", Binary.class).getData();
        long expectedDataLength = 0;
        if (numberOfChunks > 0) {
            // Every chunk is exactly chunkSize bytes, except the last one, which holds the remainder.
            expectedDataLength = expectedChunkIndex + 1 == numberOfChunks
                    ? gridFSFile.getLength() - (expectedChunkIndex * (long) gridFSFile.getChunkSize())
                    : gridFSFile.getChunkSize();
        }
        if (data.length != expectedDataLength) {
            throw new MongoGridFSException(format("Chunk size data length is not the expected size. "
                    + "The size was %s for file_id: %s chunk index %s it should be %s bytes.",
                    data.length, gridFSFile.getId(), expectedChunkIndex, expectedDataLength));
        }
        return ByteBuffer.wrap(data);
    }).doOnComplete(() -> {
        if (chunkCounter.get() < numberOfChunks) {
            throw new MongoGridFSException(format("Could not find file chunk for files_id: %s at chunk index %s.",
                    gridFSFile.getId(), chunkCounter.get()));
        }
    });
    return bufferSizeBytes == null ? byteBufferFlux : new ResizingByteBufferFlux(byteBufferFlux, bufferSizeBytes);
}
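The length arithmetic above is easiest to see with concrete numbers. As a standalone sketch (hypothetical values, not driver code): a 600-byte file stored with a 255-byte chunk size splits into ceil(600 / 255) = 3 chunks of 255, 255 and 90 bytes.

// Standalone illustration of the expected-chunk-length rule used above.
static long expectedChunkLength(long fileLength, int chunkSize, int chunkIndex) {
    int numberOfChunks = (int) Math.ceil((double) fileLength / chunkSize);
    if (numberOfChunks == 0) {
        return 0;
    }
    // The last chunk carries the remainder; every other chunk is exactly chunkSize bytes.
    return chunkIndex + 1 == numberOfChunks
            ? fileLength - (chunkIndex * (long) chunkSize)
            : chunkSize;
}
// expectedChunkLength(600, 255, 0) == 255, (600, 255, 1) == 255, (600, 255, 2) == 90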
Use of com.mongodb.reactivestreams.client.FindPublisher in project immutables by immutables.
The class MongoSession, method query.
private Publisher<?> query(StandardOperations.Select select) {
    final Query query = select.query();
    final boolean hasProjections = query.hasProjections();
    boolean useAggregationPipeline = query.hasAggregations() || query.distinct()
            || (query.count() && query.limit().isPresent());
    ExpressionNaming expressionNaming = useAggregationPipeline
            ? ExpressionNaming.from(UniqueCachedNaming.of(query.projections()))
            : expression -> pathNaming.name((Path) expression);
    MongoCollection<?> collection = this.collection;
    if (hasProjections) {
        // add a special TupleCodecProvider for projections
        CodecRegistry newRegistry = CodecRegistries.fromRegistries(this.collection.getCodecRegistry(),
                CodecRegistries.fromProviders(new TupleCodecProvider(query, expressionNaming)));
        collection = this.collection.withDocumentClass(ProjectedTuple.class).withCodecRegistry(newRegistry);
    }
    if (useAggregationPipeline) {
        // aggregations
        AggregationQuery agg = new AggregationQuery(query, pathNaming);
        if (query.count()) {
            // for an empty result set Mongo returns an empty publisher rather than single(0),
            // so map it back to 0L explicitly
            return Flowable.fromPublisher(collection.aggregate(agg.toPipeline(), BsonDocument.class))
                    .map(d -> d.get("count").asNumber().longValue())
                    .defaultIfEmpty(0L);
        }
        return collection.aggregate(agg.toPipeline(), ProjectedTuple.class);
    }
    Bson filter = toBsonFilter(query);
    if (query.count()) {
        // simple count(*) without distinct, aggregations etc.
        return Flowable.fromPublisher(collection.countDocuments(filter));
    }
    final FindPublisher<?> find = collection.find(filter);
    if (!query.collations().isEmpty()) {
        // add sorting
        final Function<Collation, Bson> toSortFn = col -> {
            final String path = col.path().toStringPath();
            return col.direction().isAscending() ? Sorts.ascending(path) : Sorts.descending(path);
        };
        final List<Bson> sorts = query.collations().stream().map(toSortFn).collect(Collectors.toList());
        find.sort(Sorts.orderBy(sorts));
    }
    query.limit().ifPresent(limit -> find.limit((int) limit));
    query.offset().ifPresent(offset -> find.skip((int) offset));
    if (hasProjections) {
        List<String> fields = query.projections().stream()
                .map(p -> pathNaming.name((Path) p))
                .collect(Collectors.toList());
        find.projection(Projections.include(fields));
        return find;
    }
    // post-process result with projections
    return find;
}
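The sorting branch is easy to try in isolation. A small hedged sketch of how Sorts.orderBy combines per-collation sorts into one sort document; the field names here are hypothetical stand-ins for the Collation paths above:

import com.mongodb.MongoClientSettings;
import com.mongodb.client.model.Sorts;
import org.bson.BsonDocument;
import org.bson.conversions.Bson;
import java.util.Arrays;
import java.util.List;

class SortSketch {
    public static void main(String[] args) {
        // One Bson per collation, combined into a single sort document.
        List<Bson> sorts = Arrays.asList(Sorts.ascending("name"), Sorts.descending("age"));
        Bson orderBy = Sorts.orderBy(sorts);
        // Prints {"name": 1, "age": -1}
        System.out.println(orderBy.toBsonDocument(BsonDocument.class, MongoClientSettings.getDefaultCodecRegistry()));
    }
}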
Use of com.mongodb.reactivestreams.client.FindPublisher in project spring-data-mongodb by spring-projects.
The class ReactiveMongoTemplate, method doRemove.
protected <T> Mono<DeleteResult> doRemove(String collectionName, Query query, @Nullable Class<T> entityClass) {
    if (query == null) {
        throw new InvalidDataAccessApiUsageException("Query passed in to remove can't be null!");
    }
    Assert.hasText(collectionName, "Collection name must not be null or empty!");
    MongoPersistentEntity<?> entity = getPersistentEntity(entityClass);
    DeleteContext deleteContext = queryOperations.deleteQueryContext(query);
    Document queryObject = deleteContext.getMappedQuery(entity);
    DeleteOptions deleteOptions = deleteContext.getDeleteOptions(entityClass);
    Document removeQuery = deleteContext.getMappedQuery(entity);
    MongoAction mongoAction = new MongoAction(writeConcern, MongoActionOperation.REMOVE, collectionName,
            entityClass, null, removeQuery);
    WriteConcern writeConcernToUse = prepareWriteConcern(mongoAction);
    return execute(collectionName, collection -> {
        maybeEmitEvent(new BeforeDeleteEvent<>(removeQuery, entityClass, collectionName));
        MongoCollection<Document> collectionToUse = prepareCollection(collection, writeConcernToUse);
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug(String.format("Remove using query: %s in collection: %s.",
                    serializeToJsonSafely(removeQuery), collectionName));
        }
        if (query.getLimit() > 0 || query.getSkip() > 0) {
            // With limit or skip, first find the matching ids, then delete by id list.
            FindPublisher<Document> cursor = new QueryFindPublisherPreparer(query, entityClass)
                    .prepare(collection.find(removeQuery))
                    .projection(MappedDocument.getIdOnlyProjection());
            return Flux.from(cursor)
                    .map(MappedDocument::of)
                    .map(MappedDocument::getId)
                    .collectList()
                    .flatMapMany(val -> collectionToUse.deleteMany(MappedDocument.getIdIn(val), deleteOptions));
        } else {
            return collectionToUse.deleteMany(removeQuery, deleteOptions);
        }
    }).doOnNext(it -> maybeEmitEvent(new AfterDeleteEvent<>(queryObject, entityClass, collectionName))).next();
}
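For context, a hedged sketch of how the limited-remove branch is typically reached through the public ReactiveMongoTemplate API; the entity class, collection name and "status" field below are made up for illustration:

// Hypothetical caller code, not part of the template itself. 'template' is a
// configured ReactiveMongoTemplate; Person and "people" are invented names.
Mono<DeleteResult> removeExpired(ReactiveMongoTemplate template) {
    // limit(100) makes query.getLimit() > 0, routing doRemove into the
    // find-ids-then-deleteMany branch above.
    Query query = Query.query(Criteria.where("status").is("expired")).limit(100);
    return template.remove(query, Person.class, "people");
}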