Use of com.mongodb.client.gridfs.model.GridFSFile in the mongo-java-driver project by MongoDB: method getChunkPublisher of class GridFSDownloadPublisherImpl.
/**
 * Builds the stream of raw chunk payloads for the given GridFS file.
 * <p>
 * Chunks are read from the chunks collection (optionally within {@code clientSession}),
 * ordered by their {@code n} index, and validated for contiguity and expected size.
 * Validation failures surface as {@link MongoGridFSException} via the reactive pipeline.
 *
 * @param gridFSFile the file whose chunks should be streamed
 * @return a {@code Flux} of chunk payloads, optionally re-sliced to {@code bufferSizeBytes}
 */
private Flux<ByteBuffer> getChunkPublisher(final GridFSFile gridFSFile) {
    Document filter = new Document("files_id", gridFSFile.getId());
    FindPublisher<Document> chunkPublisher;
    if (clientSession != null) {
        chunkPublisher = chunksCollection.find(clientSession, filter);
    } else {
        chunkPublisher = chunksCollection.find(filter);
    }
    AtomicInteger chunkCounter = new AtomicInteger(0);
    // Total chunk count implied by the file length; 0 for an empty file.
    int numberOfChunks = (int) Math.ceil((double) gridFSFile.getLength() / gridFSFile.getChunkSize());
    Flux<ByteBuffer> byteBufferFlux = Flux.from(chunkPublisher.sort(new Document("n", 1))).map(chunk -> {
        int expectedChunkIndex = chunkCounter.getAndAdd(1);
        // Read "n" into an Integer before comparing: a chunk document with a
        // missing or null "n" would otherwise throw an opaque NullPointerException
        // during auto-unboxing instead of the descriptive exception below.
        Integer actualChunkIndex = chunk == null ? null : chunk.getInteger("n");
        if (actualChunkIndex == null || actualChunkIndex != expectedChunkIndex) {
            throw new MongoGridFSException(format("Could not find file chunk for files_id: %s at chunk index %s.", gridFSFile.getId(), expectedChunkIndex));
        } else if (!(chunk.get("data") instanceof Binary)) {
            throw new MongoGridFSException("Unexpected data format for the chunk");
        }
        byte[] data = chunk.get("data", Binary.class).getData();
        // Every chunk must be exactly chunkSize bytes, except the last one,
        // which carries only the remainder of the file length.
        long expectedDataLength = 0;
        if (numberOfChunks > 0) {
            expectedDataLength = expectedChunkIndex + 1 == numberOfChunks ? gridFSFile.getLength() - (expectedChunkIndex * (long) gridFSFile.getChunkSize()) : gridFSFile.getChunkSize();
        }
        if (data.length != expectedDataLength) {
            throw new MongoGridFSException(format("Chunk size data length is not the expected size. " + "The size was %s for file_id: %s chunk index %s it should be " + "%s bytes.", data.length, gridFSFile.getId(), expectedChunkIndex, expectedDataLength));
        }
        return ByteBuffer.wrap(data);
    }).doOnComplete(() -> {
        // The cursor may complete early if chunks are missing from the collection.
        if (chunkCounter.get() < numberOfChunks) {
            throw new MongoGridFSException(format("Could not find file chunk for files_id: %s at chunk index %s.", gridFSFile.getId(), chunkCounter.get()));
        }
    });
    return bufferSizeBytes == null ? byteBufferFlux : new ResizingByteBufferFlux(byteBufferFlux, bufferSizeBytes);
}
Use of com.mongodb.client.gridfs.model.GridFSFile in the mongo-java-driver project by MongoDB: method createDeletePublisher of class GridFSPublisherCreator.
/**
 * Creates a publisher that deletes the file document with the given id and then
 * removes all of its chunks, optionally within the supplied client session.
 * <p>
 * If the acknowledged file delete removed nothing, the publisher signals a
 * {@link MongoGridFSException} instead of touching the chunks collection.
 *
 * @param filesCollection  the GridFS files collection, never null
 * @param chunksCollection the GridFS chunks collection, never null
 * @param clientSession    the session to operate in, or null for none
 * @param id               the id of the file to delete, never null
 * @return a publisher that completes once both the file and its chunks are gone
 */
public static Publisher<Void> createDeletePublisher(final MongoCollection<GridFSFile> filesCollection, final MongoCollection<Document> chunksCollection, @Nullable final ClientSession clientSession, final BsonValue id) {
    notNull("filesCollection", filesCollection);
    notNull("chunksCollection", chunksCollection);
    notNull("id", id);
    BsonDocument fileFilter = new BsonDocument("_id", id);
    Publisher<DeleteResult> fileDeletePublisher = clientSession == null
            ? filesCollection.deleteOne(fileFilter)
            : filesCollection.deleteOne(clientSession, fileFilter);
    return Mono.from(fileDeletePublisher)
            .flatMap(deleteResult -> {
                // An acknowledged write that deleted nothing means the file never existed.
                if (deleteResult.wasAcknowledged() && deleteResult.getDeletedCount() == 0) {
                    throw new MongoGridFSException(format("No file found with the ObjectId: %s", id));
                }
                BsonDocument chunksFilter = new BsonDocument("files_id", id);
                return clientSession == null
                        ? Mono.from(chunksCollection.deleteMany(chunksFilter))
                        : Mono.from(chunksCollection.deleteMany(clientSession, chunksFilter));
            })
            .flatMap(ignored -> Mono.empty());
}
Use of com.mongodb.client.gridfs.model.GridFSFile in the spring-data-mongodb project by spring-projects: test method considersSkipLimitWhenQueryingFiles of class ReactiveGridFsTemplateTests.
// DATAMONGO-765
@Test
public void considersSkipLimitWhenQueryingFiles() {

    DataBufferFactory bufferFactory = new DefaultDataBufferFactory();
    DataBuffer buffer = bufferFactory.allocateBuffer(0);

    // Store twelve empty files whose names sort into four predictable groups.
    Flux.just("a", "aa", "aaa", "b", "bb", "bbb", "c", "cc", "ccc", "d", "dd", "ddd")
            .flatMap(fileName -> operations.store(Mono.just(buffer), fileName))
            .as(StepVerifier::create)
            .expectNextCount(12)
            .verifyComplete();

    // Page index 2 (zero-based) with page size 3, sorted ascending by filename,
    // must skip the "a" and "b" groups and return exactly the "c" group.
    PageRequest pageRequest = PageRequest.of(2, 3, Sort.Direction.ASC, "filename");

    operations.find(new Query().with(pageRequest))
            .map(GridFSFile::getFilename)
            .as(StepVerifier::create)
            .expectNext("c", "cc", "ccc")
            .verifyComplete();
}
Use of com.mongodb.client.gridfs.model.GridFSFile in the spring-data-mongodb project by spring-projects: test method convertFileToResource of class GridFsTemplateIntegrationTests.
// DATAMONGO-1813
@Test
public void convertFileToResource() throws IOException {

    // Store the fixture under a metadata key we can query back.
    Document metadata = new Document("key", "value");
    ObjectId reference = operations.store(resource.getInputStream(), "foobar", metadata);

    GridFSFile file = operations.findOne(query(whereMetaData("key").is("value")));
    GridFsResource result = operations.getResource(file);

    // The resource must mirror the stored content length and carry the stored id.
    assertThat(result.contentLength()).isEqualTo(resource.contentLength());
    assertThat(((BsonObjectId) result.getId()).getValue()).isEqualTo(reference);
}
Use of com.mongodb.client.gridfs.model.GridFSFile in the spring-data-mongodb project by spring-projects: test method shouldThrowExceptionOnEmptyContentTypeInMetadata of class GridFsResourceUnitTests.
// DATAMONGO-1850
@Test
public void shouldThrowExceptionOnEmptyContentTypeInMetadata() {

    // A file whose metadata document contains no content-type entry at all.
    GridFSFile fileWithoutContentType = new GridFSFile(new BsonObjectId(), "foo", 0, 0, new Date(), new Document());
    GridFsResource underTest = new GridFsResource(fileWithoutContentType);

    // Asking for the content type must fail loudly rather than return null.
    assertThatThrownBy(underTest::getContentType).isInstanceOf(MongoGridFSException.class);
}
Aggregations