use of com.mongodb.client.gridfs.model.GridFSUploadOptions in project mongo-hadoop by mongodb.
the class GridFSInputFormatTest method uploadFile.
private static void uploadFile(final File file) throws IOException {
    // Set a small chunk size so we get multiple chunks per readme.
    GridFSUploadStream gridfsStream = bucket.openUploadStream(
            file.getName(), new GridFSUploadOptions().chunkSizeBytes(1024));
    FileInputStream inputStream = new FileInputStream(file);
    IOUtils.copy(inputStream, gridfsStream);
    inputStream.close();
    gridfsStream.close();
}
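For comparison, here is a hedged sketch of the same helper written with try-with-resources, so the file stream is closed even if the copy fails. The name uploadFileSafely is illustrative and not part of the mongo-hadoop test; bucket is assumed to be the test's GridFSBucket, as above. Note that closing the upload stream is what finalizes the upload.

private static void uploadFileSafely(final File file) throws IOException {
    GridFSUploadOptions options = new GridFSUploadOptions().chunkSizeBytes(1024);
    GridFSUploadStream gridfsStream = bucket.openUploadStream(file.getName(), options);
    // try-with-resources guarantees the file stream is closed even if the copy fails
    try (FileInputStream inputStream = new FileInputStream(file)) {
        IOUtils.copy(inputStream, gridfsStream);
    }
    // close() flushes the last partial chunk and writes the files-collection document
    gridfsStream.close();
}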
use of com.mongodb.client.gridfs.model.GridFSUploadOptions in project mongo-java-driver by mongodb.
the class GridFSTour method main.
/**
 * Run this main method to see the output of this quick example.
 *
 * @param args takes an optional single argument for the connection string
 * @throws FileNotFoundException if the sample file cannot be found
 * @throws IOException if there was an exception closing an input stream
 */
public static void main(final String[] args) throws FileNotFoundException, IOException {
    MongoClient mongoClient;
    if (args.length == 0) {
        // connect to the local database server
        mongoClient = new MongoClient();
    } else {
        mongoClient = new MongoClient(new MongoClientURI(args[0]));
    }
    // get a handle to the "mydb" database
    MongoDatabase database = mongoClient.getDatabase("mydb");
    database.drop();
    GridFSBucket gridFSBucket = GridFSBuckets.create(database);

    /*
     * UploadFromStream Example
     */
    // Get the input stream
    InputStream streamToUploadFrom = new ByteArrayInputStream("Hello World".getBytes(StandardCharsets.UTF_8));
    // Create some custom options
    GridFSUploadOptions options = new GridFSUploadOptions()
            .chunkSizeBytes(1024)
            .metadata(new Document("type", "presentation"));
    ObjectId fileId = gridFSBucket.uploadFromStream("mongodb-tutorial", streamToUploadFrom, options);
    streamToUploadFrom.close();
    System.out.println("The fileId of the uploaded file is: " + fileId.toHexString());

    /*
     * OpenUploadStream Example
     */
    // Get some data to write
    byte[] data = "Data to upload into GridFS".getBytes(StandardCharsets.UTF_8);
    GridFSUploadStream uploadStream = gridFSBucket.openUploadStream("sampleData");
    uploadStream.write(data);
    uploadStream.close();
    System.out.println("The fileId of the uploaded file is: " + uploadStream.getObjectId().toHexString());

    /*
     * Find documents
     */
    gridFSBucket.find().forEach(new Block<GridFSFile>() {
        @Override
        public void apply(final GridFSFile gridFSFile) {
            System.out.println(gridFSFile.getFilename());
        }
    });

    /*
     * Find documents with a filter
     */
    gridFSBucket.find(eq("metadata.contentType", "image/png")).forEach(new Block<GridFSFile>() {
        @Override
        public void apply(final GridFSFile gridFSFile) {
            System.out.println(gridFSFile.getFilename());
        }
    });

    /*
     * DownloadToStream
     */
    FileOutputStream streamToDownloadTo = new FileOutputStream("/tmp/mongodb-tutorial.txt");
    gridFSBucket.downloadToStream(fileId, streamToDownloadTo);
    streamToDownloadTo.close();

    /*
     * DownloadToStreamByName
     */
    streamToDownloadTo = new FileOutputStream("/tmp/mongodb-tutorial.txt");
    GridFSDownloadOptions downloadOptions = new GridFSDownloadOptions().revision(0);
    gridFSBucket.downloadToStream("mongodb-tutorial", streamToDownloadTo, downloadOptions);
    streamToDownloadTo.close();

    /*
     * OpenDownloadStream
     */
    GridFSDownloadStream downloadStream = gridFSBucket.openDownloadStream(fileId);
    int fileLength = (int) downloadStream.getGridFSFile().getLength();
    byte[] bytesToWriteTo = new byte[fileLength];
    downloadStream.read(bytesToWriteTo);
    downloadStream.close();
    System.out.println(new String(bytesToWriteTo, StandardCharsets.UTF_8));

    /*
     * OpenDownloadStreamByName
     */
    downloadStream = gridFSBucket.openDownloadStream("sampleData");
    fileLength = (int) downloadStream.getGridFSFile().getLength();
    bytesToWriteTo = new byte[fileLength];
    downloadStream.read(bytesToWriteTo);
    downloadStream.close();
    System.out.println(new String(bytesToWriteTo, StandardCharsets.UTF_8));

    /*
     * Rename
     */
    gridFSBucket.rename(fileId, "mongodbTutorial");

    /*
     * Delete
     */
    gridFSBucket.delete(fileId);
    database.drop();
}
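As a hedged aside, the tour connects through the legacy MongoClient and MongoClientURI constructors from the 3.x driver. On driver 3.7 and later, the same handles can be obtained through the MongoClients factory in the com.mongodb.client package; the following is a sketch under that version assumption, not part of the tour itself.

// Sketch: driver 3.7+ equivalent of the connection setup above.
// MongoClients and MongoClient here come from com.mongodb.client.
MongoClient mongoClient = args.length == 0
        ? MongoClients.create()          // local server on the default port
        : MongoClients.create(args[0]);  // connection string, e.g. "mongodb://host:27017"
MongoDatabase database = mongoClient.getDatabase("mydb");
GridFSBucket gridFSBucket = GridFSBuckets.create(database);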
use of com.mongodb.client.gridfs.model.GridFSUploadOptions in project mongo-java-driver by mongodb.
the class GridFSTest method doUpload (async driver variant).
private void doUpload(final BsonDocument rawArguments, final BsonDocument assertion) {
    Throwable error = null;
    ObjectId objectId = null;
    BsonDocument arguments = parseHexDocument(rawArguments, "source");
    try {
        final String filename = arguments.getString("filename").getValue();
        final InputStream inputStream = new ByteArrayInputStream(arguments.getBinary("source").getData());
        final GridFSUploadOptions options = new GridFSUploadOptions();
        BsonDocument rawOptions = arguments.getDocument("options", new BsonDocument());
        if (rawOptions.containsKey("chunkSizeBytes")) {
            options.chunkSizeBytes(rawOptions.getInt32("chunkSizeBytes").getValue());
        }
        if (rawOptions.containsKey("metadata")) {
            options.metadata(Document.parse(rawOptions.getDocument("metadata").toJson()));
        }
        objectId = new MongoOperation<ObjectId>() {
            @Override
            public void execute() {
                gridFSBucket.uploadFromStream(filename, toAsyncInputStream(inputStream), options, getCallback());
            }
        }.get();
    } catch (Throwable e) {
        error = e;
    }
    if (assertion.containsKey("error")) {
        // The upload never reads past the source data, so the extra chunk is never seen
        // and no error is raised for the "ExtraChunk" case.
        if (!assertion.getString("error").getValue().equals("ExtraChunk")) {
            assertNotNull("Should have thrown an exception", error);
        }
    } else {
        assertNull("Should not have thrown an exception", error);
        for (BsonValue rawDataItem : assertion.getArray("data", new BsonArray())) {
            BsonDocument dataItem = rawDataItem.asDocument();
            String insert = dataItem.getString("insert", new BsonString("none")).getValue();
            if (insert.equals("expected.files")) {
                List<BsonDocument> documents = processFiles(dataItem.getArray("documents", new BsonArray()),
                        new ArrayList<BsonDocument>());
                assertEquals(getFilesCount(new BsonDocument()), documents.size());
                BsonDocument actual = new MongoOperation<BsonDocument>() {
                    @Override
                    public void execute() {
                        filesCollection.find().first(getCallback());
                    }
                }.get();
                for (BsonDocument expected : documents) {
                    assertEquals(expected.get("length"), actual.get("length"));
                    assertEquals(expected.get("chunkSize"), actual.get("chunkSize"));
                    assertEquals(expected.get("md5"), actual.get("md5"));
                    assertEquals(expected.get("filename"), actual.get("filename"));
                    if (expected.containsKey("metadata")) {
                        assertEquals(expected.get("metadata"), actual.get("metadata"));
                    }
                }
            } else if (insert.equals("expected.chunks")) {
                List<BsonDocument> documents = processChunks(dataItem.getArray("documents", new BsonArray()),
                        new ArrayList<BsonDocument>());
                assertEquals(getChunksCount(new BsonDocument()), documents.size());
                List<BsonDocument> actualDocuments = new MongoOperation<List<BsonDocument>>() {
                    @Override
                    public void execute() {
                        chunksCollection.find().into(new ArrayList<BsonDocument>(), getCallback());
                    }
                }.get();
                for (int i = 0; i < documents.size(); i++) {
                    BsonDocument expected = documents.get(i);
                    BsonDocument actual = actualDocuments.get(i);
                    assertEquals(new BsonObjectId(objectId), actual.getObjectId("files_id"));
                    assertEquals(expected.get("n"), actual.get("n"));
                    assertEquals(expected.get("data"), actual.get("data"));
                }
            }
        }
    }
}
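MongoOperation and getCallback() are helpers from the async driver's test suite and are not shown on this page. The following is an illustrative sketch of the callback-to-blocking pattern they implement; the class name, timeout, and details are assumptions, not the driver's actual code, and it assumes com.mongodb.async.SingleResultCallback from the 3.x async driver.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import com.mongodb.async.SingleResultCallback;

// Illustrative sketch of a callback-to-blocking adapter in the style of MongoOperation.
public abstract class MongoOperationSketch<T> {
    private final CountDownLatch latch = new CountDownLatch(1);
    private volatile T result;
    private volatile Throwable error;

    // Subclasses invoke the async driver call here, passing getCallback().
    public abstract void execute();

    public SingleResultCallback<T> getCallback() {
        return (value, t) -> {
            result = value;
            error = t;
            latch.countDown();
        };
    }

    // Runs the operation and blocks until the callback fires.
    public T get() {
        execute();
        try {
            if (!latch.await(30, TimeUnit.SECONDS)) {
                throw new RuntimeException("Timed out waiting for the async operation");
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        }
        if (error != null) {
            throw new RuntimeException(error);
        }
        return result;
    }
}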
use of com.mongodb.client.gridfs.model.GridFSUploadOptions in project mongo-java-driver by mongodb.
the class GridFSTest method doUpload (sync driver variant).
private void doUpload(final BsonDocument rawArguments, final BsonDocument assertion) {
    Throwable error = null;
    ObjectId objectId = null;
    BsonDocument arguments = parseHexDocument(rawArguments, "source");
    try {
        String filename = arguments.getString("filename").getValue();
        InputStream input = new ByteArrayInputStream(arguments.getBinary("source").getData());
        GridFSUploadOptions options = new GridFSUploadOptions();
        BsonDocument rawOptions = arguments.getDocument("options", new BsonDocument());
        if (rawOptions.containsKey("chunkSizeBytes")) {
            options = options.chunkSizeBytes(rawOptions.getInt32("chunkSizeBytes").getValue());
        }
        if (rawOptions.containsKey("metadata")) {
            options = options.metadata(Document.parse(rawOptions.getDocument("metadata").toJson()));
        }
        objectId = gridFSBucket.uploadFromStream(filename, input, options);
    } catch (Throwable e) {
        error = e;
    }
    if (assertion.containsKey("error")) {
        // The upload never reads past the source data, so the extra chunk is never seen
        // and no error is raised for the "ExtraChunk" case.
        if (!assertion.getString("error").getValue().equals("ExtraChunk")) {
            assertNotNull("Should have thrown an exception", error);
        }
    } else {
        assertNull("Should not have thrown an exception", error);
        for (BsonValue rawDataItem : assertion.getArray("data", new BsonArray())) {
            BsonDocument dataItem = rawDataItem.asDocument();
            String insert = dataItem.getString("insert", new BsonString("none")).getValue();
            if (insert.equals("expected.files")) {
                List<BsonDocument> documents = processFiles(dataItem.getArray("documents", new BsonArray()),
                        new ArrayList<BsonDocument>());
                assertEquals(filesCollection.count(), documents.size());
                BsonDocument actual = filesCollection.find().first();
                for (BsonDocument expected : documents) {
                    assertEquals(expected.get("length"), actual.get("length"));
                    assertEquals(expected.get("chunkSize"), actual.get("chunkSize"));
                    assertEquals(expected.get("md5"), actual.get("md5"));
                    assertEquals(expected.get("filename"), actual.get("filename"));
                    if (expected.containsKey("metadata")) {
                        assertEquals(expected.get("metadata"), actual.get("metadata"));
                    }
                }
            } else if (insert.equals("expected.chunks")) {
                List<BsonDocument> documents = processChunks(dataItem.getArray("documents", new BsonArray()),
                        new ArrayList<BsonDocument>());
                assertEquals(chunksCollection.count(), documents.size());
                List<BsonDocument> actualDocuments = chunksCollection.find().into(new ArrayList<BsonDocument>());
                for (int i = 0; i < documents.size(); i++) {
                    BsonDocument expected = documents.get(i);
                    BsonDocument actual = actualDocuments.get(i);
                    assertEquals(new BsonObjectId(objectId), actual.getObjectId("files_id"));
                    assertEquals(expected.get("n"), actual.get("n"));
                    assertEquals(expected.get("data"), actual.get("data"));
                }
            }
        }
    }
}
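parseHexDocument is another helper from the test class that is not shown here. The GridFS spec test files encode binary payloads as { "$hex": "..." } subdocuments, so a plausible sketch of such a helper (illustrative only, not the driver's actual code) is:

// Illustrative sketch: replace a { "$hex": "..." } field with real BSON binary,
// since the GridFS spec test files encode their payloads as hex strings.
private BsonDocument parseHexDocument(final BsonDocument document, final String hexDocumentKey) {
    BsonDocument parsed = document.clone();
    if (parsed.containsKey(hexDocumentKey) && parsed.get(hexDocumentKey).isDocument()) {
        String hex = parsed.getDocument(hexDocumentKey).getString("$hex").getValue();
        byte[] bytes = new byte[hex.length() / 2];
        for (int i = 0; i < bytes.length; i++) {
            bytes[i] = (byte) Integer.parseInt(hex.substring(2 * i, 2 * i + 2), 16);
        }
        parsed.put(hexDocumentKey, new BsonBinary(bytes));
    }
    return parsed;
}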