use of com.mongodb.gridfs.GridFSInputFile in project beam by apache.
the class MongoDBGridFSIOTest method setup.
@BeforeClass
public static void setup() throws Exception {
    try (ServerSocket serverSocket = new ServerSocket(0)) {
        port = serverSocket.getLocalPort();
    }
    LOG.info("Starting MongoDB embedded instance on {}", port);
    try {
        Files.forceDelete(new File(MONGODB_LOCATION));
    } catch (Exception e) {
        // ignore: the data directory may not exist yet
    }
    new File(MONGODB_LOCATION).mkdirs();
    IMongodConfig mongodConfig =
        new MongodConfigBuilder()
            .version(Version.Main.PRODUCTION)
            .configServer(false)
            .replication(new Storage(MONGODB_LOCATION, null, 0))
            .net(new Net("localhost", port, Network.localhostIsIPv6()))
            .cmdOptions(
                new MongoCmdOptionsBuilder()
                    .syncDelay(10)
                    .useNoPrealloc(true)
                    .useSmallFiles(true)
                    .useNoJournal(true)
                    .build())
            .build();
    mongodExecutable = mongodStarter.prepare(mongodConfig);
    mongodProcess = mongodExecutable.start();
    LOG.info("Insert test data");
    Mongo client = new Mongo("localhost", port);
    DB database = client.getDB(DATABASE);
    GridFS gridfs = new GridFS(database);
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    for (int x = 0; x < 100; x++) {
        out.write(
            ("Einstein\nDarwin\nCopernicus\nPasteur\n"
                + "Curie\nFaraday\nNewton\nBohr\nGalilei\nMaxwell\n").getBytes());
    }
    for (int x = 0; x < 5; x++) {
        gridfs.createFile(new ByteArrayInputStream(out.toByteArray()), "file" + x).save();
    }
    gridfs = new GridFS(database, "mapBucket");
    long now = System.currentTimeMillis();
    Random random = new Random();
    String[] scientists = {
        "Einstein", "Darwin", "Copernicus", "Pasteur", "Curie",
        "Faraday", "Newton", "Bohr", "Galilei", "Maxwell"
    };
    for (int x = 0; x < 10; x++) {
        GridFSInputFile file = gridfs.createFile("file_" + x);
        OutputStream outf = file.getOutputStream();
        OutputStreamWriter writer = new OutputStreamWriter(outf);
        for (int y = 0; y < 5000; y++) {
            long time = now - random.nextInt(3600000);
            String name = scientists[y % scientists.length];
            writer.write(Long.toString(time) + "\t");
            writer.write(name + "\t");
            writer.write(Integer.toString(random.nextInt(100)));
            writer.write("\n");
        }
        for (int y = 0; y < scientists.length; y++) {
            String name = scientists[y % scientists.length];
            writer.write(Long.toString(now) + "\t");
            writer.write(name + "\t");
            writer.write("101");
            writer.write("\n");
        }
        writer.flush();
        writer.close();
    }
    client.close();
}
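For reference, the files seeded above can be read back through the same legacy GridFS API. A minimal sketch (not part of the Beam test) that reuses the test's port and DATABASE values:

import java.io.ByteArrayOutputStream;
import com.mongodb.Mongo;
import com.mongodb.gridfs.GridFS;
import com.mongodb.gridfs.GridFSDBFile;

static void readSeededFile(int port, String database) throws Exception {
    Mongo client = new Mongo("localhost", port);
    try {
        GridFS gridfs = new GridFS(client.getDB(database));
        // findOne(String) looks the file up by its "filename" field
        GridFSDBFile file = gridfs.findOne("file0");
        if (file != null) {
            ByteArrayOutputStream contents = new ByteArrayOutputStream();
            file.writeTo(contents); // copies every chunk into the buffer
            System.out.println("file0 has " + file.getLength() + " bytes");
        }
    } finally {
        client.close();
    }
}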
use of com.mongodb.gridfs.GridFSInputFile in project nanopub-server by tkuhn.
the class NanopubDb method writePackageToStream.
public void writePackageToStream(long pageNo, boolean gzipped, OutputStream out)
        throws IOException {
    if (pageNo < 1 || pageNo >= journal.getCurrentPageNo()) {
        throw new IllegalArgumentException("Not a complete page: " + pageNo);
    }
    GridFSDBFile f = packageGridFs.findOne(pageNo + "");
    OutputStream packageOut = null;
    InputStream packageAsStream = null;
    try {
        if (f == null) {
            // Package not cached yet: serialize the page, stream it to the
            // caller, and store a gzipped copy in GridFS for next time.
            if (gzipped) {
                out = new GZIPOutputStream(out);
            }
            ByteArrayOutputStream bOut = new ByteArrayOutputStream();
            packageOut = new GZIPOutputStream(bOut);
            String pageContent = journal.getPageContent(pageNo);
            for (String uri : pageContent.split("\\n")) {
                Nanopub np = getNanopub(TrustyUriUtils.getArtifactCode(uri));
                String s;
                try {
                    s = NanopubUtils.writeToString(np, RDFFormat.TRIG);
                } catch (RDFHandlerException ex) {
                    throw new RuntimeException("Unexpected RDF handler exception", ex);
                }
                byte[] bytes = (s + "\n").getBytes();
                out.write(bytes);
                packageOut.write(bytes);
            }
            packageOut.close();
            packageAsStream = new ByteArrayInputStream(bOut.toByteArray());
            GridFSInputFile i = packageGridFs.createFile(packageAsStream);
            i.setFilename(pageNo + "");
            i.save();
        } else {
            // Package already cached in GridFS: pass the gzipped bytes
            // through as-is, or decompress them on the way out.
            if (gzipped) {
                f.writeTo(out);
            } else {
                GZIPInputStream in = new GZIPInputStream(f.getInputStream());
                byte[] buffer = new byte[1024];
                int len;
                while ((len = in.read(buffer)) > 0) {
                    out.write(buffer, 0, len);
                }
                in.close();
            }
        }
    } finally {
        if (out != null) {
            out.close();
        }
        if (packageOut != null) {
            packageOut.close();
        }
        if (packageAsStream != null) {
            packageAsStream.close();
        }
    }
}
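A hypothetical caller of this method would simply hand it a destination stream; the method takes care of caching the package in GridFS on the first request. In this sketch the variable name db and the output path are illustrative, not from the project:

import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;

void dumpPage(NanopubDb db) throws IOException {
    // gzipped = true streams the cached gzip bytes straight through
    try (OutputStream out = new FileOutputStream("package-1.trig.gz")) {
        db.writePackageToStream(1, true, out);
    }
}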
use of com.mongodb.gridfs.GridFSInputFile in project teiid by teiid.
the class MongoDBExecutionFactory method convertToMongoType.
/**
 * MongoDB only supports certain data types, so Teiid needs to serialize the
 * rest in compatible formats and convert them back while reading.
 * @param value the Teiid runtime value to convert
 * @return the MongoDB-compatible representation of the value
 */
public Object convertToMongoType(Object value, DB mongoDB, String fqn) throws TranslatorException {
    if (value == null) {
        return null;
    }
    try {
        if (value instanceof BigDecimal) {
            return ((BigDecimal) value).doubleValue();
        } else if (value instanceof BigInteger) {
            return ((BigInteger) value).doubleValue();
        } else if (value instanceof Character) {
            return ((Character) value).toString();
        } else if (value instanceof java.sql.Date) {
            return new java.util.Date(((java.sql.Date) value).getTime());
        } else if (value instanceof java.sql.Time) {
            return new java.util.Date(((java.sql.Time) value).getTime());
        } else if (value instanceof java.sql.Timestamp) {
            return new java.util.Date(((java.sql.Timestamp) value).getTime());
        } else if (value instanceof BinaryType) {
            return new Binary(((BinaryType) value).getBytes());
        } else if (value instanceof byte[]) {
            return new Binary((byte[]) value);
        } else if (!(value instanceof GeometryType) && value instanceof Blob) {
            String uuid = UUID.randomUUID().toString();
            GridFS gfs = new GridFS(mongoDB, fqn);
            GridFSInputFile gfsFile = gfs.createFile(((Blob) value).getBinaryStream());
            gfsFile.setFilename(uuid);
            gfsFile.save();
            return uuid;
        } else if (value instanceof Clob) {
            String uuid = UUID.randomUUID().toString();
            GridFS gfs = new GridFS(mongoDB, fqn);
            GridFSInputFile gfsFile = gfs.createFile(((Clob) value).getAsciiStream());
            gfsFile.setFilename(uuid);
            gfsFile.save();
            return uuid;
        } else if (value instanceof SQLXML) {
            String uuid = UUID.randomUUID().toString();
            GridFS gfs = new GridFS(mongoDB, fqn);
            GridFSInputFile gfsFile = gfs.createFile(((SQLXML) value).getBinaryStream());
            gfsFile.setFilename(uuid);
            gfsFile.save();
            return uuid;
        } else if (value instanceof Object[]) {
            BasicDBList list = new BasicDBList();
            for (Object obj : (Object[]) value) {
                list.add(obj);
            }
            return list;
        }
        return value;
    } catch (SQLException e) {
        throw new TranslatorException(e);
    }
}
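The javadoc above notes that serialized values must be converted back on read. The UUID returned for a Blob, Clob, or SQLXML is the GridFS filename, so the stored stream can be recovered with findOne. A sketch of that reverse lookup (an illustration of the GridFS side only, not the actual Teiid read path):

import java.io.InputStream;
import com.mongodb.DB;
import com.mongodb.gridfs.GridFS;
import com.mongodb.gridfs.GridFSDBFile;

static InputStream retrieveLob(DB mongoDB, String fqn, String uuid) {
    GridFS gfs = new GridFS(mongoDB, fqn);
    // the UUID assigned in convertToMongoType is the file's "filename" field
    GridFSDBFile file = gfs.findOne(uuid);
    return file == null ? null : file.getInputStream();
}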
use of com.mongodb.gridfs.GridFSInputFile in project commons by craftercms.
the class AbstractJongoRepository method saveFile.
@Override
public FileInfo saveFile(final InputStream inputStream, final String storeName,
        final String contentType, final ObjectId fileId)
        throws MongoDataException, FileExistsException {
    try {
        if (gridfs.findOne(storeName) != null) {
            log.error("A file named {} already exists", storeName);
            throw new FileExistsException("File with name " + storeName + " already exists");
        }
        GridFSInputFile savedFile = gridfs.createFile(inputStream, storeName, true);
        savedFile.setContentType(contentType);
        if (fileId != null) {
            log.debug("Saving file with given id {}, probably an update", fileId);
            savedFile.setId(fileId);
        }
        savedFile.save();
        FileInfo fileInfo = new FileInfo(savedFile, false);
        log.debug("File {} was saved", fileInfo);
        return fileInfo;
    } catch (MongoException ex) {
        log.error("Unable to save file", ex);
        throw new MongoDataException("Unable to save file to GridFs", ex);
    }
}
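Once saved, the file can be located again by the same store name used in the duplicate check above. A minimal sketch against the same gridfs bucket (illustrative only, not part of the repository class):

import com.mongodb.gridfs.GridFS;
import com.mongodb.gridfs.GridFSDBFile;

static void describeStoredFile(GridFS gridfs, String storeName) {
    GridFSDBFile stored = gridfs.findOne(storeName);
    if (stored != null) {
        // content type and length come from the metadata set at save time
        System.out.println(stored.getFilename() + " ("
                + stored.getContentType() + ", " + stored.getLength() + " bytes)");
    }
}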
use of com.mongodb.gridfs.GridFSInputFile in project mongomvcc by igd-geo.
the class DefaultConvertStrategy method convert.
@Override
public long convert(Object data) {
    GridFSInputFile file;
    if (data instanceof byte[]) {
        file = _gridFS.createFile((byte[]) data);
        file.put(BINARY_TYPE, BYTEARRAY);
    } else if (data instanceof float[]) {
        file = _gridFS.createFile(new FloatArrayInputStream((float[]) data));
        file.put(BINARY_TYPE, FLOATARRAY);
    } else if (data instanceof InputStream) {
        file = _gridFS.createFile((InputStream) data);
        file.put(BINARY_TYPE, INPUTSTREAM);
    } else if (data instanceof ByteBuffer) {
        ByteBuffer bb = (ByteBuffer) data;
        byte[] buf;
        if (bb.hasArray()) {
            buf = bb.array();
        } else {
            bb.rewind();
            buf = new byte[bb.remaining()];
            bb.get(buf);
        }
        file = _gridFS.createFile(buf);
        file.put(BINARY_TYPE, BYTEBUFFER);
    } else if (data instanceof FloatBuffer) {
        FloatBuffer bb = (FloatBuffer) data;
        float[] buf;
        if (bb.hasArray()) {
            buf = bb.array();
        } else {
            bb.rewind();
            buf = new float[bb.remaining()];
            bb.get(buf);
        }
        file = _gridFS.createFile(new FloatArrayInputStream(buf));
        file.put(BINARY_TYPE, FLOATBUFFER);
    } else {
        return 0;
    }
    long oid = _counter.getNextId();
    file.put(MongoDBVLargeCollection.OID, oid);
    _convertedFiles.add(file);
    return oid;
}
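Because convert() stamps each file with the application-level id under MongoDBVLargeCollection.OID, the file can later be located with a metadata query rather than a filename. A sketch of that reverse lookup (assumed, not taken from the project):

import com.mongodb.BasicDBObject;
import com.mongodb.gridfs.GridFS;
import com.mongodb.gridfs.GridFSDBFile;

static GridFSDBFile findByOid(GridFS gridFS, long oid) {
    // GridFS.findOne(DBObject) matches on arbitrary file metadata
    return gridFS.findOne(new BasicDBObject(MongoDBVLargeCollection.OID, oid));
}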