Use of org.iq80.leveldb.Options in project qi4j-sdk by Qi4j.
The class LevelDBEntityStoreMixin, method activateService:
@Override
public void activateService() throws Exception {
    charset = Charset.forName("UTF-8");
    configuration.refresh();
    LevelDBEntityStoreConfiguration config = configuration.get();
    // Choose flavour
    String flavour = config.flavour().get();
    DBFactory factory;
    if ("jni".equalsIgnoreCase(flavour)) {
        factory = newJniDBFactory();
    } else if ("java".equalsIgnoreCase(flavour)) {
        factory = newJavaDBFactory();
    } else {
        factory = newDBFactory();
    }
    // Apply configuration
    Options options = new Options();
    options.createIfMissing(true);
    if (config.blockRestartInterval().get() != null) {
        options.blockRestartInterval(config.blockRestartInterval().get());
    }
    if (config.blockSize().get() != null) {
        options.blockSize(config.blockSize().get());
    }
    if (config.cacheSize().get() != null) {
        options.cacheSize(config.cacheSize().get());
    }
    if (config.compression().get() != null) {
        options.compressionType(config.compression().get() ? CompressionType.SNAPPY : CompressionType.NONE);
    }
    if (config.maxOpenFiles().get() != null) {
        options.maxOpenFiles(config.maxOpenFiles().get());
    }
    if (config.paranoidChecks().get() != null) {
        options.paranoidChecks(config.paranoidChecks().get());
    }
    if (config.verifyChecksums().get() != null) {
        options.verifyChecksums(config.verifyChecksums().get());
    }
    if (config.writeBufferSize().get() != null) {
        options.writeBufferSize(config.writeBufferSize().get());
    }
    // Open/Create the database
    File dbFile = new File(fileConfig.dataDirectory(), descriptor.identity());
    db = factory.open(dbFile, options);
}
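For context, here is a minimal standalone sketch of the same Options tuning against the pure-Java org.iq80 factory. The class name, the "example-db" directory, and the numeric values are illustrative choices, not taken from the Qi4j source:

import java.io.File;
import java.nio.charset.StandardCharsets;

import org.iq80.leveldb.CompressionType;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.Options;
import org.iq80.leveldb.impl.Iq80DBFactory;

public class OptionsSketch {
    public static void main(String[] args) throws Exception {
        Options options = new Options();
        options.createIfMissing(true);                   // create the store on first use
        options.cacheSize(8 * 1048576);                  // 8 MB block cache (illustrative)
        options.compressionType(CompressionType.SNAPPY); // same choice the mixin makes when compression is enabled
        // Open, write, read and close the store.
        DB db = Iq80DBFactory.factory.open(new File("example-db"), options);
        try {
            db.put("key".getBytes(StandardCharsets.UTF_8), "value".getBytes(StandardCharsets.UTF_8));
            byte[] value = db.get("key".getBytes(StandardCharsets.UTF_8));
            System.out.println(new String(value, StandardCharsets.UTF_8));
        } finally {
            db.close();
        }
    }
}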
Use of org.iq80.leveldb.Options in project Mycat-Server by MyCATApache.
The class LevelDBCachePooFactory, method createCachePool:
@Override
public CachePool createCachePool(String poolName, int cacheSize, int expireSeconds) {
    Options options = new Options();
    // cacheSize is given in MB
    options.cacheSize(cacheSize * 1048576);
    options.createIfMissing(true);
    DB db = null;
    try {
        db = factory.open(new File("leveldb\\" + poolName), options);
        // Use the db in here....
    } catch (Exception e) {
        // Make sure you close the db to shutdown the
        // database and avoid resource leaks.
        // db.close();
    }
    return new LevelDBPool(poolName, db, cacheSize);
}
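Note that the catch block above swallows any open failure, so the method can return a LevelDBPool wrapping a null DB handle. A hedged alternative that fails loudly instead is sketched below; the CacheDbOpener class, the portable path handling, and the rethrow strategy are assumptions of this sketch, not Mycat's actual code:

import java.io.File;
import java.io.IOException;

import org.iq80.leveldb.DB;
import org.iq80.leveldb.Options;
import org.iq80.leveldb.impl.Iq80DBFactory;

public class CacheDbOpener {
    /** Opens the LevelDB store backing a cache pool, or fails loudly instead of returning null. */
    public static DB openCacheDb(String poolName, int cacheSizeMb) {
        Options options = new Options();
        options.cacheSize(cacheSizeMb * 1048576L); // cache size is given in MB; the long literal avoids int overflow
        options.createIfMissing(true);
        try {
            // File.separator instead of a hard-coded backslash keeps the path portable.
            return Iq80DBFactory.factory.open(new File("leveldb" + File.separator + poolName), options);
        } catch (IOException e) {
            throw new RuntimeException("Failed to open LevelDB cache pool " + poolName, e);
        }
    }
}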
Use of org.iq80.leveldb.Options in project distributedlog by twitter.
The class ReaderWithOffsets, method main:
public static void main(String[] args) throws Exception {
    if (4 != args.length) {
        System.out.println(HELP);
        return;
    }
    String dlUriStr = args[0];
    final String streamName = args[1];
    final String readerId = args[2];
    final String offsetStoreFile = args[3];
    URI uri = URI.create(dlUriStr);
    DistributedLogConfiguration conf = new DistributedLogConfiguration();
    DistributedLogNamespace namespace = DistributedLogNamespaceBuilder.newBuilder().conf(conf).uri(uri).build();
    // open the dlm
    System.out.println("Opening log stream " + streamName);
    DistributedLogManager dlm = namespace.openLog(streamName);
    // open the offset store
    Options options = new Options();
    options.createIfMissing(true);
    final DB offsetDB = factory.open(new File(offsetStoreFile), options);
    final AtomicReference<DLSN> lastDLSN = new AtomicReference<DLSN>(null);
    // offset updater
    final ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();
    executorService.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            if (null != lastDLSN.get()) {
                offsetDB.put(readerId.getBytes(UTF_8), lastDLSN.get().serializeBytes());
                System.out.println("Updated reader " + readerId + " offset to " + lastDLSN.get());
            }
        }
    }, 10, 10, TimeUnit.SECONDS);
    try {
        byte[] offset = offsetDB.get(readerId.getBytes(UTF_8));
        DLSN dlsn;
        if (null == offset) {
            dlsn = DLSN.InitialDLSN;
        } else {
            dlsn = DLSN.deserializeBytes(offset);
        }
        readLoop(dlm, dlsn, lastDLSN);
    } finally {
        offsetDB.close();
        dlm.close();
        namespace.close();
    }
}
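The offset-store half of this pattern stands on its own: LevelDB keeps a per-reader checkpoint, keyed by reader id and refreshed on a schedule. A minimal sketch of just that piece follows; the "offsets" directory, the reader id, and the String-typed position are placeholders standing in for the DLSN handling above:

import java.io.File;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

import org.iq80.leveldb.DB;
import org.iq80.leveldb.Options;
import org.iq80.leveldb.impl.Iq80DBFactory;

public class OffsetStoreSketch {
    public static void main(String[] args) throws Exception {
        Options options = new Options();
        options.createIfMissing(true);
        final DB offsetDB = Iq80DBFactory.factory.open(new File("offsets"), options);
        final String readerId = "reader-1"; // illustrative id
        final AtomicReference<String> lastPosition = new AtomicReference<String>();

        // Flush the most recently observed position every 10 seconds, as the example does.
        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        executor.scheduleAtFixedRate(new Runnable() {
            @Override
            public void run() {
                String position = lastPosition.get();
                if (position != null) {
                    offsetDB.put(readerId.getBytes(StandardCharsets.UTF_8),
                                 position.getBytes(StandardCharsets.UTF_8));
                }
            }
        }, 10, 10, TimeUnit.SECONDS);

        // On startup, resume from the stored offset if one exists.
        byte[] stored = offsetDB.get(readerId.getBytes(StandardCharsets.UTF_8));
        String startPosition = (stored == null) ? "beginning" : new String(stored, StandardCharsets.UTF_8);
        System.out.println("Resuming " + readerId + " from " + startPosition);

        // ... a read loop would update lastPosition here ...
        executor.shutdown();
        offsetDB.close();
    }
}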
Use of org.iq80.leveldb.Options in project hadoop by apache.
The class RollingLevelDB, method initRollingLevelDB:
private void initRollingLevelDB(Long dbStartTime, Path rollingInstanceDBPath) {
    if (rollingdbs.containsKey(dbStartTime)) {
        return;
    }
    Options options = new Options();
    options.createIfMissing(true);
    options.cacheSize(conf.getLong(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE, YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE));
    options.maxOpenFiles(conf.getInt(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_MAX_OPEN_FILES, YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_MAX_OPEN_FILES));
    options.writeBufferSize(conf.getInt(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_WRITE_BUFFER_SIZE, YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_WRITE_BUFFER_SIZE));
    LOG.info("Initializing rolling leveldb instance :" + rollingInstanceDBPath + " for start time: " + dbStartTime);
    DB db = null;
    try {
        db = factory.open(new File(rollingInstanceDBPath.toUri().getPath()), options);
        rollingdbs.put(dbStartTime, db);
        String dbName = fdf.format(dbStartTime);
        LOG.info("Added rolling leveldb instance " + dbName + " to " + getName());
    } catch (IOException ioe) {
        LOG.warn("Failed to open rolling leveldb instance :" + new File(rollingInstanceDBPath.toUri().getPath()), ioe);
    }
}
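The three settings pulled from YarnConfiguration map one-to-one onto LevelDB tuning knobs. A minimal sketch with hard-coded values is shown below; the numbers are placeholders, not YARN's defaults:

import java.io.File;

import org.iq80.leveldb.DB;
import org.iq80.leveldb.Options;
import org.iq80.leveldb.impl.Iq80DBFactory;

public class RollingDbSketch {
    public static void main(String[] args) throws Exception {
        Options options = new Options();
        options.createIfMissing(true);
        options.cacheSize(100L * 1024 * 1024);      // read cache: bytes of uncompressed blocks kept in memory
        options.maxOpenFiles(1000);                 // file-descriptor budget for open table files
        options.writeBufferSize(16 * 1024 * 1024);  // memtable size before a flush to disk
        DB db = Iq80DBFactory.factory.open(new File("rolling-example"), options);
        db.close();
    }
}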
Use of org.iq80.leveldb.Options in project hadoop by apache.
The class HistoryServerLeveldbStateStoreService, method startStorage:
@Override
protected void startStorage() throws IOException {
    Path storeRoot = createStorageDir(getConfig());
    Options options = new Options();
    options.createIfMissing(false);
    options.logger(new LeveldbLogger());
    LOG.info("Using state database at " + storeRoot + " for recovery");
    File dbfile = new File(storeRoot.toString());
    try {
        db = JniDBFactory.factory.open(dbfile, options);
    } catch (NativeDB.DBException e) {
        if (e.isNotFound() || e.getMessage().contains(" does not exist ")) {
            LOG.info("Creating state database at " + dbfile);
            options.createIfMissing(true);
            try {
                db = JniDBFactory.factory.open(dbfile, options);
                // store version
                storeVersion();
            } catch (DBException dbErr) {
                throw new IOException(dbErr.getMessage(), dbErr);
            }
        } else {
            throw e;
        }
    }
    checkVersion();
}
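The recovery pattern here is reusable: open strictly, and only when the store is genuinely missing, create it and stamp a version. The sketch below simplifies the flow by probing the directory up front instead of catching the factory-specific "not found" exception; the StateStoreSketch class, the "schema-version" key, and the directory name are assumptions of the sketch, not Hadoop's code:

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.iq80.leveldb.DB;
import org.iq80.leveldb.Options;
import org.iq80.leveldb.impl.Iq80DBFactory;

public class StateStoreSketch {
    public static DB openOrCreate(File dbDir) throws IOException {
        Options options = new Options();
        // Simplification of the Hadoop flow: check for the directory instead of relying on
        // NativeDB.DBException. Anything that fails after this point is a real error.
        boolean exists = dbDir.isDirectory();
        options.createIfMissing(!exists);
        DB db = Iq80DBFactory.factory.open(dbDir, options);
        if (!exists) {
            // Freshly created store: stamp it, mirroring storeVersion() in the original.
            db.put("schema-version".getBytes(StandardCharsets.UTF_8),
                   "1.0".getBytes(StandardCharsets.UTF_8));
        }
        return db;
    }

    public static void main(String[] args) throws IOException {
        DB db = openOrCreate(new File("state-store"));
        db.close();
    }
}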