Usage of com.sleepycat.je.EnvironmentConfig in project GeoGig (boundlessgeo): class EnvironmentBuilder, method get().
/**
 * Opens (creating on demand) the BDB JE {@link Environment} rooted at this builder's
 * configured directory: either the explicit {@code absolutePath}, or the resolved
 * geogig repository directory joined with the configured {@code path} segments.
 *
 * @return the opened {@link Environment}
 * @throws IllegalStateException if no repository home can be resolved, or the store
 *         directory cannot be created
 * @see com.google.inject.Provider#get()
 */
@Override
public synchronized Environment get() {
    final Optional<URL> repoUrl = new ResolveGeogigDir(platform).call();
    if (!repoUrl.isPresent() && absolutePath == null) {
        throw new IllegalStateException("Can't find geogig repository home");
    }
    // Resolve the directory the environment lives in: the explicit absolute path wins,
    // otherwise <repo>/<path...> relative to the resolved geogig directory.
    final File storeDirectory;
    if (absolutePath != null) {
        storeDirectory = absolutePath;
    } else {
        File currDir;
        try {
            currDir = new File(repoUrl.get().toURI());
        } catch (URISyntaxException e) {
            throw Throwables.propagate(e);
        }
        File dir = currDir;
        for (String subdir : path) {
            dir = new File(dir, subdir);
        }
        storeDirectory = dir;
    }
    if (!storeDirectory.exists() && !storeDirectory.mkdirs()) {
        throw new IllegalStateException("Unable to create Environment directory: '"
                + storeDirectory.getAbsolutePath() + "'");
    }
    EnvironmentConfig envCfg;
    if (this.forceConfig == null) {
        // Seed a je.properties tuned for the staging or object database if none exists yet.
        File conf = new File(storeDirectory, "je.properties");
        if (!conf.exists()) {
            String resource = stagingDatabase ? "je.properties.staging" : "je.properties.objectdb";
            ByteSource from = Resources.asByteSource((getClass().getResource(resource)));
            try {
                from.copyTo(Files.asByteSink(conf));
            } catch (IOException e) {
                // BUGFIX: was `Throwables.propagate(e);` with the result discarded, which
                // silently swallowed the copy failure. Rethrow so the caller sees it.
                throw Throwables.propagate(e);
            }
        }
        // use the default settings
        envCfg = new EnvironmentConfig();
        envCfg.setAllowCreate(true);
        envCfg.setCacheMode(CacheMode.MAKE_COLD);
        envCfg.setLockTimeout(5, TimeUnit.SECONDS);
        envCfg.setDurability(Durability.COMMIT_SYNC);
    } else {
        envCfg = this.forceConfig;
    }
    Environment env;
    try {
        env = new Environment(storeDirectory, envCfg);
    } catch (RuntimeException lockedEx) {
        if (readOnly) {
            // This happens when trying to open the env read-only while it is already open
            // read/write inside the same process; retry with the config flagged read-only
            // (the database itself is opened read-only by JEObjectDatabase).
            envCfg.setReadOnly(true);
            env = new Environment(storeDirectory, envCfg);
        } else {
            throw lockedEx;
        }
    }
    return env;
}
Usage of com.sleepycat.je.EnvironmentConfig in project sirix (sirixdb): class BerkeleyStorageFactory, method createStorage().
/**
 * Creates the Berkeley DB storage backing the given resource, creating the data
 * directory on disk if it does not exist yet.
 *
 * @param resourceConfig the resource configuration
 * @return the Berkeley DB storage
 * @throws NullPointerException if {@link ResourceConfiguration} is {@code null}
 * @throws SirixIOException if the storage couldn't be created because of an I/O exception
 */
public BerkeleyStorage createStorage(final ResourceConfiguration resourceConfig) {
    try {
        final Path dataPath = resourceConfig.mPath.resolve(ResourceConfiguration.ResourcePaths.DATA.getFile());
        if (!Files.exists(dataPath)) {
            Files.createDirectories(dataPath);
        }
        final ByteHandlePipeline pipeline = checkNotNull(resourceConfig.mByteHandler);
        final DatabaseConfig dbConfig = generateDBConf();
        final EnvironmentConfig envConfig = generateEnvConf();
        // Snapshot the directory contents; the stream must be closed explicitly.
        final List<Path> entries;
        try (final Stream<Path> children = Files.list(dataPath)) {
            entries = children.collect(toList());
        }
        // Allow creation only for a fresh directory (empty, or holding just sirix.data).
        final boolean onlySirixData = entries.size() == 1
                && "sirix.data".equals(entries.get(0).getFileName().toString());
        if (entries.isEmpty() || onlySirixData) {
            dbConfig.setAllowCreate(true);
            envConfig.setAllowCreate(true);
        }
        final Environment environment = new Environment(dataPath.toFile(), envConfig);
        final Database database = environment.openDatabase(null, NAME, dbConfig);
        return new BerkeleyStorage(environment, database, pipeline);
    } catch (final DatabaseException | IOException e) {
        throw new SirixIOException(e);
    }
}
Usage of com.sleepycat.je.EnvironmentConfig in project sirix (sirixdb): class BerkeleyStorageFactory, method generateEnvConf().
/**
 * Builds the transactional {@link EnvironmentConfig} used for every storage instance.
 *
 * @return transactional environment configuration with a 1 MiB cache
 */
private static EnvironmentConfig generateEnvConf() {
    final EnvironmentConfig envConfig = new EnvironmentConfig();
    envConfig.setTransactional(true);
    // 1 MiB cache, expressed as a shift instead of 1024 * 1024.
    envConfig.setCacheSize(1 << 20);
    return envConfig;
}
Usage of com.sleepycat.je.EnvironmentConfig in project leopard (tanhaichao): class BdbImpl, method init().
/**
 * Opens the Berkeley DB environment under {@code dataDir} and wires up the default
 * {@code BdbDatabaseImpl} instance.
 *
 * @throws EnvironmentLockedException if another process holds the environment lock
 * @throws DatabaseException on any other Berkeley DB failure
 * @throws IllegalStateException if the data directory cannot be created
 */
public void init() throws EnvironmentLockedException, DatabaseException {
    EnvironmentConfig environmentConfig = new EnvironmentConfig();
    // NOTE(review): dbConfig is configured but never used since openDatabase below was
    // commented out; kept for interface stability — confirm whether it can be removed.
    DatabaseConfig dbConfig = new DatabaseConfig();
    dbConfig.setAllowCreate(true);
    dbConfig.setSortedDuplicates(true);
    environmentConfig.setReadOnly(false);
    environmentConfig.setAllowCreate(true);
    // Ensure the on-disk location exists before opening the environment. BUGFIX: the
    // mkdirs() result was previously ignored, deferring a failed creation to an opaque
    // DatabaseException from the Environment constructor.
    if (!dataDir.exists() && !dataDir.mkdirs()) {
        throw new IllegalStateException("Unable to create data directory: " + dataDir.getAbsolutePath());
    }
    environment = new Environment(dataDir, environmentConfig);
    this.bdb = new BdbDatabaseImpl(environment, "DEFAULT");
}
Usage of com.sleepycat.je.EnvironmentConfig in project voldemort: class BdbGrowth, method main().
/**
 * Benchmark driver: grows a BDB JE database in {@code increment}-sized batches up to
 * {@code total_size} entries, measuring per-entry read and write latency at each step
 * and forcing log cleaning plus a checkpoint between iterations.
 *
 * <p>Usage: {@code java BdbGrowth directory cache_size total_size increment threads}
 */
public static void main(String[] args) throws Exception {
    if (args.length != 5) {
        System.err.println("USAGE: java BdbGrowth directory cache_size total_size increment threads");
        System.exit(1);
    }
    final String dir = args[0];
    final long cacheSize = Long.parseLong(args[1]);
    final int totalSize = Integer.parseInt(args[2]);
    final int increment = Integer.parseInt(args[3]);
    final int threads = Integer.parseInt(args[4]);
    Environment environment;
    EnvironmentConfig environmentConfig;
    DatabaseConfig databaseConfig;
    environmentConfig = new EnvironmentConfig();
    environmentConfig.setCacheSize(cacheSize);
    environmentConfig.setDurability(Durability.COMMIT_NO_SYNC);
    environmentConfig.setConfigParam(EnvironmentConfig.LOG_FILE_MAX, "1000000000");
    environmentConfig.setConfigParam(EnvironmentConfig.CLEANER_MAX_BATCH_FILES, "100");
    environmentConfig.setConfigParam(EnvironmentConfig.CLEANER_READ_SIZE, "52428800");
    environmentConfig.setAllowCreate(true);
    environmentConfig.setTransactional(true);
    databaseConfig = new DatabaseConfig();
    databaseConfig.setAllowCreate(true);
    databaseConfig.setTransactional(true);
    databaseConfig.setNodeMaxEntries(1024);
    // Start from an empty directory: create it, or wipe any leftover files.
    File bdbDir = new File(dir);
    if (!bdbDir.exists()) {
        bdbDir.mkdir();
    } else {
        for (File f : bdbDir.listFiles())
            f.delete();
    }
    environment = new Environment(bdbDir, environmentConfig);
    final Database db = environment.openDatabase(null, "test", databaseConfig);
    final Random rand = new Random();
    int iterations = totalSize / increment;
    long[] readTimes = new long[iterations];
    long[] writeTimes = new long[iterations];
    ExecutorService service = Executors.newFixedThreadPool(threads);
    for (int i = 0; i < iterations; i++) {
        System.out.println("Starting iteration " + i);
        List<Future<Object>> results = new ArrayList<Future<Object>>(increment);
        long startTime = System.currentTimeMillis();
        final int fi = i;
        // Write `increment` sequential keys concurrently, key == value.
        for (int j = 0; j < increment; j++) {
            final int fj = j;
            results.add(service.submit(new Callable<Object>() {

                public Object call() throws Exception {
                    db.put(null, new DatabaseEntry(Integer.toString(fi * increment + fj).getBytes()),
                            new DatabaseEntry(Integer.toString(fi * increment + fj).getBytes()));
                    return null;
                }
            }));
        }
        for (int j = 0; j < increment; j++)
            results.get(j).get();
        writeTimes[i] = System.currentTimeMillis() - startTime;
        System.out.println("write: " + (writeTimes[i] / (double) increment));
        results.clear();
        startTime = System.currentTimeMillis();
        // Read back random keys from everything written so far.
        for (int j = 0; j < increment; j++) {
            results.add(service.submit(new Callable<Object>() {

                public Object call() throws Exception {
                    int value = rand.nextInt((fi + 1) * increment);
                    return db.get(null, new DatabaseEntry(Integer.toString(value).getBytes()),
                            new DatabaseEntry(Integer.toString(value).getBytes()), null);
                }
            }));
        }
        for (int j = 0; j < increment; j++)
            results.get(j).get();
        readTimes[i] = (System.currentTimeMillis() - startTime);
        System.out.println("read: " + (readTimes[i] / (double) increment));
        // BUGFIX: the loop previously tested the *cumulative* total (`while (cleaned > 0)`),
        // so once any file was cleaned it never terminated. Loop on the per-call count and
        // accumulate the total separately.
        int cleaned = 0;
        int justCleaned;
        do {
            justCleaned = environment.cleanLog();
            cleaned += justCleaned;
        } while (justCleaned > 0);
        if (cleaned > 0)
            System.out.println("Cleaned " + cleaned + " files.");
        CheckpointConfig cp = new CheckpointConfig();
        cp.setForce(true);
        // BUGFIX: `cp` was built with setForce(true) but checkpoint(null) was called,
        // so the forced checkpoint never happened. Pass the config.
        environment.checkpoint(cp);
        environment.compress();
        environment.sync();
        System.out.println("Cleaning, Checkpointing and compression completed.");
    }
    System.out.println();
    System.out.println("iteration read write:");
    for (int i = 0; i < iterations; i++) {
        System.out.print(i);
        System.out.print(" " + readTimes[i] / (double) increment);
        System.out.println(" " + writeTimes[i] / (double) increment);
    }
    System.out.println(environment.getStats(null));
    System.exit(0);
}
Aggregations