
Example 1 with JniDBFactory

Use of org.fusesource.leveldbjni.JniDBFactory in project hadoop by apache, from the class LevelDBCacheTimelineStore, method serviceInit:

@Override
protected synchronized void serviceInit(Configuration conf) throws Exception {
    configuration = conf;
    Options options = new Options();
    options.createIfMissing(true);
    options.cacheSize(conf.getLong(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_CACHE_READ_CACHE_SIZE, YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_CACHE_READ_CACHE_SIZE));
    JniDBFactory factory = new JniDBFactory();
    Path dbPath = new Path(conf.get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_PATH), dbId + CACHED_LDB_FILE_PREFIX);
    FileSystem localFS = null;
    try {
        localFS = FileSystem.getLocal(conf);
        if (!localFS.exists(dbPath)) {
            if (!localFS.mkdirs(dbPath)) {
                throw new IOException("Couldn't create directory for leveldb " + "timeline store " + dbPath);
            }
            localFS.setPermission(dbPath, LeveldbUtils.LEVELDB_DIR_UMASK);
        }
    } finally {
        IOUtils.cleanup(LOG, localFS);
    }
    LOG.info("Using leveldb path " + dbPath);
    entityDb = factory.open(new File(dbPath.toString()), options);
    entities = new LevelDBMapAdapter<>(entityDb);
    super.serviceInit(conf);
}
Also used: Path(org.apache.hadoop.fs.Path) Options(org.iq80.leveldb.Options) FileSystem(org.apache.hadoop.fs.FileSystem) JniDBFactory(org.fusesource.leveldbjni.JniDBFactory) IOException(java.io.IOException) File(java.io.File)
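
For context, a minimal sketch of the JniDBFactory API these examples revolve around: open a database with Options, read and write byte[] keys and values, and close it when done. The database path and the key/value strings below are made up for illustration.

import java.io.File;
import java.io.IOException;

import org.fusesource.leveldbjni.JniDBFactory;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.Options;

public class JniDBFactoryExample {
    public static void main(String[] args) throws IOException {
        Options options = new Options();
        options.createIfMissing(true);
        JniDBFactory factory = new JniDBFactory();
        // "/tmp/example-leveldb" is an arbitrary path chosen for this sketch.
        DB db = factory.open(new File("/tmp/example-leveldb"), options);
        try {
            db.put(JniDBFactory.bytes("key"), JniDBFactory.bytes("value"));
            String value = JniDBFactory.asString(db.get(JniDBFactory.bytes("key")));
            System.out.println(value);
        } finally {
            db.close();
        }
    }
}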

Example 2 with JniDBFactory

Use of org.fusesource.leveldbjni.JniDBFactory in project hadoop by apache, from the class LeveldbTimelineStateStore, method startStorage:

@Override
protected void startStorage() throws IOException {
    Options options = new Options();
    Path dbPath = new Path(getConfig().get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_STATE_STORE_PATH), DB_NAME);
    FileSystem localFS = null;
    try {
        localFS = FileSystem.getLocal(getConfig());
        if (!localFS.exists(dbPath)) {
            if (!localFS.mkdirs(dbPath)) {
                throw new IOException("Couldn't create directory for leveldb " + "timeline store " + dbPath);
            }
            localFS.setPermission(dbPath, LEVELDB_DIR_UMASK);
        }
    } finally {
        IOUtils.cleanup(LOG, localFS);
    }
    JniDBFactory factory = new JniDBFactory();
    try {
        options.createIfMissing(false);
        db = factory.open(new File(dbPath.toString()), options);
        LOG.info("Loading the existing database at th path: " + dbPath.toString());
        checkVersion();
    } catch (NativeDB.DBException e) {
        if (e.isNotFound() || e.getMessage().contains(" does not exist ")) {
            try {
                options.createIfMissing(true);
                db = factory.open(new File(dbPath.toString()), options);
                LOG.info("Creating a new database at th path: " + dbPath.toString());
                storeVersion(CURRENT_VERSION_INFO);
            } catch (DBException ex) {
                throw new IOException(ex);
            }
        } else {
            throw new IOException(e);
        }
    } catch (DBException e) {
        throw new IOException(e);
    }
}
Also used: Path(org.apache.hadoop.fs.Path) Options(org.iq80.leveldb.Options) DBException(org.iq80.leveldb.DBException) FileSystem(org.apache.hadoop.fs.FileSystem) IOException(java.io.IOException) JniDBFactory(org.fusesource.leveldbjni.JniDBFactory) NativeDB(org.fusesource.leveldbjni.internal.NativeDB) File(java.io.File)
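
The checkVersion() and storeVersion(CURRENT_VERSION_INFO) calls above are defined elsewhere in the class and are not shown here. Purely as an illustration of the idea, a schema-version guard over a single LevelDB key might look roughly like the sketch below; the key name, the plain-string encoding, and the class name are assumptions, and the real Hadoop code stores its own version record rather than this format.

import java.io.IOException;

import org.fusesource.leveldbjni.JniDBFactory;
import org.iq80.leveldb.DB;

// Hypothetical sketch only; not the LeveldbTimelineStateStore implementation.
class VersionGuardSketch {
    private static final byte[] VERSION_KEY = JniDBFactory.bytes("state-store-version");
    private static final int CURRENT_VERSION = 1; // assumed encoding: a plain int string

    void checkOrStoreVersion(DB db) throws IOException {
        byte[] data = db.get(VERSION_KEY);
        if (data == null) {
            // Fresh database: record the current schema version.
            db.put(VERSION_KEY, JniDBFactory.bytes(Integer.toString(CURRENT_VERSION)));
            return;
        }
        int stored = Integer.parseInt(JniDBFactory.asString(data));
        if (stored != CURRENT_VERSION) {
            throw new IOException("Incompatible state store schema version " + stored
                + ", expected " + CURRENT_VERSION);
        }
    }
}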

Example 3 with JniDBFactory

Use of org.fusesource.leveldbjni.JniDBFactory in project hadoop by apache, from the class RollingLevelDBTimelineStore, method serviceInit:

@Override
@SuppressWarnings("unchecked")
protected void serviceInit(Configuration conf) throws Exception {
    Preconditions.checkArgument(conf.getLong(TIMELINE_SERVICE_TTL_MS, DEFAULT_TIMELINE_SERVICE_TTL_MS) > 0, "%s property value should be greater than zero", TIMELINE_SERVICE_TTL_MS);
    Preconditions.checkArgument(conf.getLong(TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS, DEFAULT_TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS) > 0, "%s property value should be greater than zero", TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS);
    Preconditions.checkArgument(conf.getLong(TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE, DEFAULT_TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE) >= 0, "%s property value should be greater than or equal to zero", TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE);
    Preconditions.checkArgument(conf.getLong(TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE, DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE) > 0, " %s property value should be greater than zero", TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE);
    Preconditions.checkArgument(conf.getLong(TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE, DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE) > 0, "%s property value should be greater than zero", TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE);
    Preconditions.checkArgument(conf.getLong(TIMELINE_SERVICE_LEVELDB_MAX_OPEN_FILES, DEFAULT_TIMELINE_SERVICE_LEVELDB_MAX_OPEN_FILES) > 0, "%s property value should be greater than zero", TIMELINE_SERVICE_LEVELDB_MAX_OPEN_FILES);
    Preconditions.checkArgument(conf.getLong(TIMELINE_SERVICE_LEVELDB_WRITE_BUFFER_SIZE, DEFAULT_TIMELINE_SERVICE_LEVELDB_WRITE_BUFFER_SIZE) > 0, "%s property value should be greater than zero", TIMELINE_SERVICE_LEVELDB_WRITE_BUFFER_SIZE);
    Options options = new Options();
    options.createIfMissing(true);
    options.cacheSize(conf.getLong(TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE, DEFAULT_TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE));
    JniDBFactory factory = new JniDBFactory();
    Path dbPath = new Path(conf.get(TIMELINE_SERVICE_LEVELDB_PATH), FILENAME);
    Path domainDBPath = new Path(dbPath, DOMAIN);
    Path starttimeDBPath = new Path(dbPath, STARTTIME);
    Path ownerDBPath = new Path(dbPath, OWNER);
    FileSystem localFS = null;
    try {
        localFS = FileSystem.getLocal(conf);
        if (!localFS.exists(dbPath)) {
            if (!localFS.mkdirs(dbPath)) {
                throw new IOException("Couldn't create directory for leveldb " + "timeline store " + dbPath);
            }
            localFS.setPermission(dbPath, LEVELDB_DIR_UMASK);
        }
        if (!localFS.exists(domainDBPath)) {
            if (!localFS.mkdirs(domainDBPath)) {
                throw new IOException("Couldn't create directory for leveldb " + "timeline store " + domainDBPath);
            }
            localFS.setPermission(domainDBPath, LEVELDB_DIR_UMASK);
        }
        if (!localFS.exists(starttimeDBPath)) {
            if (!localFS.mkdirs(starttimeDBPath)) {
                throw new IOException("Couldn't create directory for leveldb " + "timeline store " + starttimeDBPath);
            }
            localFS.setPermission(starttimeDBPath, LEVELDB_DIR_UMASK);
        }
        if (!localFS.exists(ownerDBPath)) {
            if (!localFS.mkdirs(ownerDBPath)) {
                throw new IOException("Couldn't create directory for leveldb " + "timeline store " + ownerDBPath);
            }
            localFS.setPermission(ownerDBPath, LEVELDB_DIR_UMASK);
        }
    } finally {
        IOUtils.cleanup(LOG, localFS);
    }
    options.maxOpenFiles(conf.getInt(TIMELINE_SERVICE_LEVELDB_MAX_OPEN_FILES, DEFAULT_TIMELINE_SERVICE_LEVELDB_MAX_OPEN_FILES));
    options.writeBufferSize(conf.getInt(TIMELINE_SERVICE_LEVELDB_WRITE_BUFFER_SIZE, DEFAULT_TIMELINE_SERVICE_LEVELDB_WRITE_BUFFER_SIZE));
    LOG.info("Using leveldb path " + dbPath);
    domaindb = factory.open(new File(domainDBPath.toString()), options);
    entitydb = new RollingLevelDB(ENTITY);
    entitydb.init(conf);
    indexdb = new RollingLevelDB(INDEX);
    indexdb.init(conf);
    starttimedb = factory.open(new File(starttimeDBPath.toString()), options);
    ownerdb = factory.open(new File(ownerDBPath.toString()), options);
    checkVersion();
    startTimeWriteCache = Collections.synchronizedMap(new LRUMap(getStartTimeWriteCacheSize(conf)));
    startTimeReadCache = Collections.synchronizedMap(new LRUMap(getStartTimeReadCacheSize(conf)));
    writeBatchSize = conf.getInt(TIMELINE_SERVICE_LEVELDB_WRITE_BATCH_SIZE, DEFAULT_TIMELINE_SERVICE_LEVELDB_WRITE_BATCH_SIZE);
    super.serviceInit(conf);
}
Also used: Path(org.apache.hadoop.fs.Path) Options(org.iq80.leveldb.Options) ReadOptions(org.iq80.leveldb.ReadOptions) LRUMap(org.apache.commons.collections.map.LRUMap) FileSystem(org.apache.hadoop.fs.FileSystem) JniDBFactory(org.fusesource.leveldbjni.JniDBFactory) IOException(java.io.IOException) File(java.io.File)
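
Note how the start-time caches are built: a commons-collections LRUMap (which is not thread-safe by itself) wrapped in Collections.synchronizedMap, so concurrent readers and writers share a bounded, least-recently-used cache. A minimal sketch of that pattern on its own, with an arbitrary capacity and made-up key/value types:

import java.util.Collections;
import java.util.Map;

import org.apache.commons.collections.map.LRUMap;

public class StartTimeCacheSketch {
    @SuppressWarnings("unchecked")
    public static void main(String[] args) {
        // LRUMap evicts the least recently used entry once it reaches capacity;
        // the synchronizedMap wrapper makes individual get/put calls thread-safe.
        // 10000 is an arbitrary capacity chosen for this sketch.
        Map<String, Long> startTimeCache =
            Collections.synchronizedMap(new LRUMap(10000));

        startTimeCache.put("entity-1", 1234L);
        System.out.println(startTimeCache.get("entity-1"));
    }
}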

Example 4 with JniDBFactory

Use of org.fusesource.leveldbjni.JniDBFactory in project hadoop by apache, from the class LeveldbTimelineStore, method serviceInit:

@Override
@SuppressWarnings("unchecked")
protected void serviceInit(Configuration conf) throws Exception {
    Preconditions.checkArgument(conf.getLong(YarnConfiguration.TIMELINE_SERVICE_TTL_MS, YarnConfiguration.DEFAULT_TIMELINE_SERVICE_TTL_MS) > 0, "%s property value should be greater than zero", YarnConfiguration.TIMELINE_SERVICE_TTL_MS);
    Preconditions.checkArgument(conf.getLong(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS, YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS) > 0, "%s property value should be greater than zero", YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS);
    Preconditions.checkArgument(conf.getLong(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE, YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE) >= 0, "%s property value should be greater than or equal to zero", YarnConfiguration.TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE);
    Preconditions.checkArgument(conf.getLong(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE, YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE) > 0, " %s property value should be greater than zero", YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE);
    Preconditions.checkArgument(conf.getLong(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE, YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE) > 0, "%s property value should be greater than zero", YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE);
    Options options = new Options();
    options.createIfMissing(true);
    options.cacheSize(conf.getLong(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE, YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE));
    if (factory == null) {
        factory = new JniDBFactory();
    }
    Path dbPath = new Path(conf.get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_PATH), FILENAME);
    FileSystem localFS = null;
    try {
        localFS = FileSystem.getLocal(conf);
        if (!localFS.exists(dbPath)) {
            if (!localFS.mkdirs(dbPath)) {
                throw new IOException("Couldn't create directory for leveldb " + "timeline store " + dbPath);
            }
            localFS.setPermission(dbPath, LEVELDB_DIR_UMASK);
        }
    } finally {
        IOUtils.cleanup(LOG, localFS);
    }
    LOG.info("Using leveldb path " + dbPath);
    try {
        db = factory.open(new File(dbPath.toString()), options);
    } catch (IOException ioe) {
        File dbFile = new File(dbPath.toString());
        File backupPath = new File(dbPath.toString() + BACKUP_EXT + Time.monotonicNow());
        LOG.warn("Incurred exception while loading LevelDb database. Backing " + "up at " + backupPath, ioe);
        FileUtils.copyDirectory(dbFile, backupPath);
        LOG.warn("Going to try repair");
        factory.repair(dbFile, options);
        db = factory.open(dbFile, options);
    }
    checkVersion();
    startTimeWriteCache = Collections.synchronizedMap(new LRUMap(getStartTimeWriteCacheSize(conf)));
    startTimeReadCache = Collections.synchronizedMap(new LRUMap(getStartTimeReadCacheSize(conf)));
    if (conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_TTL_ENABLE, true)) {
        deletionThread = new EntityDeletionThread(conf);
        deletionThread.start();
    }
    super.serviceInit(conf);
}
Also used: Path(org.apache.hadoop.fs.Path) LRUMap(org.apache.commons.collections.map.LRUMap) FileSystem(org.apache.hadoop.fs.FileSystem) JniDBFactory(org.fusesource.leveldbjni.JniDBFactory) IOException(java.io.IOException) File(java.io.File)
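
The catch block above amounts to a reusable open-with-repair pattern: copy the on-disk database aside, ask the factory to repair it, then open it again. Below is a hedged sketch of that pattern pulled out into a standalone helper; the class name, method name, and backup suffix are invented here, not Hadoop API.

import java.io.File;
import java.io.IOException;

import org.apache.commons.io.FileUtils;
import org.fusesource.leveldbjni.JniDBFactory;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.Options;

public final class LeveldbOpenWithRepair {
    // Helper name and ".backup-" suffix are illustrative only.
    public static DB openOrRepair(JniDBFactory factory, File dbFile, Options options)
            throws IOException {
        try {
            return factory.open(dbFile, options);
        } catch (IOException ioe) {
            // Preserve the unreadable database before attempting repair.
            File backup = new File(dbFile.getPath() + ".backup-" + System.currentTimeMillis());
            FileUtils.copyDirectory(dbFile, backup);
            factory.repair(dbFile, options);
            return factory.open(dbFile, options);
        }
    }

    private LeveldbOpenWithRepair() {
    }
}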

Example 5 with JniDBFactory

Use of org.fusesource.leveldbjni.JniDBFactory in project hadoop by apache, from the class TestLeveldbTimelineStore, method testLevelDbRepair:

/**
 * Test that LevelDb repair is attempted at least once during
 * serviceInit for LeveldbTimelineStore in case open fails the
 * first time.
 */
@Test
public void testLevelDbRepair() throws IOException {
    LeveldbTimelineStore store = new LeveldbTimelineStore();
    JniDBFactory factory = Mockito.mock(JniDBFactory.class);
    Mockito.when(factory.open(Mockito.any(File.class), Mockito.any(Options.class))).thenThrow(new IOException()).thenCallRealMethod();
    store.setFactory(factory);
    //Create the LevelDb in a different location
    File path = new File("target", this.getClass().getSimpleName() + "-tmpDir1").getAbsoluteFile();
    Configuration conf = new Configuration(this.config);
    conf.set(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_PATH, path.getAbsolutePath());
    try {
        store.init(conf);
        Mockito.verify(factory, Mockito.times(1)).repair(Mockito.any(File.class), Mockito.any(Options.class));
        FileFilter fileFilter = new WildcardFileFilter("*" + LeveldbTimelineStore.BACKUP_EXT + "*");
        Assert.assertTrue(path.listFiles(fileFilter).length > 0);
    } finally {
        store.close();
        fsContext.delete(new Path(path.getAbsolutePath()), true);
    }
}
Also used: Path(org.apache.hadoop.fs.Path) Options(org.iq80.leveldb.Options) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) Configuration(org.apache.hadoop.conf.Configuration) JniDBFactory(org.fusesource.leveldbjni.JniDBFactory) IOException(java.io.IOException) FileFilter(java.io.FileFilter) WildcardFileFilter(org.apache.commons.io.filefilter.WildcardFileFilter) File(java.io.File) Test(org.junit.Test)
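
The test above relies on Mockito's chained stubbing: the first open() call throws, and subsequent calls fall through via thenCallRealMethod(), so the store is forced down the repair path exactly once. Below is a small self-contained sketch of that chaining idea; the Loader interface and the values used are invented for illustration, and it returns a canned value on retry rather than calling a real method, since this interface has no real implementation.

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.io.IOException;

// Hypothetical collaborator used only to demonstrate chained stubbing.
interface Loader {
    String load(String name) throws IOException;
}

public class ChainedStubbingSketch {
    public static void main(String[] args) throws IOException {
        Loader loader = mock(Loader.class);
        // First call fails, subsequent calls succeed with a fixed value.
        when(loader.load("db")).thenThrow(new IOException("first attempt fails"))
                               .thenReturn("opened");

        try {
            loader.load("db");
        } catch (IOException expected) {
            // Retry after the simulated failure.
        }
        System.out.println(loader.load("db")); // prints "opened"
    }
}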

Aggregations

File (java.io.File): 5
IOException (java.io.IOException): 5
Path (org.apache.hadoop.fs.Path): 5
JniDBFactory (org.fusesource.leveldbjni.JniDBFactory): 5
FileSystem (org.apache.hadoop.fs.FileSystem): 4
Options (org.iq80.leveldb.Options): 4
LRUMap (org.apache.commons.collections.map.LRUMap): 2
FileFilter (java.io.FileFilter): 1
WildcardFileFilter (org.apache.commons.io.filefilter.WildcardFileFilter): 1
Configuration (org.apache.hadoop.conf.Configuration): 1
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 1
NativeDB (org.fusesource.leveldbjni.internal.NativeDB): 1
DBException (org.iq80.leveldb.DBException): 1
ReadOptions (org.iq80.leveldb.ReadOptions): 1
Test (org.junit.Test): 1