Search in sources:

Example 1 with LRUMap

use of org.apache.commons.collections.map.LRUMap in project hadoop by apache.

From the class RollingLevelDBTimelineStore, method serviceInit.

@Override
@SuppressWarnings("unchecked")
protected void serviceInit(Configuration conf) throws Exception {
    // Fail fast on misconfigured sizes/intervals before touching the
    // filesystem or opening any database.
    Preconditions.checkArgument(conf.getLong(TIMELINE_SERVICE_TTL_MS, DEFAULT_TIMELINE_SERVICE_TTL_MS) > 0, "%s property value should be greater than zero", TIMELINE_SERVICE_TTL_MS);
    Preconditions.checkArgument(conf.getLong(TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS, DEFAULT_TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS) > 0, "%s property value should be greater than zero", TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS);
    // Read cache may legitimately be disabled with 0, hence >= 0 here.
    Preconditions.checkArgument(conf.getLong(TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE, DEFAULT_TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE) >= 0, "%s property value should be greater than or equal to zero", TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE);
    // Message normalized: previously carried a stray leading space (" %s"),
    // inconsistent with every other precondition message in this method.
    Preconditions.checkArgument(conf.getLong(TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE, DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE) > 0, "%s property value should be greater than zero", TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE);
    Preconditions.checkArgument(conf.getLong(TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE, DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE) > 0, "%s property value should be greater than zero", TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE);
    Preconditions.checkArgument(conf.getLong(TIMELINE_SERVICE_LEVELDB_MAX_OPEN_FILES, DEFAULT_TIMELINE_SERVICE_LEVELDB_MAX_OPEN_FILES) > 0, "%s property value should be greater than zero", TIMELINE_SERVICE_LEVELDB_MAX_OPEN_FILES);
    Preconditions.checkArgument(conf.getLong(TIMELINE_SERVICE_LEVELDB_WRITE_BUFFER_SIZE, DEFAULT_TIMELINE_SERVICE_LEVELDB_WRITE_BUFFER_SIZE) > 0, "%s property value should be greater than zero", TIMELINE_SERVICE_LEVELDB_WRITE_BUFFER_SIZE);
    Options options = new Options();
    options.createIfMissing(true);
    options.cacheSize(conf.getLong(TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE, DEFAULT_TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE));
    JniDBFactory factory = new JniDBFactory();
    // Layout: <configured-path>/<FILENAME> root, with one sub-directory per
    // non-rolling database (domain / starttime / owner).
    Path dbPath = new Path(conf.get(TIMELINE_SERVICE_LEVELDB_PATH), FILENAME);
    Path domainDBPath = new Path(dbPath, DOMAIN);
    Path starttimeDBPath = new Path(dbPath, STARTTIME);
    Path ownerDBPath = new Path(dbPath, OWNER);
    FileSystem localFS = null;
    try {
        localFS = FileSystem.getLocal(conf);
        // Identical creation/permission policy for the root and each
        // sub-database directory.
        ensureDbDirectory(localFS, dbPath);
        ensureDbDirectory(localFS, domainDBPath);
        ensureDbDirectory(localFS, starttimeDBPath);
        ensureDbDirectory(localFS, ownerDBPath);
    } finally {
        IOUtils.cleanup(LOG, localFS);
    }
    options.maxOpenFiles(conf.getInt(TIMELINE_SERVICE_LEVELDB_MAX_OPEN_FILES, DEFAULT_TIMELINE_SERVICE_LEVELDB_MAX_OPEN_FILES));
    options.writeBufferSize(conf.getInt(TIMELINE_SERVICE_LEVELDB_WRITE_BUFFER_SIZE, DEFAULT_TIMELINE_SERVICE_LEVELDB_WRITE_BUFFER_SIZE));
    LOG.info("Using leveldb path " + dbPath);
    // Plain leveldb instances for domain/starttime/owner; entity and index
    // data go through RollingLevelDB instances that manage their own storage.
    domaindb = factory.open(new File(domainDBPath.toString()), options);
    entitydb = new RollingLevelDB(ENTITY);
    entitydb.init(conf);
    indexdb = new RollingLevelDB(INDEX);
    indexdb.init(conf);
    starttimedb = factory.open(new File(starttimeDBPath.toString()), options);
    ownerdb = factory.open(new File(ownerDBPath.toString()), options);
    checkVersion();
    // LRUMap predates generics; synchronizedMap wrapping makes the caches
    // safe for concurrent readers/writers.
    startTimeWriteCache = Collections.synchronizedMap(new LRUMap(getStartTimeWriteCacheSize(conf)));
    startTimeReadCache = Collections.synchronizedMap(new LRUMap(getStartTimeReadCacheSize(conf)));
    writeBatchSize = conf.getInt(TIMELINE_SERVICE_LEVELDB_WRITE_BATCH_SIZE, DEFAULT_TIMELINE_SERVICE_LEVELDB_WRITE_BATCH_SIZE);
    super.serviceInit(conf);
}

/**
 * Creates {@code dir} on {@code fs} if it does not already exist and applies
 * the restrictive leveldb directory umask to a newly created directory.
 *
 * @param fs local filesystem to create the directory on.
 * @param dir directory to create.
 * @throws IOException if the directory cannot be created.
 */
private static void ensureDbDirectory(FileSystem fs, Path dir) throws IOException {
    if (!fs.exists(dir)) {
        if (!fs.mkdirs(dir)) {
            throw new IOException("Couldn't create directory for leveldb " + "timeline store " + dir);
        }
        fs.setPermission(dir, LEVELDB_DIR_UMASK);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Options(org.iq80.leveldb.Options) ReadOptions(org.iq80.leveldb.ReadOptions) LRUMap(org.apache.commons.collections.map.LRUMap) FileSystem(org.apache.hadoop.fs.FileSystem) JniDBFactory(org.fusesource.leveldbjni.JniDBFactory) IOException(java.io.IOException) File(java.io.File)

Example 2 with LRUMap

use of org.apache.commons.collections.map.LRUMap in project hadoop by apache.

From the class LeveldbTimelineStore, method serviceInit.

// Initializes the leveldb-backed timeline store: validates configuration,
// ensures the database directory exists, opens (or repairs) the database,
// checks the on-disk schema version, builds the start-time caches, and
// optionally starts the TTL-based entity deletion thread.
// Order matters here: the directory must exist before open(), and
// checkVersion() must run before any cache is trusted.
@Override
@SuppressWarnings("unchecked")
protected void serviceInit(Configuration conf) throws Exception {
    // Fail fast on misconfigured TTL/interval/cache-size values.
    Preconditions.checkArgument(conf.getLong(YarnConfiguration.TIMELINE_SERVICE_TTL_MS, YarnConfiguration.DEFAULT_TIMELINE_SERVICE_TTL_MS) > 0, "%s property value should be greater than zero", YarnConfiguration.TIMELINE_SERVICE_TTL_MS);
    Preconditions.checkArgument(conf.getLong(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS, YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS) > 0, "%s property value should be greater than zero", YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS);
    // Read cache may be disabled with 0, hence >= 0 here.
    Preconditions.checkArgument(conf.getLong(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE, YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE) >= 0, "%s property value should be greater than or equal to zero", YarnConfiguration.TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE);
    Preconditions.checkArgument(conf.getLong(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE, YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE) > 0, " %s property value should be greater than zero", YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE);
    Preconditions.checkArgument(conf.getLong(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE, YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE) > 0, "%s property value should be greater than zero", YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE);
    Options options = new Options();
    options.createIfMissing(true);
    options.cacheSize(conf.getLong(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE, YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE));
    // factory is a field; only create one if a test has not injected one.
    if (factory == null) {
        factory = new JniDBFactory();
    }
    Path dbPath = new Path(conf.get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_PATH), FILENAME);
    FileSystem localFS = null;
    try {
        localFS = FileSystem.getLocal(conf);
        if (!localFS.exists(dbPath)) {
            if (!localFS.mkdirs(dbPath)) {
                throw new IOException("Couldn't create directory for leveldb " + "timeline store " + dbPath);
            }
            // Restrict permissions on a freshly created store directory.
            localFS.setPermission(dbPath, LEVELDB_DIR_UMASK);
        }
    } finally {
        // Close the local FS handle even if directory setup failed.
        IOUtils.cleanup(LOG, localFS);
    }
    LOG.info("Using leveldb path " + dbPath);
    try {
        db = factory.open(new File(dbPath.toString()), options);
    } catch (IOException ioe) {
        // Open failed (likely corruption): back up the current on-disk state
        // with a timestamped suffix, then attempt a repair and reopen.
        File dbFile = new File(dbPath.toString());
        File backupPath = new File(dbPath.toString() + BACKUP_EXT + Time.monotonicNow());
        LOG.warn("Incurred exception while loading LevelDb database. Backing " + "up at " + backupPath, ioe);
        FileUtils.copyDirectory(dbFile, backupPath);
        LOG.warn("Going to try repair");
        factory.repair(dbFile, options);
        // If this second open also throws, serviceInit fails — intentional.
        db = factory.open(dbFile, options);
    }
    checkVersion();
    // LRUMap predates generics; wrap in synchronizedMap for concurrent use.
    startTimeWriteCache = Collections.synchronizedMap(new LRUMap(getStartTimeWriteCacheSize(conf)));
    startTimeReadCache = Collections.synchronizedMap(new LRUMap(getStartTimeReadCacheSize(conf)));
    // TTL-based deletion is on by default but can be disabled via config.
    if (conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_TTL_ENABLE, true)) {
        deletionThread = new EntityDeletionThread(conf);
        deletionThread.start();
    }
    super.serviceInit(conf);
}
Also used : Path(org.apache.hadoop.fs.Path) LRUMap(org.apache.commons.collections.map.LRUMap) FileSystem(org.apache.hadoop.fs.FileSystem) JniDBFactory(org.fusesource.leveldbjni.JniDBFactory) IOException(java.io.IOException) File(java.io.File)

Example 3 with LRUMap

use of org.apache.commons.collections.map.LRUMap in project jmeter by apache.

From the class CacheManager, method clearCache.

// Discards all cached entries by replacing the per-thread cache holder.
// Child threads inherit the (empty) cache from their parent via
// InheritableThreadLocal.
private void clearCache() {
    log.debug("Clear cache");
    threadCache = new InheritableThreadLocal<Map<String, CacheEntry>>() {

        @Override
        protected Map<String, CacheEntry> initialValue() {
            // Bug 51942 - the map may be reached from several threads, so it
            // is wrapped in a synchronized view. LRUMap predates generics,
            // hence the unchecked suppression on the assignment.
            @SuppressWarnings("unchecked")
            Map<String, CacheEntry> bounded = new LRUMap(getMaxSize());
            return Collections.synchronizedMap(bounded);
        }
    };
}
Also used : LRUMap(org.apache.commons.collections.map.LRUMap) Map(java.util.Map) LRUMap(org.apache.commons.collections.map.LRUMap)

Example 4 with LRUMap

use of org.apache.commons.collections.map.LRUMap in project jackrabbit by apache.

From the class DocNumberCache, method get.

/**
     * Returns the cache entry for <code>uuid</code>, or <code>null</code> if
     * no entry exists for <code>uuid</code>.
     *
     * @param uuid the key.
     * @return cache entry or <code>null</code>.
     */
Entry get(String uuid) {
    // The segment is chosen from the first character of the key; only that
    // segment is locked, keeping lock contention per-segment rather than
    // cache-wide.
    LRUMap cacheSegment = docNumbers[getSegmentIndex(uuid.charAt(0))];
    Entry entry;
    synchronized (cacheSegment) {
        entry = (Entry) cacheSegment.get(uuid);
    }
    if (log.isInfoEnabled()) {
        // NOTE(review): accesses/misses/lastLog are updated without
        // synchronization, so the statistics are best-effort and may be
        // slightly off under contention — appears deliberate; confirm.
        accesses++;
        if (entry == null) {
            misses++;
        }
        // log at most after 1000 accesses and every 10 seconds
        if (accesses > 1000 && System.currentTimeMillis() - lastLog > LOG_INTERVAL) {
            // Hit ratio in percent; guard avoids 0/accesses noise when there
            // were no misses.
            long ratio = 100;
            if (misses != 0) {
                ratio -= misses * 100L / accesses;
            }
            // StringBuilder instead of StringBuffer: the builder is a method
            // local used by a single thread, so the synchronized StringBuffer
            // was unnecessary overhead.
            StringBuilder statistics = new StringBuilder();
            int inUse = 0;
            for (LRUMap docNumber : docNumbers) {
                inUse += docNumber.size();
            }
            statistics.append("size=").append(inUse);
            statistics.append("/").append(docNumbers[0].maxSize() * CACHE_SEGMENTS);
            statistics.append(", #accesses=").append(accesses);
            statistics.append(", #hits=").append(accesses - misses);
            statistics.append(", #misses=").append(misses);
            statistics.append(", cacheRatio=").append(ratio).append("%");
            log.info(statistics.toString());
            // Reset the counting window after each report.
            accesses = 0;
            misses = 0;
            lastLog = System.currentTimeMillis();
        }
    }
    return entry;
}
Also used : LRUMap(org.apache.commons.collections.map.LRUMap)

Example 5 with LRUMap

use of org.apache.commons.collections.map.LRUMap in project jackrabbit by apache.

From the class AbstractPrincipalProvider, method init.

/**
     * Initializes this provider: builds the bounded principal cache and reads
     * the negative-entry flag. May be called at most once.
     *
     * @see PrincipalProvider#init(java.util.Properties)
     */
public synchronized void init(Properties options) {
    if (initialized) {
        throw new IllegalStateException("already initialized");
    }
    // Cache capacity is configurable; defaults to 1000 entries.
    String configuredMax = options.getProperty(MAXSIZE_KEY, "1000");
    cache = new LRUMap(Integer.parseInt(configuredMax));
    // Whether lookups that found nothing should also be cached.
    String negative = options.getProperty(NEGATIVE_ENTRY_KEY, "false");
    includeNegative = Boolean.parseBoolean(negative);
    initialized = true;
}
Also used : LRUMap(org.apache.commons.collections.map.LRUMap)

Aggregations

LRUMap (org.apache.commons.collections.map.LRUMap)5 File (java.io.File)2 IOException (java.io.IOException)2 FileSystem (org.apache.hadoop.fs.FileSystem)2 Path (org.apache.hadoop.fs.Path)2 JniDBFactory (org.fusesource.leveldbjni.JniDBFactory)2 Map (java.util.Map)1 Options (org.iq80.leveldb.Options)1 ReadOptions (org.iq80.leveldb.ReadOptions)1