Search in sources :

Example 16 with DB

use of org.iq80.leveldb.DB in project hadoop by apache.

Source: the getEntityTimelines method of class RollingLevelDBTimelineStore.

/**
 * Retrieves the events for the given set of entities, most recent first,
 * bounded by the optional time window and event-type filter.
 *
 * @param entityType  type shared by all requested entities
 * @param entityIds   ids of the entities whose events are wanted
 * @param limit       max events per entity; {@code null} means DEFAULT_LIMIT
 * @param windowStart earliest (exclusive) event timestamp, or {@code null}
 * @param windowEnd   latest (inclusive) event timestamp, or {@code null}
 * @param eventType   event types to include; semantics delegated to
 *                    {@link #getEntityEvent}
 * @return events grouped per entity; empty when no entity ids are given
 * @throws IOException on underlying leveldb failures
 */
@Override
public TimelineEvents getEntityTimelines(String entityType, SortedSet<String> entityIds, Long limit, Long windowStart, Long windowEnd, Set<String> eventType) throws IOException {
    TimelineEvents events = new TimelineEvents();
    if (entityIds == null || entityIds.isEmpty()) {
        return events;
    }
    // Apply the parameter defaults once, up front, instead of re-checking
    // them on every iteration of the per-entity loop below.
    if (windowEnd == null) {
        windowEnd = Long.MAX_VALUE;
    }
    if (limit == null) {
        limit = DEFAULT_LIMIT;
    }
    // create a lexicographically-ordered map from start time to entities
    Map<byte[], List<EntityIdentifier>> startTimeMap = new TreeMap<byte[], List<EntityIdentifier>>(new Comparator<byte[]>() {

        @Override
        public int compare(byte[] o1, byte[] o2) {
            return WritableComparator.compareBytes(o1, 0, o1.length, o2, 0, o2.length);
        }
    });
    DBIterator iterator = null;
    try {
        // skip entities with no start time
        for (String entityId : entityIds) {
            byte[] startTime = getStartTime(entityId, entityType);
            if (startTime != null) {
                List<EntityIdentifier> entities = startTimeMap.get(startTime);
                if (entities == null) {
                    entities = new ArrayList<EntityIdentifier>();
                    startTimeMap.put(startTime, entities);
                }
                entities.add(new EntityIdentifier(entityId, entityType));
            }
        }
        // windowEnd is fixed at this point, so its reverse-ordered encoding
        // can be computed once for all entities.
        byte[] revts = writeReverseOrderedLong(windowEnd);
        for (Entry<byte[], List<EntityIdentifier>> entry : startTimeMap.entrySet()) {
            // look up the events matching the given parameters (limit,
            // start time, end time, event types) for entities whose start times
            // were found and add the entities to the return list
            byte[] revStartTime = entry.getKey();
            for (EntityIdentifier entityIdentifier : entry.getValue()) {
                EventsOfOneEntity entity = new EventsOfOneEntity();
                entity.setEntityId(entityIdentifier.getId());
                entity.setEntityType(entityType);
                events.addEvent(entity);
                KeyBuilder kb = KeyBuilder.newInstance().add(entityType).add(revStartTime).add(entityIdentifier.getId()).add(EVENTS_COLUMN);
                byte[] prefix = kb.getBytesForLookup();
                kb.add(revts);
                byte[] first = kb.getBytesForLookup();
                byte[] last = null;
                if (windowStart != null) {
                    last = KeyBuilder.newInstance().add(prefix).add(writeReverseOrderedLong(windowStart)).getBytesForLookup();
                }
                DB db = entitydb.getDBForStartTime(readReverseOrderedLong(revStartTime, 0));
                if (db == null) {
                    continue;
                }
                // BUGFIX: close the iterator opened for the previous entity
                // before replacing it; previously only the last iterator was
                // closed (in the finally block) and all earlier ones leaked.
                // IOUtils.cleanup is a no-op on null.
                IOUtils.cleanup(LOG, iterator);
                iterator = db.iterator();
                // Scan forward from the windowEnd key while the key still
                // carries this entity's events prefix and (when windowStart is
                // set) has not passed the windowStart boundary key.
                for (iterator.seek(first); entity.getEvents().size() < limit && iterator.hasNext(); iterator.next()) {
                    byte[] key = iterator.peekNext().getKey();
                    if (!prefixMatches(prefix, prefix.length, key) || (last != null && WritableComparator.compareBytes(key, 0, key.length, last, 0, last.length) > 0)) {
                        break;
                    }
                    TimelineEvent event = getEntityEvent(eventType, key, prefix.length, iterator.peekNext().getValue());
                    if (event != null) {
                        entity.addEvent(event);
                    }
                }
            }
        }
    } finally {
        IOUtils.cleanup(LOG, iterator);
    }
    return events;
}
Also used : TimelineEvent(org.apache.hadoop.yarn.api.records.timeline.TimelineEvent) EventsOfOneEntity(org.apache.hadoop.yarn.api.records.timeline.TimelineEvents.EventsOfOneEntity) TimelineEvents(org.apache.hadoop.yarn.api.records.timeline.TimelineEvents) TreeMap(java.util.TreeMap) DBIterator(org.iq80.leveldb.DBIterator) KeyBuilder(org.apache.hadoop.yarn.server.timeline.util.LeveldbUtils.KeyBuilder) List(java.util.List) ArrayList(java.util.ArrayList) DB(org.iq80.leveldb.DB)

Example 17 with DB

use of org.iq80.leveldb.DB in project hadoop by apache.

Source: the startStorage method of class LeveldbTimelineStateStore.

/**
 * Opens (or creates) the local leveldb used to persist timeline service
 * state. First tries to open an existing database; when leveldb reports
 * that it does not exist, the database is created and the current schema
 * version is stored.
 *
 * @throws IOException if the store directory cannot be created or leveldb
 *                     fails to open/create the database
 */
@Override
protected void startStorage() throws IOException {
    Options options = new Options();
    Path dbPath = new Path(getConfig().get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_STATE_STORE_PATH), DB_NAME);
    FileSystem localFS = null;
    try {
        localFS = FileSystem.getLocal(getConfig());
        if (!localFS.exists(dbPath)) {
            if (!localFS.mkdirs(dbPath)) {
                throw new IOException("Couldn't create directory for leveldb " + "timeline store " + dbPath);
            }
            // restrict permissions on a freshly-created store directory
            localFS.setPermission(dbPath, LEVELDB_DIR_UMASK);
        }
    } finally {
        IOUtils.cleanup(LOG, localFS);
    }
    JniDBFactory factory = new JniDBFactory();
    try {
        // Open-only first: createIfMissing(false) lets us distinguish an
        // existing database (load + version check) from a missing one.
        options.createIfMissing(false);
        db = factory.open(new File(dbPath.toString()), options);
        // BUGFIX: log message typo "at th path" corrected.
        LOG.info("Loading the existing database at the path: " + dbPath.toString());
        checkVersion();
    } catch (NativeDB.DBException e) {
        if (e.isNotFound() || e.getMessage().contains(" does not exist ")) {
            // Database is missing: create it and persist the schema version.
            try {
                options.createIfMissing(true);
                db = factory.open(new File(dbPath.toString()), options);
                LOG.info("Creating a new database at the path: " + dbPath.toString());
                storeVersion(CURRENT_VERSION_INFO);
            } catch (DBException ex) {
                throw new IOException(ex);
            }
        } else {
            throw new IOException(e);
        }
    } catch (DBException e) {
        throw new IOException(e);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Options(org.iq80.leveldb.Options) DBException(org.iq80.leveldb.DBException) FileSystem(org.apache.hadoop.fs.FileSystem) IOException(java.io.IOException) JniDBFactory(org.fusesource.leveldbjni.JniDBFactory) NativeDB(org.fusesource.leveldbjni.internal.NativeDB) File(java.io.File)

Example 18 with DB

use of org.iq80.leveldb.DB in project hadoop by apache.

Source: the testInsertAfterRollPeriodRollsDB method of class TestRollingLevelDB.

/**
 * Verifies that once the clock advances past the next roll boundary,
 * {@code getDBForStartTime} hands back a database whose start time matches
 * the newly rolled period rather than the old one.
 */
@Test
public void testInsertAfterRollPeriodRollsDB() throws Exception {
    rollingLevelDB.init(conf);
    // Before rolling: the db for "now" must belong to the current period.
    long beforeRoll = rollingLevelDB.currentTimeMillis();
    DB dbBeforeRoll = rollingLevelDB.getDBForStartTime(beforeRoll);
    long observedStart = rollingLevelDB.getStartTimeFor(dbBeforeRoll);
    Assert.assertEquals("Received level db for incorrect start time", rollingLevelDB.computeCurrentCheckMillis(beforeRoll), observedStart);
    // Advance the clock to the next roll boundary and ask again: the db
    // returned must now belong to the rolled period.
    long afterRoll = rollingLevelDB.getNextRollingTimeMillis();
    rollingLevelDB.setCurrentTimeMillis(afterRoll);
    DB dbAfterRoll = rollingLevelDB.getDBForStartTime(afterRoll);
    observedStart = rollingLevelDB.getStartTimeFor(dbAfterRoll);
    Assert.assertEquals("Received level db for incorrect start time", rollingLevelDB.computeCurrentCheckMillis(afterRoll), observedStart);
}
Also used : DB(org.iq80.leveldb.DB) Test(org.junit.Test)

Example 19 with DB

use of org.iq80.leveldb.DB in project camel by apache.

Source: the start method of class LevelDBFile.

/**
 * Opens the configured LevelDB database, creating it (and its parent
 * directories) if missing. Tuning knobs configured on this object are
 * translated into leveldb {@link Options}.
 *
 * @throws IllegalArgumentException if no file has been configured
 * @throws RuntimeException         if leveldb fails to open the file
 */
public void start() {
    if (getFile() == null) {
        throw new IllegalArgumentException("A file must be configured");
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Starting LevelDB using file: {}", getFile());
    }
    Options options = new Options().writeBufferSize(writeBufferSize).maxOpenFiles(maxOpenFiles).blockRestartInterval(blockRestartInterval).blockSize(blockSize).verifyChecksums(verifyChecksums).paranoidChecks(paranoidChecks).cacheSize(cacheSize);
    // only snappy is supported; anything else disables compression
    if ("snappy".equals(compressionType)) {
        options.compressionType(CompressionType.SNAPPY);
    } else {
        options.compressionType(CompressionType.NONE);
    }
    options.createIfMissing(true);
    try {
        // BUGFIX: getParentFile() returns null when the configured file has
        // no parent path component; guard to avoid an NPE before open().
        File parentDir = getFile().getParentFile();
        if (parentDir != null) {
            parentDir.mkdirs();
        }
        db = factory.open(getFile(), options);
    } catch (IOException ioe) {
        throw new RuntimeException("Error opening LevelDB with file " + getFile(), ioe);
    }
}
Also used : Options(org.iq80.leveldb.Options) WriteOptions(org.iq80.leveldb.WriteOptions) IOException(java.io.IOException)

Example 20 with DB

use of org.iq80.leveldb.DB in project Mycat-Server by MyCATApache.

Source: the createCachePool method of class LevelDBCachePooFactory.

/**
 * Creates a LevelDB-backed cache pool. On failure to open the database the
 * pool is still returned (best-effort, matching the original contract) but
 * its underlying DB is {@code null}.
 *
 * @param poolName      name of the pool; also used as the db directory name
 * @param cacheSize     leveldb block-cache size in MiB
 * @param expireSeconds unused here; expiry is handled by the pool itself
 * @return a new {@link LevelDBPool}, possibly wrapping a {@code null} DB
 */
@Override
public CachePool createCachePool(String poolName, int cacheSize, int expireSeconds) {
    Options options = new Options();
    // cacheSize is given in MiB.
    // BUGFIX: multiply as a long — the previous int multiplication
    // overflowed for cacheSize >= 2048 MiB.
    options.cacheSize(cacheSize * 1048576L);
    options.createIfMissing(true);
    DB db = null;
    try {
        // NOTE(review): "leveldb\\" uses a Windows-style separator; on other
        // platforms the backslash becomes part of the file name — confirm
        // the intended deployment platform.
        db = factory.open(new File("leveldb\\" + poolName), options);
    } catch (Exception ignored) {
        // Best-effort by design: an open failure yields a pool with a null
        // DB instead of propagating. NOTE(review): the exception is silently
        // swallowed — consider at least logging it.
    }
    return new LevelDBPool(poolName, db, cacheSize);
}
Also used : Options(org.iq80.leveldb.Options) File(java.io.File) DB(org.iq80.leveldb.DB)

Aggregations

IOException (java.io.IOException)25 DB (org.iq80.leveldb.DB)25 DBException (org.iq80.leveldb.DBException)20 LeveldbIterator (org.apache.hadoop.yarn.server.utils.LeveldbIterator)16 Options (org.iq80.leveldb.Options)16 File (java.io.File)15 JniDBFactory.asString (org.fusesource.leveldbjni.JniDBFactory.asString)14 WriteBatch (org.iq80.leveldb.WriteBatch)9 DBIterator (org.iq80.leveldb.DBIterator)7 Map (java.util.Map)5 Path (org.apache.hadoop.fs.Path)5 Test (org.junit.Test)5 ImmutableMap (com.google.common.collect.ImmutableMap)4 ImmutableSortedMap (com.google.common.collect.ImmutableSortedMap)4 NavigableMap (java.util.NavigableMap)4 NativeDB (org.fusesource.leveldbjni.internal.NativeDB)4 WriteOptions (org.iq80.leveldb.WriteOptions)4 DB (com.codecademy.eventhub.base.DB)3 Provides (com.google.inject.Provides)3 ArrayList (java.util.ArrayList)3