Usage examples for org.iq80.leveldb.DBIterator in open-source projects

Example 1 with DBIterator

Use of org.iq80.leveldb.DBIterator in project camel by apache.

The class LevelDBAggregationRepository, method size:

private int size(final String repositoryName) {
    DBIterator it = levelDBFile.getDb().iterator();
    String prefix = repositoryName + '\0';
    int count = 0;
    try {
        for (it.seek(keyBuilder(repositoryName, "")); it.hasNext(); it.next()) {
            if (!asString(it.peekNext().getKey()).startsWith(prefix)) {
                break;
            }
            count++;
        }
    } finally {
        // Make sure you close the iterator to avoid resource leaks.
        IOHelper.close(it);
    }
    LOG.debug("Size of repository [{}] -> {}", repositoryName, count);
    return count;
}
Also used: DBIterator (org.iq80.leveldb.DBIterator), JniDBFactory.asString (org.fusesource.leveldbjni.JniDBFactory.asString)
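
The same prefix-count idiom works against the plain org.iq80.leveldb API. A minimal sketch, assuming UTF-8 string keys and the bytes/asString helpers from leveldbjni; countByPrefix is an illustrative name, not part of the Camel code:

import static org.fusesource.leveldbjni.JniDBFactory.asString;
import static org.fusesource.leveldbjni.JniDBFactory.bytes;

import java.io.IOException;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.DBIterator;

// Count all entries whose key starts with the given prefix.
static int countByPrefix(DB db, String prefix) throws IOException {
    int count = 0;
    try (DBIterator it = db.iterator()) {
        // seek() positions the iterator at the first key >= prefix
        for (it.seek(bytes(prefix)); it.hasNext(); it.next()) {
            if (!asString(it.peekNext().getKey()).startsWith(prefix)) {
                break; // keys are sorted, so the first mismatch ends the range
            }
            count++;
        }
    } // try-with-resources closes the iterator, as IOHelper.close does above
    return count;
}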

Example 2 with DBIterator

Use of org.iq80.leveldb.DBIterator in project hadoop by apache.

The class RollingLevelDBTimelineStore, method getEntityTimelines:

@Override
public TimelineEvents getEntityTimelines(String entityType, SortedSet<String> entityIds, Long limit, Long windowStart, Long windowEnd, Set<String> eventType) throws IOException {
    TimelineEvents events = new TimelineEvents();
    if (entityIds == null || entityIds.isEmpty()) {
        return events;
    }
    // create a lexicographically-ordered map from start time to entities
    Map<byte[], List<EntityIdentifier>> startTimeMap = new TreeMap<byte[], List<EntityIdentifier>>(new Comparator<byte[]>() {

        @Override
        public int compare(byte[] o1, byte[] o2) {
            return WritableComparator.compareBytes(o1, 0, o1.length, o2, 0, o2.length);
        }
    });
    DBIterator iterator = null;
    try {
        // skip entities with no start time
        for (String entityId : entityIds) {
            byte[] startTime = getStartTime(entityId, entityType);
            if (startTime != null) {
                List<EntityIdentifier> entities = startTimeMap.get(startTime);
                if (entities == null) {
                    entities = new ArrayList<EntityIdentifier>();
                    startTimeMap.put(startTime, entities);
                }
                entities.add(new EntityIdentifier(entityId, entityType));
            }
        }
        for (Entry<byte[], List<EntityIdentifier>> entry : startTimeMap.entrySet()) {
            // look up the events matching the given parameters (limit,
            // start time, end time, event types) for entities whose start times
            // were found and add the entities to the return list
            byte[] revStartTime = entry.getKey();
            for (EntityIdentifier entityIdentifier : entry.getValue()) {
                EventsOfOneEntity entity = new EventsOfOneEntity();
                entity.setEntityId(entityIdentifier.getId());
                entity.setEntityType(entityType);
                events.addEvent(entity);
                KeyBuilder kb = KeyBuilder.newInstance().add(entityType).add(revStartTime).add(entityIdentifier.getId()).add(EVENTS_COLUMN);
                byte[] prefix = kb.getBytesForLookup();
                if (windowEnd == null) {
                    windowEnd = Long.MAX_VALUE;
                }
                byte[] revts = writeReverseOrderedLong(windowEnd);
                kb.add(revts);
                byte[] first = kb.getBytesForLookup();
                byte[] last = null;
                if (windowStart != null) {
                    last = KeyBuilder.newInstance().add(prefix).add(writeReverseOrderedLong(windowStart)).getBytesForLookup();
                }
                if (limit == null) {
                    limit = DEFAULT_LIMIT;
                }
                DB db = entitydb.getDBForStartTime(readReverseOrderedLong(revStartTime, 0));
                if (db == null) {
                    continue;
                }
                // release the iterator from the previous rolling DB before opening a new one
                IOUtils.cleanup(LOG, iterator);
                iterator = db.iterator();
                for (iterator.seek(first); entity.getEvents().size() < limit && iterator.hasNext(); iterator.next()) {
                    byte[] key = iterator.peekNext().getKey();
                    if (!prefixMatches(prefix, prefix.length, key) || (last != null && WritableComparator.compareBytes(key, 0, key.length, last, 0, last.length) > 0)) {
                        break;
                    }
                    TimelineEvent event = getEntityEvent(eventType, key, prefix.length, iterator.peekNext().getValue());
                    if (event != null) {
                        entity.addEvent(event);
                    }
                }
            }
        }
    } finally {
        IOUtils.cleanup(LOG, iterator);
    }
    return events;
}
Also used: TimelineEvent (org.apache.hadoop.yarn.api.records.timeline.TimelineEvent), EventsOfOneEntity (org.apache.hadoop.yarn.api.records.timeline.TimelineEvents.EventsOfOneEntity), TimelineEvents (org.apache.hadoop.yarn.api.records.timeline.TimelineEvents), TreeMap (java.util.TreeMap), DBIterator (org.iq80.leveldb.DBIterator), KeyBuilder (org.apache.hadoop.yarn.server.timeline.util.LeveldbUtils.KeyBuilder), List (java.util.List), ArrayList (java.util.ArrayList), DB (org.iq80.leveldb.DB)
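
Stripped of the Hadoop-specific key encoding, the event loop above is a bounded range scan: seek to the first candidate key, then stop at the first key that leaves the prefix or passes the upper bound. A minimal sketch; startsWith and compareUnsigned are hypothetical helpers standing in for prefixMatches and WritableComparator.compareBytes:

import java.io.IOException;
import java.util.Map;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.DBIterator;

// Visit all entries in [first, last] that share a common prefix.
static void scanRange(DB db, byte[] prefix, byte[] first, byte[] last) throws IOException {
    try (DBIterator it = db.iterator()) {
        for (it.seek(first); it.hasNext(); it.next()) {
            Map.Entry<byte[], byte[]> entry = it.peekNext();
            byte[] key = entry.getKey();
            // keys are sorted: the first key outside the prefix or past
            // the (inclusive) upper bound ends the scan
            if (!startsWith(key, prefix)
                    || (last != null && compareUnsigned(key, last) > 0)) {
                break;
            }
            // ... decode entry.getValue() here ...
        }
    }
}

// True if key begins with prefix (stand-in for prefixMatches).
static boolean startsWith(byte[] key, byte[] prefix) {
    if (key.length < prefix.length) {
        return false;
    }
    for (int i = 0; i < prefix.length; i++) {
        if (key[i] != prefix[i]) {
            return false;
        }
    }
    return true;
}

// Lexicographic unsigned-byte comparison (stand-in for WritableComparator.compareBytes).
static int compareUnsigned(byte[] a, byte[] b) {
    int n = Math.min(a.length, b.length);
    for (int i = 0; i < n; i++) {
        int cmp = (a[i] & 0xff) - (b[i] & 0xff);
        if (cmp != 0) {
            return cmp;
        }
    }
    return a.length - b.length;
}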

Example 3 with DBIterator

Use of org.iq80.leveldb.DBIterator in project hadoop by apache.

The class RollingLevelDBTimelineStore, method evictOldStartTimes:

@VisibleForTesting
long evictOldStartTimes(long minStartTime) throws IOException {
    LOG.info("Searching for start times to evict earlier than " + minStartTime);
    long batchSize = 0;
    long totalCount = 0;
    long startTimesCount = 0;
    WriteBatch writeBatch = null;
    DBIterator iterator = null;
    try {
        writeBatch = starttimedb.createWriteBatch();
        ReadOptions readOptions = new ReadOptions();
        readOptions.fillCache(false);
        iterator = starttimedb.iterator(readOptions);
        // seek to the first start time entry
        iterator.seekToFirst();
        // evaluate each start time entry to see if it needs to be evicted or not
        while (iterator.hasNext()) {
            Map.Entry<byte[], byte[]> current = iterator.next();
            byte[] entityKey = current.getKey();
            byte[] entityValue = current.getValue();
            long startTime = readReverseOrderedLong(entityValue, 0);
            if (startTime < minStartTime) {
                ++batchSize;
                ++startTimesCount;
                writeBatch.delete(entityKey);
                // a large delete will hold the lock for too long
                if (batchSize >= writeBatchSize) {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Preparing to delete a batch of " + batchSize + " old start times");
                    }
                    starttimedb.write(writeBatch);
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Deleted batch of " + batchSize + ". Total start times deleted so far this cycle: " + startTimesCount);
                    }
                    IOUtils.cleanup(LOG, writeBatch);
                    writeBatch = starttimedb.createWriteBatch();
                    batchSize = 0;
                }
            }
            ++totalCount;
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("Preparing to delete a batch of " + batchSize + " old start times");
        }
        starttimedb.write(writeBatch);
        if (LOG.isDebugEnabled()) {
            LOG.debug("Deleted batch of " + batchSize + ". Total start times deleted so far this cycle: " + startTimesCount);
        }
        LOG.info("Deleted " + startTimesCount + "/" + totalCount + " start time entities earlier than " + minStartTime);
    } finally {
        IOUtils.cleanup(LOG, writeBatch);
        IOUtils.cleanup(LOG, iterator);
    }
    return startTimesCount;
}
Also used: DBIterator (org.iq80.leveldb.DBIterator), ReadOptions (org.iq80.leveldb.ReadOptions), RollingWriteBatch (org.apache.hadoop.yarn.server.timeline.RollingLevelDB.RollingWriteBatch), WriteBatch (org.iq80.leveldb.WriteBatch), Map (java.util.Map), TreeMap (java.util.TreeMap), LRUMap (org.apache.commons.collections.map.LRUMap), VisibleForTesting (com.google.common.annotations.VisibleForTesting)
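
The essential pattern here is deleting through an iterator in bounded batches, so that no single WriteBatch grows without limit or holds the write lock for long. A minimal standalone sketch; shouldEvict and maxBatchSize are hypothetical stand-ins for the start-time comparison and writeBatchSize above:

import java.io.IOException;
import java.util.Map;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.DBIterator;
import org.iq80.leveldb.ReadOptions;
import org.iq80.leveldb.WriteBatch;

// Delete matching entries in bounded batches.
static long evictInBatches(DB db, long maxBatchSize) throws IOException {
    long deleted = 0;
    long batchSize = 0;
    // a one-off full scan should not evict hot entries from the block cache
    ReadOptions readOptions = new ReadOptions().fillCache(false);
    try (DBIterator it = db.iterator(readOptions)) {
        WriteBatch batch = db.createWriteBatch();
        try {
            for (it.seekToFirst(); it.hasNext(); ) {
                Map.Entry<byte[], byte[]> entry = it.next();
                if (shouldEvict(entry.getKey(), entry.getValue())) {
                    batch.delete(entry.getKey());
                    deleted++;
                    if (++batchSize >= maxBatchSize) {
                        db.write(batch); // flush this batch
                        batch.close();
                        batch = db.createWriteBatch(); // start a fresh one
                        batchSize = 0;
                    }
                }
            }
            db.write(batch); // flush the final partial batch
        } finally {
            batch.close();
        }
    }
    return deleted;
}

// Hypothetical predicate; the example above compares a reverse-ordered
// start time in the value against minStartTime.
static boolean shouldEvict(byte[] key, byte[] value) {
    return false; // replace with a real condition
}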

Example 4 with DBIterator

Use of org.iq80.leveldb.DBIterator in project cdap by caskdata.

The class LevelDBTableCore, method scan:

public Scanner scan(byte[] startRow, byte[] stopRow, @Nullable FuzzyRowFilter filter, @Nullable byte[][] columns, @Nullable Transaction tx) throws IOException {
    if (columns != null) {
        if (columns.length == 0) {
            // no columns requested: nothing can match
            return EMPTY_SCANNER;
        }
        // sort a defensive copy of the requested columns in byte order
        columns = Arrays.copyOf(columns, columns.length);
        Arrays.sort(columns, Bytes.BYTES_COMPARATOR);
    }
    // the returned scanner takes ownership of the iterator and must close it
    DBIterator iterator = getDB().iterator();
    seekToStart(iterator, startRow);
    byte[] endKey = stopRow == null ? null : createEndKey(stopRow);
    return new LevelDBScanner(iterator, endKey, filter, columns, tx);
}
Also used: DBIterator (org.iq80.leveldb.DBIterator)
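
Note that scan does not close the iterator itself: ownership passes to the returned LevelDBScanner, which is expected to close it. A minimal sketch of such a wrapper (LevelDBScanner is CDAP-internal; this illustrative stand-in reuses the compareUnsigned helper from the Example 2 sketch):

import java.io.Closeable;
import java.io.IOException;
import java.util.Map;
import org.iq80.leveldb.DBIterator;

// Wraps a DBIterator, enforces an exclusive end key, and owns closing the iterator.
static class SimpleScanner implements Closeable {
    private final DBIterator iterator;
    private final byte[] endKey; // exclusive upper bound, or null for unbounded

    SimpleScanner(DBIterator iterator, byte[] endKey) {
        this.iterator = iterator;
        this.endKey = endKey;
    }

    // Returns the next entry, or null when the scan is exhausted.
    Map.Entry<byte[], byte[]> next() {
        if (!iterator.hasNext()) {
            return null;
        }
        Map.Entry<byte[], byte[]> entry = iterator.next();
        if (endKey != null && compareUnsigned(entry.getKey(), endKey) >= 0) {
            return null; // reached the end of the requested range
        }
        return entry;
    }

    @Override
    public void close() throws IOException {
        // whoever received the scanner from scan() must call this
        iterator.close();
    }
}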

Example 5 with DBIterator

Use of org.iq80.leveldb.DBIterator in project cdap by caskdata.

The class LevelDBTableCore, method deleteColumn:

public void deleteColumn(byte[] row, byte[] column) throws IOException {
    DB db = getDB();
    // both WriteBatch and DBIterator are Closeable; release them when done
    try (WriteBatch batch = db.createWriteBatch();
         DBIterator iterator = db.iterator()) {
        addToDeleteBatch(batch, iterator, row, column);
        db.write(batch);
    }
}
Also used: DBIterator (org.iq80.leveldb.DBIterator), WriteBatch (org.iq80.leveldb.WriteBatch), DB (org.iq80.leveldb.DB)
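
Because DBIterator and WriteBatch are both Closeable, this try-with-resources shape extends to any read-modify-write: open both resources, stage mutations on the batch, then commit with a single write(). A minimal sketch of a conditional single-key delete, assuming the same getDB() accessor; deleteKeyIfPresent is illustrative, not part of the CDAP code:

import java.io.IOException;
import java.util.Arrays;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.DBIterator;
import org.iq80.leveldb.WriteBatch;

// Delete a key only if it exists; both resources close automatically,
// in reverse declaration order, even if write() throws.
public void deleteKeyIfPresent(byte[] key) throws IOException {
    DB db = getDB(); // assumed accessor returning an open DB
    try (WriteBatch batch = db.createWriteBatch();
         DBIterator iterator = db.iterator()) {
        iterator.seek(key);
        if (iterator.hasNext() && Arrays.equals(iterator.peekNext().getKey(), key)) {
            batch.delete(key);
        }
        db.write(batch);
    }
}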

Aggregations

Classes used together with DBIterator across the indexed examples (number of usages):

DBIterator (org.iq80.leveldb.DBIterator): 17
DB (org.iq80.leveldb.DB): 7
WriteBatch (org.iq80.leveldb.WriteBatch): 5
JniDBFactory.asString (org.fusesource.leveldbjni.JniDBFactory.asString): 4
Map (java.util.Map): 3
ImmutableMap (com.google.common.collect.ImmutableMap): 2
ImmutableSortedMap (com.google.common.collect.ImmutableSortedMap): 2
IOException (java.io.IOException): 2
ArrayList (java.util.ArrayList): 2
LinkedHashSet (java.util.LinkedHashSet): 2
NavigableMap (java.util.NavigableMap): 2
TreeMap (java.util.TreeMap): 2
GenericObjectMapper.readReverseOrderedLong (org.apache.hadoop.yarn.server.timeline.GenericObjectMapper.readReverseOrderedLong): 2
GenericObjectMapper.writeReverseOrderedLong (org.apache.hadoop.yarn.server.timeline.GenericObjectMapper.writeReverseOrderedLong): 2
KeyBuilder (org.apache.hadoop.yarn.server.timeline.util.LeveldbUtils.KeyBuilder): 2
KeyParser (org.apache.hadoop.yarn.server.timeline.util.LeveldbUtils.KeyParser): 2
Row (co.cask.cdap.api.dataset.table.Row): 1
Scanner (co.cask.cdap.api.dataset.table.Scanner): 1
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 1
InvocationHandler (java.lang.reflect.InvocationHandler): 1