Use of org.iq80.leveldb.DBIterator in project camel by apache.
The class LevelDBAggregationRepository, method size:
private int size(final String repositoryName) {
    DBIterator it = levelDBFile.getDb().iterator();
    String prefix = repositoryName + '\0';
    int count = 0;
    try {
        for (it.seek(keyBuilder(repositoryName, "")); it.hasNext(); it.next()) {
            if (!asString(it.peekNext().getKey()).startsWith(prefix)) {
                break;
            }
            count++;
        }
    } finally {
        // Make sure you close the iterator to avoid resource leaks.
        IOHelper.close(it);
    }
    LOG.debug("Size of repository [{}] -> {}", repositoryName, count);
    return count;
}
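levelDBFile, keyBuilder, and IOHelper are Camel internals, but the idiom itself is plain iq80 LevelDB: seek to the first key with a given prefix and count entries until the prefix no longer matches. A minimal self-contained sketch of the same pattern (the database path and repository names below are made up for illustration):

import static org.iq80.leveldb.impl.Iq80DBFactory.asString;
import static org.iq80.leveldb.impl.Iq80DBFactory.bytes;
import static org.iq80.leveldb.impl.Iq80DBFactory.factory;

import java.io.File;
import java.io.IOException;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.DBIterator;
import org.iq80.leveldb.Options;

public class PrefixCount {
    public static void main(String[] args) throws IOException {
        Options options = new Options().createIfMissing(true);
        try (DB db = factory.open(new File("prefix-count-db"), options)) {
            String prefix = "repo-a" + '\0';
            db.put(bytes(prefix + "exchange1"), bytes("v1"));
            db.put(bytes(prefix + "exchange2"), bytes("v2"));
            db.put(bytes("repo-b" + '\0' + "exchange1"), bytes("v3"));
            int count = 0;
            // the iterator is Closeable; try-with-resources plays the role of IOHelper.close
            try (DBIterator it = db.iterator()) {
                // seek positions the iterator at the first key >= the prefix
                for (it.seek(bytes(prefix)); it.hasNext(); it.next()) {
                    if (!asString(it.peekNext().getKey()).startsWith(prefix)) {
                        // keys are sorted, so the first mismatch ends the range
                        break;
                    }
                    count++;
                }
            }
            System.out.println("entries under prefix: " + count); // prints 2
        }
    }
}

Because LevelDB stores keys in sorted order, the prefix scan touches only the keys it counts plus one trailing key.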
Use of org.iq80.leveldb.DBIterator in project hadoop by apache.
The class RollingLevelDBTimelineStore, method getEntityTimelines:
@Override
public TimelineEvents getEntityTimelines(String entityType, SortedSet<String> entityIds,
    Long limit, Long windowStart, Long windowEnd, Set<String> eventType) throws IOException {
    TimelineEvents events = new TimelineEvents();
    if (entityIds == null || entityIds.isEmpty()) {
        return events;
    }
    // create a lexicographically-ordered map from start time to entities
    Map<byte[], List<EntityIdentifier>> startTimeMap =
        new TreeMap<byte[], List<EntityIdentifier>>(new Comparator<byte[]>() {
            @Override
            public int compare(byte[] o1, byte[] o2) {
                return WritableComparator.compareBytes(o1, 0, o1.length, o2, 0, o2.length);
            }
        });
    DBIterator iterator = null;
    try {
        // skip entities with no start time
        for (String entityId : entityIds) {
            byte[] startTime = getStartTime(entityId, entityType);
            if (startTime != null) {
                List<EntityIdentifier> entities = startTimeMap.get(startTime);
                if (entities == null) {
                    entities = new ArrayList<EntityIdentifier>();
                    startTimeMap.put(startTime, entities);
                }
                entities.add(new EntityIdentifier(entityId, entityType));
            }
        }
        for (Entry<byte[], List<EntityIdentifier>> entry : startTimeMap.entrySet()) {
            // look up the events matching the given parameters (limit,
            // start time, end time, event types) for entities whose start times
            // were found and add the entities to the return list
            byte[] revStartTime = entry.getKey();
            for (EntityIdentifier entityIdentifier : entry.getValue()) {
                EventsOfOneEntity entity = new EventsOfOneEntity();
                entity.setEntityId(entityIdentifier.getId());
                entity.setEntityType(entityType);
                events.addEvent(entity);
                KeyBuilder kb = KeyBuilder.newInstance().add(entityType).add(revStartTime)
                    .add(entityIdentifier.getId()).add(EVENTS_COLUMN);
                byte[] prefix = kb.getBytesForLookup();
                if (windowEnd == null) {
                    windowEnd = Long.MAX_VALUE;
                }
                byte[] revts = writeReverseOrderedLong(windowEnd);
                kb.add(revts);
                byte[] first = kb.getBytesForLookup();
                byte[] last = null;
                if (windowStart != null) {
                    last = KeyBuilder.newInstance().add(prefix)
                        .add(writeReverseOrderedLong(windowStart)).getBytesForLookup();
                }
                if (limit == null) {
                    limit = DEFAULT_LIMIT;
                }
                DB db = entitydb.getDBForStartTime(readReverseOrderedLong(revStartTime, 0));
                if (db == null) {
                    continue;
                }
                iterator = db.iterator();
                for (iterator.seek(first); entity.getEvents().size() < limit && iterator.hasNext(); iterator.next()) {
                    byte[] key = iterator.peekNext().getKey();
                    if (!prefixMatches(prefix, prefix.length, key)
                        || (last != null && WritableComparator.compareBytes(key, 0, key.length, last, 0, last.length) > 0)) {
                        break;
                    }
                    TimelineEvent event = getEntityEvent(eventType, key, prefix.length, iterator.peekNext().getValue());
                    if (event != null) {
                        entity.addEvent(event);
                    }
                }
            }
        }
    } finally {
        IOUtils.cleanup(LOG, iterator);
    }
    return events;
}
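Two details are worth noting. First, only the most recently assigned iterator is cleaned up in the finally block, so iterators created in earlier loop passes are left to the garbage collector. Second, stripped of the KeyBuilder encoding and the rolling-database lookup, the iteration idiom is a bounded range scan: seek to a computed start key, then stop at the first key past an exclusive end key or once a limit is reached. A sketch of that idiom alone (rangeScan and compare are illustrative helpers, not Hadoop APIs; keys are treated as opaque byte arrays):

import static org.iq80.leveldb.impl.Iq80DBFactory.asString;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.DBIterator;

public class RangeScan {
    // Collects values for keys in [first, last), stopping early at limit.
    static List<String> rangeScan(DB db, byte[] first, byte[] last, int limit) throws IOException {
        List<String> values = new ArrayList<>();
        try (DBIterator iterator = db.iterator()) {
            for (iterator.seek(first); values.size() < limit && iterator.hasNext(); iterator.next()) {
                Map.Entry<byte[], byte[]> entry = iterator.peekNext();
                // keys come back in sorted order, so the first key at or
                // past the upper bound ends the scan
                if (compare(entry.getKey(), last) >= 0) {
                    break;
                }
                values.add(asString(entry.getValue()));
            }
        }
        return values;
    }

    // lexicographic comparison of unsigned bytes, matching LevelDB's default comparator
    static int compare(byte[] a, byte[] b) {
        int n = Math.min(a.length, b.length);
        for (int i = 0; i < n; i++) {
            int cmp = (a[i] & 0xff) - (b[i] & 0xff);
            if (cmp != 0) {
                return cmp;
            }
        }
        return a.length - b.length;
    }
}

The store encodes timestamps with writeReverseOrderedLong, so a forward seek yields events newest first; that is why first is built from windowEnd and last from windowStart rather than the other way around.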
Use of org.iq80.leveldb.DBIterator in project hadoop by apache.
The class RollingLevelDBTimelineStore, method evictOldStartTimes:
@VisibleForTesting
long evictOldStartTimes(long minStartTime) throws IOException {
    LOG.info("Searching for start times to evict earlier than " + minStartTime);
    long batchSize = 0;
    long totalCount = 0;
    long startTimesCount = 0;
    WriteBatch writeBatch = null;
    DBIterator iterator = null;
    try {
        writeBatch = starttimedb.createWriteBatch();
        ReadOptions readOptions = new ReadOptions();
        readOptions.fillCache(false);
        iterator = starttimedb.iterator(readOptions);
        // seek to the first start time entry
        iterator.seekToFirst();
        // evaluate each start time entry to see if it needs to be evicted or not
        while (iterator.hasNext()) {
            Map.Entry<byte[], byte[]> current = iterator.next();
            byte[] entityKey = current.getKey();
            byte[] entityValue = current.getValue();
            long startTime = readReverseOrderedLong(entityValue, 0);
            if (startTime < minStartTime) {
                ++batchSize;
                ++startTimesCount;
                writeBatch.delete(entityKey);
                // a large delete will hold the lock for too long
                if (batchSize >= writeBatchSize) {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Preparing to delete a batch of " + batchSize + " old start times");
                    }
                    starttimedb.write(writeBatch);
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Deleted batch of " + batchSize
                            + ". Total start times deleted so far this cycle: " + startTimesCount);
                    }
                    IOUtils.cleanup(LOG, writeBatch);
                    writeBatch = starttimedb.createWriteBatch();
                    batchSize = 0;
                }
            }
            ++totalCount;
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("Preparing to delete a batch of " + batchSize + " old start times");
        }
        starttimedb.write(writeBatch);
        if (LOG.isDebugEnabled()) {
            LOG.debug("Deleted batch of " + batchSize
                + ". Total start times deleted so far this cycle: " + startTimesCount);
        }
        LOG.info("Deleted " + startTimesCount + "/" + totalCount
            + " start time entities earlier than " + minStartTime);
    } finally {
        IOUtils.cleanup(LOG, writeBatch);
        IOUtils.cleanup(LOG, iterator);
    }
    return startTimesCount;
}
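The essential discipline here is flushing and recreating the WriteBatch once it reaches writeBatchSize, so that a single oversized write does not hold the database lock for too long, and disabling fillCache so the full scan does not evict hot blocks from the cache. A compact sketch of the same pattern (shouldEvict and BATCH_SIZE are illustrative stand-ins for the store's reverse-ordered timestamp check and its configured writeBatchSize):

import java.io.IOException;
import java.util.Map;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.DBIterator;
import org.iq80.leveldb.ReadOptions;
import org.iq80.leveldb.WriteBatch;

public class BatchedEviction {
    static final int BATCH_SIZE = 10000; // stand-in for writeBatchSize

    static long evict(DB db) throws IOException {
        long deleted = 0;
        int batched = 0;
        // a one-off full scan should not pollute the block cache
        ReadOptions readOptions = new ReadOptions().fillCache(false);
        WriteBatch batch = db.createWriteBatch();
        try (DBIterator iterator = db.iterator(readOptions)) {
            for (iterator.seekToFirst(); iterator.hasNext(); ) {
                Map.Entry<byte[], byte[]> entry = iterator.next();
                if (shouldEvict(entry.getValue())) {
                    batch.delete(entry.getKey());
                    batched++;
                    deleted++;
                    // flush periodically so a single write never grows unbounded
                    if (batched >= BATCH_SIZE) {
                        db.write(batch);
                        batch.close();
                        batch = db.createWriteBatch();
                        batched = 0;
                    }
                }
            }
            db.write(batch); // flush the final partial batch
        } finally {
            batch.close();
        }
        return deleted;
    }

    // illustrative predicate; the real store decodes a reverse-ordered timestamp
    static boolean shouldEvict(byte[] value) {
        return value.length > 0 && value[0] == 0;
    }
}

Deleting through a WriteBatch also keeps each flush atomic, so readers never observe a partially applied eviction cycle.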
Use of org.iq80.leveldb.DBIterator in project cdap by caskdata.
The class LevelDBTableCore, method scan:
public Scanner scan(byte[] startRow, byte[] stopRow, @Nullable FuzzyRowFilter filter,
                    @Nullable byte[][] columns, @Nullable Transaction tx) throws IOException {
    if (columns != null) {
        if (columns.length == 0) {
            return EMPTY_SCANNER;
        }
        columns = Arrays.copyOf(columns, columns.length);
        Arrays.sort(columns, Bytes.BYTES_COMPARATOR);
    }
    DBIterator iterator = getDB().iterator();
    seekToStart(iterator, startRow);
    byte[] endKey = stopRow == null ? null : createEndKey(stopRow);
    return new LevelDBScanner(iterator, endKey, filter, columns, tx);
}
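Unlike the other examples, scan deliberately leaves the iterator open: ownership transfers to the returned LevelDBScanner, which is responsible for closing it. A sketch of what such a wrapper can look like (KeyValueScanner and its next method are illustrative, not CDAP's actual Scanner contract):

import java.io.Closeable;
import java.io.IOException;
import java.util.Map;
import org.iq80.leveldb.DBIterator;

// Wraps a DBIterator and stops at an exclusive end key; closing the
// scanner closes the underlying iterator.
public class KeyValueScanner implements Closeable {
    private final DBIterator iterator;
    private final byte[] endKey; // null means scan to the end of the database

    public KeyValueScanner(DBIterator iterator, byte[] endKey) {
        this.iterator = iterator;
        this.endKey = endKey;
    }

    // Returns the next entry, or null when the range is exhausted.
    public Map.Entry<byte[], byte[]> next() {
        if (!iterator.hasNext()) {
            return null;
        }
        Map.Entry<byte[], byte[]> entry = iterator.peekNext();
        if (endKey != null && compare(entry.getKey(), endKey) >= 0) {
            return null;
        }
        iterator.next();
        return entry;
    }

    @Override
    public void close() throws IOException {
        iterator.close();
    }

    private static int compare(byte[] a, byte[] b) {
        int n = Math.min(a.length, b.length);
        for (int i = 0; i < n; i++) {
            int cmp = (a[i] & 0xff) - (b[i] & 0xff);
            if (cmp != 0) {
                return cmp;
            }
        }
        return a.length - b.length;
    }
}

The caller must close the scanner; otherwise the underlying iterator keeps its snapshot of the database alive for the life of the process.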
Use of org.iq80.leveldb.DBIterator in project cdap by caskdata.
The class LevelDBTableCore, method deleteColumn:
public void deleteColumn(byte[] row, byte[] column) throws IOException {
    DB db = getDB();
    WriteBatch batch = db.createWriteBatch();
    try (DBIterator iterator = db.iterator()) {
        addToDeleteBatch(batch, iterator, row, column);
        db.write(batch);
    }
}
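addToDeleteBatch is internal to LevelDBTableCore; note also that the WriteBatch itself is never closed here, only the iterator is. A self-contained equivalent that stages every key under a prefix for deletion and closes both resources might look like this (deleteByPrefix and startsWith are hypothetical helpers):

import java.io.IOException;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.DBIterator;
import org.iq80.leveldb.WriteBatch;

public class PrefixDelete {
    // Deletes every key that starts with the given prefix in one atomic write.
    static void deleteByPrefix(DB db, byte[] prefix) throws IOException {
        try (WriteBatch batch = db.createWriteBatch();
             DBIterator iterator = db.iterator()) {
            for (iterator.seek(prefix); iterator.hasNext(); iterator.next()) {
                byte[] key = iterator.peekNext().getKey();
                if (!startsWith(key, prefix)) {
                    break; // sorted keys: the first mismatch ends the prefix range
                }
                batch.delete(key);
            }
            db.write(batch); // apply all staged deletes atomically
        }
    }

    private static boolean startsWith(byte[] key, byte[] prefix) {
        if (key.length < prefix.length) {
            return false;
        }
        for (int i = 0; i < prefix.length; i++) {
            if (key[i] != prefix[i]) {
                return false;
            }
        }
        return true;
    }
}

Because db.write applies the whole batch atomically, readers never observe a half-deleted row.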