Use of org.iq80.leveldb.DBIterator in project cdap by caskdata: class LevelDBTableCore, method deleteRows.
public void deleteRows(byte[] prefix) throws IOException {
    Preconditions.checkNotNull(prefix, "prefix must not be null");
    DB db = getDB();
    // close both the iterator and the write batch even if an exception is thrown
    try (DBIterator iterator = db.iterator();
         WriteBatch batch = db.createWriteBatch()) {
        iterator.seek(createStartKey(prefix));
        while (iterator.hasNext()) {
            Map.Entry<byte[], byte[]> entry = iterator.next();
            if (!Bytes.startsWith(KeyValue.fromKey(entry.getKey()).getRow(), prefix)) {
                // keys are sorted, so the iterator is past the prefix
                break;
            }
            batch.delete(entry.getKey());
        }
        db.write(batch);
    }
}
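The same seek-then-check pattern works without CDAP's helper classes. Below is a minimal, self-contained sketch of a prefix delete against a raw iq80 LevelDB database; the database path, the "rows:" prefix, and the startsWith helper are illustrative assumptions, not part of the CDAP code above.

import java.io.File;
import java.io.IOException;
import java.util.Map;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.DBIterator;
import org.iq80.leveldb.Options;
import org.iq80.leveldb.WriteBatch;
import org.iq80.leveldb.impl.Iq80DBFactory;

public class PrefixDeleteExample {
    // hypothetical helper: true if key starts with prefix
    static boolean startsWith(byte[] key, byte[] prefix) {
        if (key.length < prefix.length) {
            return false;
        }
        for (int i = 0; i < prefix.length; i++) {
            if (key[i] != prefix[i]) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) throws IOException {
        Options options = new Options().createIfMissing(true);
        // the path "example-db" is an assumption for the sketch
        try (DB db = Iq80DBFactory.factory.open(new File("example-db"), options)) {
            byte[] prefix = Iq80DBFactory.bytes("rows:");
            try (DBIterator it = db.iterator();
                 WriteBatch batch = db.createWriteBatch()) {
                it.seek(prefix);                       // jump to the first key >= prefix
                while (it.hasNext()) {
                    Map.Entry<byte[], byte[]> e = it.next();
                    if (!startsWith(e.getKey(), prefix)) {
                        break;                         // keys are sorted, so we are past the prefix
                    }
                    batch.delete(e.getKey());
                }
                db.write(batch);                       // apply all deletes atomically
            }
        }
    }
}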
Use of org.iq80.leveldb.DBIterator in project cdap by caskdata: class LevelDBTableCore, method deleteRange.
public void deleteRange(byte[] startRow, byte[] stopRow, @Nullable FuzzyRowFilter filter, @Nullable byte[][] columns) throws IOException {
    if (columns != null) {
        if (columns.length == 0) {
            return;
        }
        // copy and sort the columns so the scanner can compare against them safely
        columns = Arrays.copyOf(columns, columns.length);
        Arrays.sort(columns, Bytes.BYTES_COMPARATOR);
    }
    DB db = getDB();
    DBIterator iterator = db.iterator();
    seekToStart(iterator, startRow);
    byte[] endKey = stopRow == null ? null : createEndKey(stopRow);
    DBIterator deleteIterator = db.iterator();
    seekToStart(deleteIterator, startRow);
    // todo make configurable
    final int deletesPerRound = 1024;
    try (Scanner scanner = new LevelDBScanner(iterator, endKey, filter, columns, null)) {
        Row rowValues;
        WriteBatch batch = db.createWriteBatch();
        int deletesInBatch = 0;
        // go through all matching cells and delete them in batches
        while ((rowValues = scanner.next()) != null) {
            byte[] row = rowValues.getRow();
            for (byte[] column : rowValues.getColumns().keySet()) {
                addToDeleteBatch(batch, deleteIterator, row, column);
                deletesInBatch++;
                // flush once we have built up a full batch
                if (deletesInBatch >= deletesPerRound) {
                    db.write(batch, getWriteOptions());
                    batch = db.createWriteBatch();
                    deletesInBatch = 0;
                }
            }
        }
        // perform any outstanding deletes
        if (deletesInBatch > 0) {
            db.write(batch, getWriteOptions());
        }
    } finally {
        deleteIterator.close();
    }
}
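Batching is the key trick here: LevelDB applies a WriteBatch atomically and far more efficiently than issuing individual deletes. A minimal sketch of the flush-every-N pattern, independent of the CDAP scanner classes (BATCH_SIZE and the key source are assumptions mirroring deletesPerRound above):

// sketch: flush a WriteBatch every BATCH_SIZE deletes (names are illustrative)
static void batchedDelete(DB db, Iterable<byte[]> keysToDelete) throws IOException {
    final int BATCH_SIZE = 1024;          // assumed tuning value
    WriteBatch batch = db.createWriteBatch();
    try {
        int pending = 0;
        for (byte[] key : keysToDelete) {
            batch.delete(key);
            if (++pending >= BATCH_SIZE) {
                db.write(batch);          // apply the full batch atomically
                batch.close();
                batch = db.createWriteBatch();
                pending = 0;
            }
        }
        if (pending > 0) {
            db.write(batch);              // flush the remainder
        }
    } finally {
        batch.close();                    // close whichever batch is current
    }
}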
Use of org.iq80.leveldb.DBIterator in project camel by apache: class LevelDBAggregationRepository, method getKeys.
public Set<String> getKeys() {
    final Set<String> keys = new LinkedHashSet<>();
    // an interval task could still be running while we are shutting down, so check for that
    if (!isRunAllowed()) {
        return null;
    }
    DBIterator it = levelDBFile.getDb().iterator();
    try {
        String prefix = repositoryName + '\0';
        for (it.seek(keyBuilder(repositoryName, "")); it.hasNext(); it.next()) {
            if (!isRunAllowed()) {
                break;
            }
            String keyBuffer = asString(it.peekNext().getKey());
            if (!keyBuffer.startsWith(prefix)) {
                // keys are sorted, so we have moved past this repository's entries
                break;
            }
            String key = keyBuffer.substring(prefix.length());
            LOG.trace("getKey [{}]", key);
            keys.add(key);
        }
    } finally {
        // make sure to close the iterator to avoid resource leaks
        IOHelper.close(it);
    }
    return Collections.unmodifiableSet(keys);
}
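Camel stores every entry under a composite key of the form repositoryName + '\0' + exchangeId, which keeps each repository's entries contiguous in LevelDB's sorted key space, so a single seek followed by a prefix check enumerates them all. A minimal sketch of that key scheme (buildKey is a hypothetical stand-in for Camel's keyBuilder, which is not public API):

import java.nio.charset.StandardCharsets;

// hypothetical stand-in for Camel's keyBuilder: repository name and key
// joined by a NUL byte, so all of a repository's entries sort together
static byte[] buildKey(String repositoryName, String key) {
    return (repositoryName + '\0' + key).getBytes(StandardCharsets.UTF_8);
}

// seeking to buildKey("myRepo", "") positions the iterator at the first
// "myRepo" entry; iteration stops as soon as a key lacks the prefix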
Use of org.iq80.leveldb.DBIterator in project camel by apache: class LevelDBAggregationRepository, method scan.
public Set<String> scan(CamelContext camelContext) {
    final Set<String> answer = new LinkedHashSet<>();
    if (!isRunAllowed()) {
        return null;
    }
    DBIterator it = levelDBFile.getDb().iterator();
    try {
        String prefix = getRepositoryNameCompleted() + '\0';
        for (it.seek(keyBuilder(getRepositoryNameCompleted(), "")); it.hasNext(); it.next()) {
            String keyBuffer = asString(it.peekNext().getKey());
            if (!keyBuffer.startsWith(prefix)) {
                break;
            }
            String exchangeId = keyBuffer.substring(prefix.length());
            LOG.trace("Scan exchangeId [{}]", exchangeId);
            answer.add(exchangeId);
        }
    } finally {
        // make sure to close the iterator to avoid resource leaks
        IOHelper.close(it);
    }
    if (answer.isEmpty()) {
        LOG.trace("Scanned and found no exchange to recover.");
    } else if (LOG.isDebugEnabled()) {
        LOG.debug("Scanned and found {} exchange(s) to recover (note some of them may already be in progress).", answer.size());
    }
    return answer;
}
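The scan method feeds Camel's recovery machinery: each returned exchangeId is typically passed back to the repository's recover method to reload the completed Exchange. A hedged sketch of that handoff (the surrounding loop and the process handler are illustrative, not Camel's actual recovery internals):

// illustrative recovery loop, assuming a RecoverableAggregationRepository
for (String exchangeId : repository.scan(camelContext)) {
    Exchange exchange = repository.recover(camelContext, exchangeId);
    if (exchange != null) {
        // resubmit or confirm the recovered exchange as the application requires
        process(exchange);   // hypothetical handler
    }
}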
Use of org.iq80.leveldb.DBIterator in project hadoop by apache: class RollingLevelDBTimelineStore, method getDomain.
@Override
public TimelineDomain getDomain(String domainId) throws IOException {
    DBIterator iterator = null;
    try {
        byte[] prefix = KeyBuilder.newInstance().add(domainId).getBytesForLookup();
        iterator = domaindb.iterator();
        iterator.seek(prefix);
        return getTimelineDomain(iterator, domainId, prefix);
    } finally {
        IOUtils.cleanup(LOG, iterator);
    }
}
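The private helper getTimelineDomain (not shown) walks the iterator while keys retain the domain prefix. A minimal sketch of that seek-and-read pattern, using peekNext to inspect a key before consuming it (startsWith is the same illustrative helper as in the first sketch above):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.iq80.leveldb.DBIterator;

// sketch: collect all values stored under a key prefix (helper names are assumptions)
static List<byte[]> readUnderPrefix(DBIterator iterator, byte[] prefix) {
    List<byte[]> values = new ArrayList<>();
    iterator.seek(prefix);                         // first key >= prefix
    while (iterator.hasNext()) {
        Map.Entry<byte[], byte[]> entry = iterator.peekNext();
        if (!startsWith(entry.getKey(), prefix)) {
            break;                                 // past the prefix range
        }
        values.add(entry.getValue());
        iterator.next();                           // advance only after the prefix check
    }
    return values;
}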