Example 26 with DB

Use of org.iq80.leveldb.DB in project cdap by caskdata.

Class LevelDBTableCore, method deleteRange.

public void deleteRange(byte[] startRow, byte[] stopRow, @Nullable FuzzyRowFilter filter, @Nullable byte[][] columns) throws IOException {
    if (columns != null) {
        if (columns.length == 0) {
            return;
        }
        columns = Arrays.copyOf(columns, columns.length);
        Arrays.sort(columns, Bytes.BYTES_COMPARATOR);
    }
    DB db = getDB();
    DBIterator iterator = db.iterator();
    seekToStart(iterator, startRow);
    byte[] endKey = stopRow == null ? null : createEndKey(stopRow);
    DBIterator deleteIterator = db.iterator();
    seekToStart(deleteIterator, startRow);
    // todo make configurable
    final int deletesPerRound = 1024;
    try (Scanner scanner = new LevelDBScanner(iterator, endKey, filter, columns, null)) {
        Row rowValues;
        WriteBatch batch = db.createWriteBatch();
        int deletesInBatch = 0;
        // go through all matching cells and delete them in batches.
        while ((rowValues = scanner.next()) != null) {
            byte[] row = rowValues.getRow();
            for (byte[] column : rowValues.getColumns().keySet()) {
                addToDeleteBatch(batch, deleteIterator, row, column);
                deletesInBatch++;
                // perform the deletes when we have built up a batch.
                if (deletesInBatch >= deletesPerRound) {
                    // delete all the entries that were found
                    db.write(batch, getWriteOptions());
                    batch = db.createWriteBatch();
                    deletesInBatch = 0;
                }
            }
        }
        // perform any outstanding deletes
        if (deletesInBatch > 0) {
            db.write(batch, getWriteOptions());
        }
    } finally {
        deleteIterator.close();
    }
}
Also used : DBIterator(org.iq80.leveldb.DBIterator) Scanner(co.cask.cdap.api.dataset.table.Scanner) Row(co.cask.cdap.api.dataset.table.Row) WriteBatch(org.iq80.leveldb.WriteBatch) DB(org.iq80.leveldb.DB)
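The pattern in example 26, scan a key range, collect deletes into a WriteBatch, and flush every N entries, can be reduced to a minimal sketch against the plain org.iq80.leveldb API. This is not the CDAP code: the batch size is arbitrary and the unsigned byte comparison stands in for CDAP's Bytes.BYTES_COMPARATOR.

import java.io.IOException;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.DBIterator;
import org.iq80.leveldb.WriteBatch;

public class BatchedDeleteSketch {

    // Deletes every key in [startKey, stopKey) in batches of 1024 entries.
    public static void deleteRange(DB db, byte[] startKey, byte[] stopKey) throws IOException {
        try (DBIterator it = db.iterator()) {
            it.seek(startKey);
            WriteBatch batch = db.createWriteBatch();
            int pending = 0;
            while (it.hasNext()) {
                byte[] key = it.next().getKey();
                if (stopKey != null && compareUnsigned(key, stopKey) >= 0) {
                    break;
                }
                batch.delete(key);
                if (++pending >= 1024) {
                    // flush and start a fresh batch, as the CDAP method does per round
                    db.write(batch);
                    batch.close();
                    batch = db.createWriteBatch();
                    pending = 0;
                }
            }
            if (pending > 0) {
                db.write(batch);
            }
            batch.close();
        }
    }

    // Unsigned lexicographic order, which matches LevelDB's default key ordering.
    private static int compareUnsigned(byte[] a, byte[] b) {
        int len = Math.min(a.length, b.length);
        for (int i = 0; i < len; i++) {
            int cmp = (a[i] & 0xff) - (b[i] & 0xff);
            if (cmp != 0) {
                return cmp;
            }
        }
        return a.length - b.length;
    }
}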

Example 27 with DB

Use of org.iq80.leveldb.DB in project cdap by caskdata.

Class LevelDBTableService, method createTable.

private void createTable(String name) throws IOException {
    String dbPath = getDBPath(basePath, name);
    Options options = new Options();
    options.createIfMissing(true);
    options.errorIfExists(false);
    options.comparator(new KeyValueDBComparator());
    options.blockSize(blockSize);
    options.cacheSize(cacheSize);
    DB db = factory.open(new File(dbPath), options);
    tables.put(name, db);
}
Also used : WriteOptions(org.iq80.leveldb.WriteOptions) Options(org.iq80.leveldb.Options) File(java.io.File) DB(org.iq80.leveldb.DB)
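A self-contained variant of the open-with-options pattern from example 27, using the pure-Java Iq80DBFactory instead of the JNI-backed factory and omitting CDAP's custom KeyValueDBComparator. The block size and cache size are placeholder values, not the ones CDAP derives from its configuration.

import java.io.File;
import java.io.IOException;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.Options;
import static org.iq80.leveldb.impl.Iq80DBFactory.factory;

public class OpenTableSketch {

    public static DB open(String dbPath) throws IOException {
        Options options = new Options();
        // create the database directory on first use, but allow reopening it later
        options.createIfMissing(true);
        options.errorIfExists(false);
        // placeholder tuning values
        options.blockSize(4 * 1024);
        options.cacheSize(100 * 1024 * 1024L);
        return factory.open(new File(dbPath), options);
    }
}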

Example 28 with DB

Use of org.iq80.leveldb.DB in project cdap by caskdata.

Class LevelDBTableService, method getTable.

public DB getTable(String tableName) throws IOException {
    DB db = tables.get(tableName);
    if (db == null) {
        synchronized (tables) {
            db = tables.get(tableName);
            if (db == null) {
                db = openTable(tableName);
                tables.put(tableName, db);
            }
        }
    }
    return db;
}
Also used : DB(org.iq80.leveldb.DB)
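Example 28 is a double-checked cache: the unsynchronized lookup keeps the common path lock-free, and the second lookup inside the synchronized block ensures each table is opened only once. As a hedged alternative sketch (not the CDAP implementation), the same guarantee can come from ConcurrentHashMap.computeIfAbsent; openTable here is a stand-in for whatever actually opens the database.

import java.io.File;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.Options;
import static org.iq80.leveldb.impl.Iq80DBFactory.factory;

public class TableCacheSketch {

    private final ConcurrentMap<String, DB> tables = new ConcurrentHashMap<>();
    private final File baseDir;

    public TableCacheSketch(File baseDir) {
        this.baseDir = baseDir;
    }

    // computeIfAbsent opens each table at most once, replacing the explicit
    // synchronized double-check in example 28.
    public DB getTable(String tableName) throws IOException {
        try {
            return tables.computeIfAbsent(tableName, this::openTable);
        } catch (UncheckedIOException e) {
            throw e.getCause();
        }
    }

    private DB openTable(String tableName) {
        try {
            return factory.open(new File(baseDir, tableName), new Options().createIfMissing(true));
        } catch (IOException e) {
            // computeIfAbsent cannot throw checked exceptions, so wrap and unwrap
            throw new UncheckedIOException(e);
        }
    }
}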

Example 29 with DB

Use of org.iq80.leveldb.DB in project hadoop by apache.

Class TestLeveldbRMStateStore, method testCompactionCycle.

@Test(timeout = 60000)
public void testCompactionCycle() throws Exception {
    final DB mockdb = mock(DB.class);
    conf.setLong(YarnConfiguration.RM_LEVELDB_COMPACTION_INTERVAL_SECS, 1);
    LeveldbRMStateStore store = new LeveldbRMStateStore() {

        @Override
        protected DB openDatabase() throws Exception {
            return mockdb;
        }
    };
    store.init(conf);
    store.start();
    verify(mockdb, timeout(10000)).compactRange((byte[]) isNull(), (byte[]) isNull());
    store.close();
}
Also used : DB(org.iq80.leveldb.DB) Test(org.junit.Test)
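What example 29 verifies through the Mockito mock is a periodic full-range compaction. Stripped of the RM state store, the shape of that behaviour is roughly the following sketch (not the YARN implementation); null bounds passed to compactRange mean the whole key space.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import org.iq80.leveldb.DB;

public class PeriodicCompactionSketch {

    // Schedules db.compactRange(null, null) every intervalSecs seconds and
    // returns the executor so the caller can shut it down later.
    public static ScheduledExecutorService start(final DB db, long intervalSecs) {
        ScheduledExecutorService timer = Executors.newSingleThreadScheduledExecutor();
        timer.scheduleAtFixedRate(new Runnable() {
            @Override
            public void run() {
                db.compactRange(null, null);
            }
        }, intervalSecs, intervalSecs, TimeUnit.SECONDS);
        return timer;
    }
}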

Example 30 with DB

Use of org.iq80.leveldb.DB in project distributedlog by twitter.

Class ReaderWithOffsets, method main.

public static void main(String[] args) throws Exception {
    if (4 != args.length) {
        System.out.println(HELP);
        return;
    }
    String dlUriStr = args[0];
    final String streamName = args[1];
    final String readerId = args[2];
    final String offsetStoreFile = args[3];
    URI uri = URI.create(dlUriStr);
    DistributedLogConfiguration conf = new DistributedLogConfiguration();
    DistributedLogNamespace namespace = DistributedLogNamespaceBuilder.newBuilder().conf(conf).uri(uri).build();
    // open the dlm
    System.out.println("Opening log stream " + streamName);
    DistributedLogManager dlm = namespace.openLog(streamName);
    // open the offset store
    Options options = new Options();
    options.createIfMissing(true);
    final DB offsetDB = factory.open(new File(offsetStoreFile), options);
    final AtomicReference<DLSN> lastDLSN = new AtomicReference<DLSN>(null);
    // offset updater
    final ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();
    executorService.scheduleAtFixedRate(new Runnable() {

        @Override
        public void run() {
            if (null != lastDLSN.get()) {
                offsetDB.put(readerId.getBytes(UTF_8), lastDLSN.get().serializeBytes());
                System.out.println("Updated reader " + readerId + " offset to " + lastDLSN.get());
            }
        }
    }, 10, 10, TimeUnit.SECONDS);
    try {
        byte[] offset = offsetDB.get(readerId.getBytes(UTF_8));
        DLSN dlsn;
        if (null == offset) {
            dlsn = DLSN.InitialDLSN;
        } else {
            dlsn = DLSN.deserializeBytes(offset);
        }
        readLoop(dlm, dlsn, lastDLSN);
    } finally {
        offsetDB.close();
        dlm.close();
        namespace.close();
    }
}
Also used : Options(org.iq80.leveldb.Options) DistributedLogNamespace(com.twitter.distributedlog.namespace.DistributedLogNamespace) AtomicReference(java.util.concurrent.atomic.AtomicReference) URI(java.net.URI) File(java.io.File) DB(org.iq80.leveldb.DB)
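The offset store in example 30 is a small key-value contract on LevelDB: one key per reader id, one serialized position per key. Stripped of the DistributedLog machinery it reduces to a sketch like the following; OffsetStoreSketch and its method names are made up for illustration.

import static java.nio.charset.StandardCharsets.UTF_8;
import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.Options;
import static org.iq80.leveldb.impl.Iq80DBFactory.factory;

public class OffsetStoreSketch implements Closeable {

    private final DB db;

    public OffsetStoreSketch(File path) throws IOException {
        this.db = factory.open(path, new Options().createIfMissing(true));
    }

    // Persist the latest position for a reader, as the scheduled task in
    // example 30 does every ten seconds.
    public void saveOffset(String readerId, byte[] serializedPosition) {
        db.put(readerId.getBytes(UTF_8), serializedPosition);
    }

    // Returns null when the reader has never checkpointed; example 30 falls
    // back to DLSN.InitialDLSN in that case.
    public byte[] loadOffset(String readerId) {
        return db.get(readerId.getBytes(UTF_8));
    }

    @Override
    public void close() throws IOException {
        db.close();
    }
}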

Aggregations (types used together with org.iq80.leveldb.DB across these examples, with usage counts)

IOException (java.io.IOException) 25
DB (org.iq80.leveldb.DB) 25
DBException (org.iq80.leveldb.DBException) 20
LeveldbIterator (org.apache.hadoop.yarn.server.utils.LeveldbIterator) 16
Options (org.iq80.leveldb.Options) 16
File (java.io.File) 15
JniDBFactory.asString (org.fusesource.leveldbjni.JniDBFactory.asString) 14
WriteBatch (org.iq80.leveldb.WriteBatch) 9
DBIterator (org.iq80.leveldb.DBIterator) 7
Map (java.util.Map) 5
Path (org.apache.hadoop.fs.Path) 5
Test (org.junit.Test) 5
ImmutableMap (com.google.common.collect.ImmutableMap) 4
ImmutableSortedMap (com.google.common.collect.ImmutableSortedMap) 4
NavigableMap (java.util.NavigableMap) 4
NativeDB (org.fusesource.leveldbjni.internal.NativeDB) 4
WriteOptions (org.iq80.leveldb.WriteOptions) 4
DB (com.codecademy.eventhub.base.DB) 3
Provides (com.google.inject.Provides) 3
ArrayList (java.util.ArrayList) 3