
Example 6 with WriteBatch

Use of org.iq80.leveldb.WriteBatch in the Apache Hadoop project.

From class NMLeveldbStateStoreService, method removeLocalizedResource:

@Override
public void removeLocalizedResource(String user, ApplicationId appId, Path localPath) throws IOException {
    String localPathStr = localPath.toString();
    String startedKey = getResourceStartedKey(user, appId, localPathStr);
    String completedKey = getResourceCompletedKey(user, appId, localPathStr);
    if (LOG.isDebugEnabled()) {
        LOG.debug("Removing local resource at " + localPathStr);
    }
    try {
        WriteBatch batch = db.createWriteBatch();
        try {
            batch.delete(bytes(startedKey));
            batch.delete(bytes(completedKey));
            db.write(batch);
        } finally {
            batch.close();
        }
    } catch (DBException e) {
        throw new IOException(e);
    }
}
Also used: DBException (org.iq80.leveldb.DBException), JniDBFactory.asString (org.fusesource.leveldbjni.JniDBFactory.asString), IOException (java.io.IOException), WriteBatch (org.iq80.leveldb.WriteBatch)
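
The try/finally above is the pre-Java-7 idiom; WriteBatch implements java.io.Closeable, so the same atomic delete-both-keys pattern can be written with try-with-resources. Below is a minimal, self-contained sketch of that pattern; the database path and key strings are hypothetical, not taken from Hadoop:

import java.io.File;
import java.io.IOException;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.DBException;
import org.iq80.leveldb.Options;
import org.iq80.leveldb.WriteBatch;
import static org.fusesource.leveldbjni.JniDBFactory.bytes;
import static org.fusesource.leveldbjni.JniDBFactory.factory;

public class BatchedDeleteSketch {
    public static void main(String[] args) throws IOException {
        Options options = new Options().createIfMissing(true);
        try (DB db = factory.open(new File("/tmp/sketch-db"), options)) {
            try (WriteBatch batch = db.createWriteBatch()) {
                // The single write() applies the whole batch atomically:
                // either both keys are deleted or neither is.
                batch.delete(bytes("resource/started/app_1"));
                batch.delete(bytes("resource/completed/app_1"));
                db.write(batch);
            } catch (DBException e) {
                // leveldb signals failures with the unchecked DBException
                throw new IOException(e);
            }
        }
    }
}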

Example 7 with WriteBatch

Use of org.iq80.leveldb.WriteBatch in the Apache Hadoop project.

From class LeveldbRMStateStore, method storeReservationState:

@Override
protected void storeReservationState(ReservationAllocationStateProto reservationAllocation, String planName, String reservationIdName) throws Exception {
    try {
        WriteBatch batch = db.createWriteBatch();
        try {
            String key = getReservationNodeKey(planName, reservationIdName);
            if (LOG.isDebugEnabled()) {
                LOG.debug("Storing state for reservation " + reservationIdName + " plan " + planName + " at " + key);
            }
            batch.put(bytes(key), reservationAllocation.toByteArray());
            db.write(batch);
        } finally {
            batch.close();
        }
    } catch (DBException e) {
        throw new IOException(e);
    }
}
Also used: DBException (org.iq80.leveldb.DBException), JniDBFactory.asString (org.fusesource.leveldbjni.JniDBFactory.asString), IOException (java.io.IOException), WriteBatch (org.iq80.leveldb.WriteBatch)
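
A design note on storeReservationState: db.write(batch) is atomic, but by default it does not force a sync to disk, so a machine crash can lose the most recent batches. The iq80 DB interface also exposes write(WriteBatch, WriteOptions) for callers that need per-batch durability. A hedged sketch, assuming the imports from the example above plus org.iq80.leveldb.WriteOptions; the key and payload are hypothetical:

static void storeDurably(DB db) throws IOException {
    try (WriteBatch batch = db.createWriteBatch()) {
        batch.put(bytes("reservation/plan-a/res-1"), bytes("serialized-state"));
        // sync(true) asks LevelDB to fsync its log before returning,
        // trading write latency for durability of this particular batch.
        db.write(batch, new WriteOptions().sync(true));
    } catch (DBException e) {
        throw new IOException(e);
    }
}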

Example 8 with WriteBatch

Use of org.iq80.leveldb.WriteBatch in the Apache Hadoop project.

From class LeveldbRMStateStore, method removeReservationState:

@Override
protected void removeReservationState(String planName, String reservationIdName) throws Exception {
    try {
        WriteBatch batch = db.createWriteBatch();
        try {
            String reservationKey = getReservationNodeKey(planName, reservationIdName);
            batch.delete(bytes(reservationKey));
            if (LOG.isDebugEnabled()) {
                LOG.debug("Removing state for reservation " + reservationIdName + " plan " + planName + " at " + reservationKey);
            }
            db.write(batch);
        } finally {
            batch.close();
        }
    } catch (DBException e) {
        throw new IOException(e);
    }
}
Also used: DBException (org.iq80.leveldb.DBException), JniDBFactory.asString (org.fusesource.leveldbjni.JniDBFactory.asString), IOException (java.io.IOException), WriteBatch (org.iq80.leveldb.WriteBatch)
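
Puts and deletes can be mixed in a single batch, which makes atomic re-keying possible: a record can be moved to a new key with no window in which it exists under both keys or under neither. A hedged sketch with hypothetical keys, assuming the same imports as the first sketch:

static void rekeyAtomically(DB db) throws IOException {
    byte[] value = db.get(bytes("reservation/plan-a/res-1"));
    if (value == null) {
        return; // nothing stored under the old key
    }
    try (WriteBatch batch = db.createWriteBatch()) {
        // put + delete applied in one atomic write: readers observe the
        // record under the old key or the new key, never both or neither.
        batch.put(bytes("reservation/plan-b/res-1"), value);
        batch.delete(bytes("reservation/plan-a/res-1"));
        db.write(batch);
    } catch (DBException e) {
        throw new IOException(e);
    }
}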

Example 9 with WriteBatch

Use of org.iq80.leveldb.WriteBatch in the Apache Hadoop project.

From class RollingLevelDBTimelineStore, method put:

// TODO: make data retention work with the domain data as well
@Override
public void put(TimelineDomain domain) throws IOException {
    WriteBatch domainWriteBatch = null;
    WriteBatch ownerWriteBatch = null;
    try {
        domainWriteBatch = domaindb.createWriteBatch();
        ownerWriteBatch = ownerdb.createWriteBatch();
        if (domain.getId() == null || domain.getId().length() == 0) {
            throw new IllegalArgumentException("Domain doesn't have an ID");
        }
        if (domain.getOwner() == null || domain.getOwner().length() == 0) {
            throw new IllegalArgumentException("Domain doesn't have an owner.");
        }
        // Write description
        byte[] domainEntryKey = createDomainEntryKey(domain.getId(), DESCRIPTION_COLUMN);
        byte[] ownerLookupEntryKey = createOwnerLookupKey(domain.getOwner(), domain.getId(), DESCRIPTION_COLUMN);
        if (domain.getDescription() != null) {
            domainWriteBatch.put(domainEntryKey, domain.getDescription().getBytes(UTF_8));
            ownerWriteBatch.put(ownerLookupEntryKey, domain.getDescription().getBytes(UTF_8));
        } else {
            domainWriteBatch.put(domainEntryKey, EMPTY_BYTES);
            ownerWriteBatch.put(ownerLookupEntryKey, EMPTY_BYTES);
        }
        // Write owner
        domainEntryKey = createDomainEntryKey(domain.getId(), OWNER_COLUMN);
        ownerLookupEntryKey = createOwnerLookupKey(domain.getOwner(), domain.getId(), OWNER_COLUMN);
        // Null check for owner is done before
        domainWriteBatch.put(domainEntryKey, domain.getOwner().getBytes(UTF_8));
        ownerWriteBatch.put(ownerLookupEntryKey, domain.getOwner().getBytes(UTF_8));
        // Write readers
        domainEntryKey = createDomainEntryKey(domain.getId(), READER_COLUMN);
        ownerLookupEntryKey = createOwnerLookupKey(domain.getOwner(), domain.getId(), READER_COLUMN);
        if (domain.getReaders() != null && domain.getReaders().length() > 0) {
            domainWriteBatch.put(domainEntryKey, domain.getReaders().getBytes(UTF_8));
            ownerWriteBatch.put(ownerLookupEntryKey, domain.getReaders().getBytes(UTF_8));
        } else {
            domainWriteBatch.put(domainEntryKey, EMPTY_BYTES);
            ownerWriteBatch.put(ownerLookupEntryKey, EMPTY_BYTES);
        }
        // Write writers
        domainEntryKey = createDomainEntryKey(domain.getId(), WRITER_COLUMN);
        ownerLookupEntryKey = createOwnerLookupKey(domain.getOwner(), domain.getId(), WRITER_COLUMN);
        if (domain.getWriters() != null && domain.getWriters().length() > 0) {
            domainWriteBatch.put(domainEntryKey, domain.getWriters().getBytes(UTF_8));
            ownerWriteBatch.put(ownerLookupEntryKey, domain.getWriters().getBytes(UTF_8));
        } else {
            domainWriteBatch.put(domainEntryKey, EMPTY_BYTES);
            ownerWriteBatch.put(ownerLookupEntryKey, EMPTY_BYTES);
        }
        // Write creation time and modification time
        // We put both timestamps together because they are always retrieved
        // together, and store them in the same way as we did for the entity's
        // start time and insert time.
        domainEntryKey = createDomainEntryKey(domain.getId(), TIMESTAMP_COLUMN);
        ownerLookupEntryKey = createOwnerLookupKey(domain.getOwner(), domain.getId(), TIMESTAMP_COLUMN);
        long currentTimestamp = System.currentTimeMillis();
        byte[] timestamps = domaindb.get(domainEntryKey);
        if (timestamps == null) {
            timestamps = new byte[16];
            writeReverseOrderedLong(currentTimestamp, timestamps, 0);
            writeReverseOrderedLong(currentTimestamp, timestamps, 8);
        } else {
            writeReverseOrderedLong(currentTimestamp, timestamps, 8);
        }
        domainWriteBatch.put(domainEntryKey, timestamps);
        ownerWriteBatch.put(ownerLookupEntryKey, timestamps);
        domaindb.write(domainWriteBatch);
        ownerdb.write(ownerWriteBatch);
    } finally {
        IOUtils.cleanup(LOG, domainWriteBatch);
        IOUtils.cleanup(LOG, ownerWriteBatch);
    }
}
Also used: RollingWriteBatch (org.apache.hadoop.yarn.server.timeline.RollingLevelDB.RollingWriteBatch), WriteBatch (org.iq80.leveldb.WriteBatch)
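
Two things are worth noting in this example. First, the two write() calls at the end are each atomic within their own database, but the update is not atomic across domaindb and ownerdb. Second, the timestamps are encoded with writeReverseOrderedLong (a Hadoop helper in GenericObjectMapper) so that larger longs sort lexicographically before smaller ones under LevelDB's byte-wise key ordering. Below is a sketch of the underlying idea, assuming the standard bit-flip trick rather than Hadoop's exact byte layout:

// Encode a long so that byte-wise lexicographic order is the reverse of
// numeric order (larger values sort first). Not Hadoop's exact code.
static void writeReverseOrderedLongSketch(long value, byte[] dst, int offset) {
    long unsignedOrder = value ^ Long.MIN_VALUE; // map signed order onto unsigned byte order
    long reversed = ~unsignedOrder;              // invert so larger values compare smaller
    for (int i = 0; i < 8; i++) {
        dst[offset + i] = (byte) (reversed >>> (8 * (7 - i))); // big-endian
    }
}

static long readReverseOrderedLongSketch(byte[] src, int offset) {
    long reversed = 0;
    for (int i = 0; i < 8; i++) {
        reversed = (reversed << 8) | (src[offset + i] & 0xff);
    }
    return ~reversed ^ Long.MIN_VALUE; // undo both transformations
}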

Example 10 with WriteBatch

Use of org.iq80.leveldb.WriteBatch in the Apache Hadoop project.

From class RollingLevelDBTimelineStore, method evictOldStartTimes:

@VisibleForTesting
long evictOldStartTimes(long minStartTime) throws IOException {
    LOG.info("Searching for start times to evict earlier than " + minStartTime);
    long batchSize = 0;
    long totalCount = 0;
    long startTimesCount = 0;
    WriteBatch writeBatch = null;
    DBIterator iterator = null;
    try {
        writeBatch = starttimedb.createWriteBatch();
        ReadOptions readOptions = new ReadOptions();
        readOptions.fillCache(false);
        iterator = starttimedb.iterator(readOptions);
        // seek to the first start time entry
        iterator.seekToFirst();
        // evaluate each start time entry to see if it needs to be evicted or not
        while (iterator.hasNext()) {
            Map.Entry<byte[], byte[]> current = iterator.next();
            byte[] entityKey = current.getKey();
            byte[] entityValue = current.getValue();
            long startTime = readReverseOrderedLong(entityValue, 0);
            if (startTime < minStartTime) {
                ++batchSize;
                ++startTimesCount;
                writeBatch.delete(entityKey);
                // a large delete will hold the lock for too long
                if (batchSize >= writeBatchSize) {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Preparing to delete a batch of " + batchSize + " old start times");
                    }
                    starttimedb.write(writeBatch);
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Deleted batch of " + batchSize + ". Total start times deleted so far this cycle: " + startTimesCount);
                    }
                    IOUtils.cleanup(LOG, writeBatch);
                    writeBatch = starttimedb.createWriteBatch();
                    batchSize = 0;
                }
            }
            ++totalCount;
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("Preparing to delete a batch of " + batchSize + " old start times");
        }
        starttimedb.write(writeBatch);
        if (LOG.isDebugEnabled()) {
            LOG.debug("Deleted batch of " + batchSize + ". Total start times deleted so far this cycle: " + startTimesCount);
        }
        LOG.info("Deleted " + startTimesCount + "/" + totalCount + " start time entities earlier than " + minStartTime);
    } finally {
        IOUtils.cleanup(LOG, writeBatch);
        IOUtils.cleanup(LOG, iterator);
    }
    return startTimesCount;
}
Also used: DBIterator (org.iq80.leveldb.DBIterator), ReadOptions (org.iq80.leveldb.ReadOptions), RollingWriteBatch (org.apache.hadoop.yarn.server.timeline.RollingLevelDB.RollingWriteBatch), WriteBatch (org.iq80.leveldb.WriteBatch), Map (java.util.Map), TreeMap (java.util.TreeMap), LRUMap (org.apache.commons.collections.map.LRUMap), VisibleForTesting (com.google.common.annotations.VisibleForTesting)
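
evictOldStartTimes illustrates a reusable chunked-delete pattern: buffer deletions in a WriteBatch and flush every writeBatchSize entries so that one enormous batch never holds the write lock for too long. A hedged generic sketch of that pattern; the helper name and predicate parameter are hypothetical, and the imports are those from the first sketch plus java.util.Map, java.util.function.Predicate, org.iq80.leveldb.DBIterator, and org.iq80.leveldb.ReadOptions:

// Delete every entry matching the predicate, flushing in bounded chunks.
static long deleteMatching(DB db, Predicate<Map.Entry<byte[], byte[]>> shouldDelete,
        long maxBatchSize) throws IOException {
    long deleted = 0;
    long inBatch = 0;
    WriteBatch batch = db.createWriteBatch();
    try (DBIterator iterator = db.iterator(new ReadOptions().fillCache(false))) {
        iterator.seekToFirst();
        while (iterator.hasNext()) {
            Map.Entry<byte[], byte[]> entry = iterator.next();
            if (shouldDelete.test(entry)) {
                batch.delete(entry.getKey());
                deleted++;
                if (++inBatch >= maxBatchSize) {
                    db.write(batch);   // flush this chunk
                    batch.close();
                    batch = db.createWriteBatch();
                    inBatch = 0;
                }
            }
        }
        db.write(batch);               // flush the final partial chunk
    } finally {
        batch.close();
    }
    return deleted;
}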

Aggregations

WriteBatch (org.iq80.leveldb.WriteBatch): 30
IOException (java.io.IOException): 21
DBException (org.iq80.leveldb.DBException): 17
JniDBFactory.asString (org.fusesource.leveldbjni.JniDBFactory.asString): 12
Map (java.util.Map): 9
DB (org.iq80.leveldb.DB): 8
DBIterator (org.iq80.leveldb.DBIterator): 5
ImmutableMap (com.google.common.collect.ImmutableMap): 4
ImmutableSortedMap (com.google.common.collect.ImmutableSortedMap): 4
NavigableMap (java.util.NavigableMap): 4
RawMessageTableEntry (co.cask.cdap.messaging.store.RawMessageTableEntry): 3
RollingWriteBatch (org.apache.hadoop.yarn.server.timeline.RollingLevelDB.RollingWriteBatch): 3
ImmutableMessageTableEntry (co.cask.cdap.messaging.store.ImmutableMessageTableEntry): 2
RawPayloadTableEntry (co.cask.cdap.messaging.store.RawPayloadTableEntry): 2
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 2
DataOutputStream (java.io.DataOutputStream): 2
HashMap (java.util.HashMap): 2
Row (co.cask.cdap.api.dataset.table.Row): 1
Scanner (co.cask.cdap.api.dataset.table.Scanner): 1
AbstractMessageTable (co.cask.cdap.messaging.store.AbstractMessageTable): 1