
Example 1 with DB

Use of org.iq80.leveldb.DB in project camel by apache.

From the class LevelDBFile, method start().

public void start() {
    if (getFile() == null) {
        throw new IllegalArgumentException("A file must be configured");
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Starting LevelDB using file: {}", getFile());
    }
    Options options = new Options()
            .writeBufferSize(writeBufferSize)
            .maxOpenFiles(maxOpenFiles)
            .blockRestartInterval(blockRestartInterval)
            .blockSize(blockSize)
            .verifyChecksums(verifyChecksums)
            .paranoidChecks(paranoidChecks)
            .cacheSize(cacheSize);
    if ("snappy".equals(compressionType)) {
        options.compressionType(CompressionType.SNAPPY);
    } else {
        options.compressionType(CompressionType.NONE);
    }
    options.createIfMissing(true);
    try {
        getFile().getParentFile().mkdirs();
        db = factory.open(getFile(), options);
    } catch (IOException ioe) {
        throw new RuntimeException("Error opening LevelDB with file " + getFile(), ioe);
    }
}
Also used: Options (org.iq80.leveldb.Options), WriteOptions (org.iq80.leveldb.WriteOptions), IOException (java.io.IOException)
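
For readers unfamiliar with the API, here is a minimal, self-contained sketch of the same open-and-configure flow. It assumes the pure-Java Iq80DBFactory (the Camel component resolves its factory elsewhere), and the path and option values are placeholders, not the component's defaults.

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.iq80.leveldb.CompressionType;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.Options;

import static org.iq80.leveldb.impl.Iq80DBFactory.factory;

public class OpenLevelDbSketch {

    public static void main(String[] args) throws IOException {
        // mirror the kind of options LevelDBFile.start() configures above
        Options options = new Options()
                .writeBufferSize(4 << 20) // 4 MiB, placeholder value
                .compressionType(CompressionType.SNAPPY)
                .createIfMissing(true);

        // "target/leveldb-sketch" is a placeholder path for this sketch
        DB db = factory.open(new File("target/leveldb-sketch"), options);
        try {
            db.put("key".getBytes(StandardCharsets.UTF_8),
                   "value".getBytes(StandardCharsets.UTF_8));
            byte[] value = db.get("key".getBytes(StandardCharsets.UTF_8));
            System.out.println(new String(value, StandardCharsets.UTF_8));
        } finally {
            // always close the DB when done; it may hold native resources
            db.close();
        }
    }
}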

Example 2 with DB

Use of org.iq80.leveldb.DB in project hadoop by apache.

From the class RollingLevelDBTimelineStore, method putEntities().

/**
   * Put a single entity. If there is an error, add a TimelinePutError to the
   * given response.
   *
   * @param entityUpdates
   *          a map containing all the scheduled writes for this put to the
   *          entity db
   * @param indexUpdates
   *          a map containing all the scheduled writes for this put to the
   *          index db
   * @param entity
   *          the timeline entity to put
   * @param response
   *          the response to which any put errors are added
   * @return the number of writes scheduled for this put
   */
private long putEntities(TreeMap<Long, RollingWriteBatch> entityUpdates, TreeMap<Long, RollingWriteBatch> indexUpdates, TimelineEntity entity, TimelinePutResponse response) {
    long putCount = 0;
    List<EntityIdentifier> relatedEntitiesWithoutStartTimes = new ArrayList<EntityIdentifier>();
    byte[] revStartTime = null;
    Map<String, Set<Object>> primaryFilters = null;
    try {
        List<TimelineEvent> events = entity.getEvents();
        // look up the start time for the entity
        Long startTime = getAndSetStartTime(entity.getEntityId(), entity.getEntityType(), entity.getStartTime(), events);
        if (startTime == null) {
            // if no start time is found, add an error and return
            TimelinePutError error = new TimelinePutError();
            error.setEntityId(entity.getEntityId());
            error.setEntityType(entity.getEntityType());
            error.setErrorCode(TimelinePutError.NO_START_TIME);
            response.addError(error);
            return putCount;
        }
        // Must have a domain
        if (StringUtils.isEmpty(entity.getDomainId())) {
            TimelinePutError error = new TimelinePutError();
            error.setEntityId(entity.getEntityId());
            error.setEntityType(entity.getEntityType());
            error.setErrorCode(TimelinePutError.NO_DOMAIN);
            response.addError(error);
            return putCount;
        }
        revStartTime = writeReverseOrderedLong(startTime);
        long roundedStartTime = entitydb.computeCurrentCheckMillis(startTime);
        RollingWriteBatch rollingWriteBatch = entityUpdates.get(roundedStartTime);
        if (rollingWriteBatch == null) {
            DB db = entitydb.getDBForStartTime(startTime);
            if (db != null) {
                WriteBatch writeBatch = db.createWriteBatch();
                rollingWriteBatch = new RollingWriteBatch(db, writeBatch);
                entityUpdates.put(roundedStartTime, rollingWriteBatch);
            }
        }
        if (rollingWriteBatch == null) {
            // if no DB exists for this start time, the entity has expired;
            // add an error and return
            TimelinePutError error = new TimelinePutError();
            error.setEntityId(entity.getEntityId());
            error.setEntityType(entity.getEntityType());
            error.setErrorCode(TimelinePutError.EXPIRED_ENTITY);
            response.addError(error);
            return putCount;
        }
        WriteBatch writeBatch = rollingWriteBatch.getWriteBatch();
        // Cache the getBytes() results to avoid repeating the conversions below
        byte[] entityIdBytes = entity.getEntityId().getBytes(UTF_8);
        byte[] entityTypeBytes = entity.getEntityType().getBytes(UTF_8);
        byte[] domainIdBytes = entity.getDomainId().getBytes(UTF_8);
        // write entity marker
        byte[] markerKey = KeyBuilder.newInstance(3).add(entityTypeBytes, true).add(revStartTime).add(entityIdBytes, true).getBytesForLookup();
        writeBatch.put(markerKey, EMPTY_BYTES);
        ++putCount;
        // write domain id entry
        byte[] domainkey = KeyBuilder.newInstance(4).add(entityTypeBytes, true).add(revStartTime).add(entityIdBytes, true).add(DOMAIN_ID_COLUMN).getBytes();
        writeBatch.put(domainkey, domainIdBytes);
        ++putCount;
        // write event entries
        if (events != null) {
            for (TimelineEvent event : events) {
                byte[] revts = writeReverseOrderedLong(event.getTimestamp());
                byte[] key = KeyBuilder.newInstance().add(entityTypeBytes, true).add(revStartTime).add(entityIdBytes, true).add(EVENTS_COLUMN).add(revts).add(event.getEventType().getBytes(UTF_8)).getBytes();
                byte[] value = fstConf.asByteArray(event.getEventInfo());
                writeBatch.put(key, value);
                ++putCount;
            }
        }
        // write primary filter entries
        primaryFilters = entity.getPrimaryFilters();
        if (primaryFilters != null) {
            for (Entry<String, Set<Object>> primaryFilter : primaryFilters.entrySet()) {
                for (Object primaryFilterValue : primaryFilter.getValue()) {
                    byte[] key = KeyBuilder.newInstance(6).add(entityTypeBytes, true).add(revStartTime).add(entityIdBytes, true).add(PRIMARY_FILTERS_COLUMN).add(primaryFilter.getKey()).add(fstConf.asByteArray(primaryFilterValue)).getBytes();
                    writeBatch.put(key, EMPTY_BYTES);
                    ++putCount;
                }
            }
        }
        // write other info entries
        Map<String, Object> otherInfo = entity.getOtherInfo();
        if (otherInfo != null) {
            for (Entry<String, Object> info : otherInfo.entrySet()) {
                byte[] key = KeyBuilder.newInstance(5).add(entityTypeBytes, true).add(revStartTime).add(entityIdBytes, true).add(OTHER_INFO_COLUMN).add(info.getKey()).getBytes();
                byte[] value = fstConf.asByteArray(info.getValue());
                writeBatch.put(key, value);
                ++putCount;
            }
        }
        // write related entity entries
        Map<String, Set<String>> relatedEntities = entity.getRelatedEntities();
        if (relatedEntities != null) {
            for (Entry<String, Set<String>> relatedEntityList : relatedEntities.entrySet()) {
                String relatedEntityType = relatedEntityList.getKey();
                for (String relatedEntityId : relatedEntityList.getValue()) {
                    // look up start time of related entity
                    Long relatedStartTimeLong = getStartTimeLong(relatedEntityId, relatedEntityType);
                    // delay writing the related entity if no start time is found
                    if (relatedStartTimeLong == null) {
                        relatedEntitiesWithoutStartTimes.add(new EntityIdentifier(relatedEntityId, relatedEntityType));
                        continue;
                    }
                    byte[] relatedEntityStartTime = writeReverseOrderedLong(relatedStartTimeLong);
                    long relatedRoundedStartTime = entitydb.computeCurrentCheckMillis(relatedStartTimeLong);
                    RollingWriteBatch relatedRollingWriteBatch = entityUpdates.get(relatedRoundedStartTime);
                    if (relatedRollingWriteBatch == null) {
                        DB db = entitydb.getDBForStartTime(relatedStartTimeLong);
                        if (db != null) {
                            WriteBatch relatedWriteBatch = db.createWriteBatch();
                            relatedRollingWriteBatch = new RollingWriteBatch(db, relatedWriteBatch);
                            entityUpdates.put(relatedRoundedStartTime, relatedRollingWriteBatch);
                        }
                    }
                    if (relatedRollingWriteBatch == null) {
                        // if no DB exists for the related entity's start time,
                        // it has expired; add an error and skip this relation
                        TimelinePutError error = new TimelinePutError();
                        error.setEntityId(entity.getEntityId());
                        error.setEntityType(entity.getEntityType());
                        error.setErrorCode(TimelinePutError.EXPIRED_ENTITY);
                        response.addError(error);
                        continue;
                    }
                    // The related entity already exists; look up its domain id
                    byte[] relatedDomainIdBytes = relatedRollingWriteBatch.getDB().get(createDomainIdKey(relatedEntityId, relatedEntityType, relatedEntityStartTime));
                    // The timeline data created by the server before 2.6 won't have
                    // the domain field. We assume this timeline data is in the
                    // default timeline domain.
                    String domainId = null;
                    if (relatedDomainIdBytes == null) {
                        domainId = TimelineDataManager.DEFAULT_DOMAIN_ID;
                    } else {
                        domainId = new String(relatedDomainIdBytes, UTF_8);
                    }
                    if (!domainId.equals(entity.getDomainId())) {
                        // in this case the entity will be put, but the relation will be
                        // ignored
                        TimelinePutError error = new TimelinePutError();
                        error.setEntityId(entity.getEntityId());
                        error.setEntityType(entity.getEntityType());
                        error.setErrorCode(TimelinePutError.FORBIDDEN_RELATION);
                        response.addError(error);
                        continue;
                    }
                    // write "forward" entry (related entity -> entity)
                    byte[] key = createRelatedEntityKey(relatedEntityId, relatedEntityType, relatedEntityStartTime, entity.getEntityId(), entity.getEntityType());
                    WriteBatch relatedWriteBatch = relatedRollingWriteBatch.getWriteBatch();
                    relatedWriteBatch.put(key, EMPTY_BYTES);
                    ++putCount;
                }
            }
        }
        // write index entities
        RollingWriteBatch indexRollingWriteBatch = indexUpdates.get(roundedStartTime);
        if (indexRollingWriteBatch == null) {
            DB db = indexdb.getDBForStartTime(startTime);
            if (db != null) {
                WriteBatch indexWriteBatch = db.createWriteBatch();
                indexRollingWriteBatch = new RollingWriteBatch(db, indexWriteBatch);
                indexUpdates.put(roundedStartTime, indexRollingWriteBatch);
            }
        }
        if (indexRollingWriteBatch == null) {
            // if no index DB exists for this start time, the entity has
            // expired; add an error and return
            TimelinePutError error = new TimelinePutError();
            error.setEntityId(entity.getEntityId());
            error.setEntityType(entity.getEntityType());
            error.setErrorCode(TimelinePutError.EXPIRED_ENTITY);
            response.addError(error);
            return putCount;
        }
        WriteBatch indexWriteBatch = indexRollingWriteBatch.getWriteBatch();
        putCount += writePrimaryFilterEntries(indexWriteBatch, primaryFilters, markerKey, EMPTY_BYTES);
    } catch (IOException e) {
        LOG.error("Error putting entity " + entity.getEntityId() + " of type " + entity.getEntityType(), e);
        TimelinePutError error = new TimelinePutError();
        error.setEntityId(entity.getEntityId());
        error.setEntityType(entity.getEntityType());
        error.setErrorCode(TimelinePutError.IO_EXCEPTION);
        response.addError(error);
    }
    for (EntityIdentifier relatedEntity : relatedEntitiesWithoutStartTimes) {
        try {
            Long relatedEntityStartAndInsertTime = getAndSetStartTime(relatedEntity.getId(), relatedEntity.getType(), readReverseOrderedLong(revStartTime, 0), null);
            if (relatedEntityStartAndInsertTime == null) {
                throw new IOException("Error setting start time for related entity");
            }
            long relatedStartTimeLong = relatedEntityStartAndInsertTime;
            long relatedRoundedStartTime = entitydb.computeCurrentCheckMillis(relatedStartTimeLong);
            RollingWriteBatch relatedRollingWriteBatch = entityUpdates.get(relatedRoundedStartTime);
            if (relatedRollingWriteBatch == null) {
                DB db = entitydb.getDBForStartTime(relatedStartTimeLong);
                if (db != null) {
                    WriteBatch relatedWriteBatch = db.createWriteBatch();
                    relatedRollingWriteBatch = new RollingWriteBatch(db, relatedWriteBatch);
                    entityUpdates.put(relatedRoundedStartTime, relatedRollingWriteBatch);
                }
            }
            if (relatedRollingWriteBatch == null) {
                // if no DB exists for the related entity's start time,
                // it has expired; add an error and skip it
                TimelinePutError error = new TimelinePutError();
                error.setEntityId(entity.getEntityId());
                error.setEntityType(entity.getEntityType());
                error.setErrorCode(TimelinePutError.EXPIRED_ENTITY);
                response.addError(error);
                continue;
            }
            WriteBatch relatedWriteBatch = relatedRollingWriteBatch.getWriteBatch();
            byte[] relatedEntityStartTime = writeReverseOrderedLong(relatedEntityStartAndInsertTime);
            // This is the new entity, the domain should be the same
            byte[] key = createDomainIdKey(relatedEntity.getId(), relatedEntity.getType(), relatedEntityStartTime);
            relatedWriteBatch.put(key, entity.getDomainId().getBytes(UTF_8));
            ++putCount;
            relatedWriteBatch.put(createRelatedEntityKey(relatedEntity.getId(), relatedEntity.getType(), relatedEntityStartTime, entity.getEntityId(), entity.getEntityType()), EMPTY_BYTES);
            ++putCount;
            relatedWriteBatch.put(createEntityMarkerKey(relatedEntity.getId(), relatedEntity.getType(), relatedEntityStartTime), EMPTY_BYTES);
            ++putCount;
        } catch (IOException e) {
            LOG.error("Error putting related entity " + relatedEntity.getId() + " of type " + relatedEntity.getType() + " for entity " + entity.getEntityId() + " of type " + entity.getEntityType(), e);
            TimelinePutError error = new TimelinePutError();
            error.setEntityId(entity.getEntityId());
            error.setEntityType(entity.getEntityType());
            error.setErrorCode(TimelinePutError.IO_EXCEPTION);
            response.addError(error);
        }
    }
    return putCount;
}
Also used: TimelineEvent (org.apache.hadoop.yarn.api.records.timeline.TimelineEvent), SortedSet (java.util.SortedSet), EnumSet (java.util.EnumSet), Set (java.util.Set), RollingWriteBatch (org.apache.hadoop.yarn.server.timeline.RollingLevelDB.RollingWriteBatch), ArrayList (java.util.ArrayList), IOException (java.io.IOException), TimelinePutError (org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse.TimelinePutError), GenericObjectMapper.writeReverseOrderedLong (org.apache.hadoop.yarn.server.timeline.GenericObjectMapper.writeReverseOrderedLong), GenericObjectMapper.readReverseOrderedLong (org.apache.hadoop.yarn.server.timeline.GenericObjectMapper.readReverseOrderedLong), WriteBatch (org.iq80.leveldb.WriteBatch), DB (org.iq80.leveldb.DB)
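
Note that putEntities only queues writes: every update goes into a WriteBatch held by a RollingWriteBatch, and the caller is expected to write the batches afterwards. The standalone sketch below shows the underlying WriteBatch lifecycle with plain org.iq80.leveldb calls; the keys and path are made up for illustration.

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.iq80.leveldb.DB;
import org.iq80.leveldb.Options;
import org.iq80.leveldb.WriteBatch;

import static org.iq80.leveldb.impl.Iq80DBFactory.factory;

public class WriteBatchSketch {

    public static void main(String[] args) throws IOException {
        DB db = factory.open(new File("target/batch-sketch"),
                new Options().createIfMissing(true));
        try {
            WriteBatch batch = db.createWriteBatch();
            try {
                // queue several updates; nothing is visible until write()
                batch.put(bytes("entity!1!marker"), new byte[0]);
                batch.put(bytes("entity!1!domain"), bytes("DEFAULT"));
                batch.delete(bytes("entity!0!marker"));
                // apply all queued operations atomically
                db.write(batch);
            } finally {
                batch.close();
            }
        } finally {
            db.close();
        }
    }

    private static byte[] bytes(String s) {
        return s.getBytes(StandardCharsets.UTF_8);
    }
}

Grouping related puts into one batch keeps them atomic: either all queued operations are applied by db.write(batch), or none are.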

Example 3 with DB

Use of org.iq80.leveldb.DB in project hadoop by apache.

From the class RollingLevelDBTimelineStore, method getEntityTimelines().

@Override
public TimelineEvents getEntityTimelines(String entityType, SortedSet<String> entityIds, Long limit, Long windowStart, Long windowEnd, Set<String> eventType) throws IOException {
    TimelineEvents events = new TimelineEvents();
    if (entityIds == null || entityIds.isEmpty()) {
        return events;
    }
    // create a lexicographically-ordered map from start time to entities
    Map<byte[], List<EntityIdentifier>> startTimeMap = new TreeMap<byte[], List<EntityIdentifier>>(new Comparator<byte[]>() {

        @Override
        public int compare(byte[] o1, byte[] o2) {
            return WritableComparator.compareBytes(o1, 0, o1.length, o2, 0, o2.length);
        }
    });
    DBIterator iterator = null;
    try {
        // skip entities with no start time
        for (String entityId : entityIds) {
            byte[] startTime = getStartTime(entityId, entityType);
            if (startTime != null) {
                List<EntityIdentifier> entities = startTimeMap.get(startTime);
                if (entities == null) {
                    entities = new ArrayList<EntityIdentifier>();
                    startTimeMap.put(startTime, entities);
                }
                entities.add(new EntityIdentifier(entityId, entityType));
            }
        }
        for (Entry<byte[], List<EntityIdentifier>> entry : startTimeMap.entrySet()) {
            // look up the events matching the given parameters (limit,
            // start time, end time, event types) for entities whose start times
            // were found and add the entities to the return list
            byte[] revStartTime = entry.getKey();
            for (EntityIdentifier entityIdentifier : entry.getValue()) {
                EventsOfOneEntity entity = new EventsOfOneEntity();
                entity.setEntityId(entityIdentifier.getId());
                entity.setEntityType(entityType);
                events.addEvent(entity);
                KeyBuilder kb = KeyBuilder.newInstance().add(entityType).add(revStartTime).add(entityIdentifier.getId()).add(EVENTS_COLUMN);
                byte[] prefix = kb.getBytesForLookup();
                if (windowEnd == null) {
                    windowEnd = Long.MAX_VALUE;
                }
                byte[] revts = writeReverseOrderedLong(windowEnd);
                kb.add(revts);
                byte[] first = kb.getBytesForLookup();
                byte[] last = null;
                if (windowStart != null) {
                    last = KeyBuilder.newInstance().add(prefix).add(writeReverseOrderedLong(windowStart)).getBytesForLookup();
                }
                if (limit == null) {
                    limit = DEFAULT_LIMIT;
                }
                DB db = entitydb.getDBForStartTime(readReverseOrderedLong(revStartTime, 0));
                if (db == null) {
                    continue;
                }
                // close the iterator from any previous entity before opening a new one
                IOUtils.cleanup(LOG, iterator);
                iterator = db.iterator();
                for (iterator.seek(first); entity.getEvents().size() < limit && iterator.hasNext(); iterator.next()) {
                    byte[] key = iterator.peekNext().getKey();
                    if (!prefixMatches(prefix, prefix.length, key) || (last != null && WritableComparator.compareBytes(key, 0, key.length, last, 0, last.length) > 0)) {
                        break;
                    }
                    TimelineEvent event = getEntityEvent(eventType, key, prefix.length, iterator.peekNext().getValue());
                    if (event != null) {
                        entity.addEvent(event);
                    }
                }
            }
        }
    } finally {
        IOUtils.cleanup(LOG, iterator);
    }
    return events;
}
Also used: TimelineEvent (org.apache.hadoop.yarn.api.records.timeline.TimelineEvent), EventsOfOneEntity (org.apache.hadoop.yarn.api.records.timeline.TimelineEvents.EventsOfOneEntity), TimelineEvents (org.apache.hadoop.yarn.api.records.timeline.TimelineEvents), TreeMap (java.util.TreeMap), DBIterator (org.iq80.leveldb.DBIterator), KeyBuilder (org.apache.hadoop.yarn.server.timeline.util.LeveldbUtils.KeyBuilder), List (java.util.List), ArrayList (java.util.ArrayList), DB (org.iq80.leveldb.DB)
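
The scan above is a classic LevelDB prefix scan: build the smallest key carrying the desired prefix, seek to it, and iterate forward until the prefix no longer matches. The sketch below isolates that pattern; the prefix and path are illustrative only.

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Map;

import org.iq80.leveldb.DB;
import org.iq80.leveldb.DBIterator;
import org.iq80.leveldb.Options;

import static org.iq80.leveldb.impl.Iq80DBFactory.factory;

public class PrefixScanSketch {

    public static void main(String[] args) throws IOException {
        DB db = factory.open(new File("target/scan-sketch"),
                new Options().createIfMissing(true));
        try {
            byte[] prefix = "events!".getBytes(StandardCharsets.UTF_8);
            DBIterator iterator = db.iterator();
            try {
                // position at the first key >= prefix, then walk forward
                for (iterator.seek(prefix); iterator.hasNext(); iterator.next()) {
                    Map.Entry<byte[], byte[]> entry = iterator.peekNext();
                    byte[] key = entry.getKey();
                    // stop once keys no longer share the prefix
                    if (!startsWith(key, prefix)) {
                        break;
                    }
                    System.out.println(new String(key, StandardCharsets.UTF_8));
                }
            } finally {
                iterator.close();
            }
        } finally {
            db.close();
        }
    }

    private static boolean startsWith(byte[] key, byte[] prefix) {
        if (key.length < prefix.length) {
            return false;
        }
        for (int i = 0; i < prefix.length; i++) {
            if (key[i] != prefix[i]) {
                return false;
            }
        }
        return true;
    }
}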

Example 4 with DB

Use of org.iq80.leveldb.DB in project hadoop by apache.

From the class LeveldbTimelineStateStore, method startStorage().

@Override
protected void startStorage() throws IOException {
    Options options = new Options();
    Path dbPath = new Path(getConfig().get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_STATE_STORE_PATH), DB_NAME);
    FileSystem localFS = null;
    try {
        localFS = FileSystem.getLocal(getConfig());
        if (!localFS.exists(dbPath)) {
            if (!localFS.mkdirs(dbPath)) {
                throw new IOException("Couldn't create directory for leveldb " + "timeline store " + dbPath);
            }
            localFS.setPermission(dbPath, LEVELDB_DIR_UMASK);
        }
    } finally {
        IOUtils.cleanup(LOG, localFS);
    }
    JniDBFactory factory = new JniDBFactory();
    try {
        options.createIfMissing(false);
        db = factory.open(new File(dbPath.toString()), options);
        LOG.info("Loading the existing database at th path: " + dbPath.toString());
        checkVersion();
    } catch (NativeDB.DBException e) {
        if (e.isNotFound() || e.getMessage().contains(" does not exist ")) {
            try {
                options.createIfMissing(true);
                db = factory.open(new File(dbPath.toString()), options);
                LOG.info("Creating a new database at th path: " + dbPath.toString());
                storeVersion(CURRENT_VERSION_INFO);
            } catch (DBException ex) {
                throw new IOException(ex);
            }
        } else {
            throw new IOException(e);
        }
    } catch (DBException e) {
        throw new IOException(e);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Options (org.iq80.leveldb.Options), DBException (org.iq80.leveldb.DBException), FileSystem (org.apache.hadoop.fs.FileSystem), IOException (java.io.IOException), JniDBFactory (org.fusesource.leveldbjni.JniDBFactory), NativeDB (org.fusesource.leveldbjni.internal.NativeDB), File (java.io.File)
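
The open-then-fallback logic is easier to see without the version-checking details. The sketch below condenses it, assuming that a failed open with createIfMissing(false) surfaces as an IOException (NativeDB.DBException extends IOException); real code, like the method above, should distinguish "not found" from other failures before retrying.

import java.io.File;
import java.io.IOException;

import org.fusesource.leveldbjni.JniDBFactory;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.Options;

// A condensed sketch of the "open existing, else create and initialize"
// pattern used by startStorage() above. Error handling is simplified.
public class OpenOrCreateSketch {

    static DB openOrCreate(File path) throws IOException {
        JniDBFactory factory = new JniDBFactory();
        Options options = new Options();
        try {
            options.createIfMissing(false);
            // succeeds only if a database already exists at the path
            return factory.open(path, options);
        } catch (IOException notFound) {
            // assume the database does not exist yet and create it;
            // a new store would also persist its schema version here
            options.createIfMissing(true);
            return factory.open(path, options);
        }
    }
}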

Example 5 with DB

Use of org.iq80.leveldb.DB in project hadoop by apache.

From the class TestRollingLevelDB, method testInsertAfterRollPeriodRollsDB().

@Test
public void testInsertAfterRollPeriodRollsDB() throws Exception {
    rollingLevelDB.init(conf);
    long now = rollingLevelDB.currentTimeMillis();
    DB db = rollingLevelDB.getDBForStartTime(now);
    long startTime = rollingLevelDB.getStartTimeFor(db);
    Assert.assertEquals("Received level db for incorrect start time", rollingLevelDB.computeCurrentCheckMillis(now), startTime);
    now = rollingLevelDB.getNextRollingTimeMillis();
    rollingLevelDB.setCurrentTimeMillis(now);
    db = rollingLevelDB.getDBForStartTime(now);
    startTime = rollingLevelDB.getStartTimeFor(db);
    Assert.assertEquals("Received level db for incorrect start time", rollingLevelDB.computeCurrentCheckMillis(now), startTime);
}
Also used: DB (org.iq80.leveldb.DB), Test (org.junit.Test)
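
The test relies on RollingLevelDB mapping each timestamp to the database for its rolling period. The helper below is a hypothetical illustration of that bucketing math, not RollingLevelDB's actual implementation; the one-hour roll period is an assumption made for the example.

import java.util.concurrent.TimeUnit;

// Hypothetical illustration of rolling-bucket math: every timestamp in a
// roll period maps to that period's start, so an insert after the roll
// period lands in a new bucket (and hence a new DB), as the test asserts.
public class RollingBucketSketch {

    // assumed one-hour roll period for this sketch
    static final long ROLL_PERIOD_MILLIS = TimeUnit.HOURS.toMillis(1);

    // analogous in spirit to computeCurrentCheckMillis(now)
    static long bucketStartFor(long timestampMillis) {
        return timestampMillis - (timestampMillis % ROLL_PERIOD_MILLIS);
    }

    // analogous in spirit to getNextRollingTimeMillis()
    static long nextRollTime(long timestampMillis) {
        return bucketStartFor(timestampMillis) + ROLL_PERIOD_MILLIS;
    }

    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        long next = nextRollTime(now);
        // the two timestamps fall into different buckets
        System.out.println(bucketStartFor(now));  // current bucket start
        System.out.println(bucketStartFor(next)); // next bucket start
    }
}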

Aggregations

IOException (java.io.IOException): 25
DB (org.iq80.leveldb.DB): 25
DBException (org.iq80.leveldb.DBException): 20
LeveldbIterator (org.apache.hadoop.yarn.server.utils.LeveldbIterator): 16
Options (org.iq80.leveldb.Options): 16
File (java.io.File): 15
JniDBFactory.asString (org.fusesource.leveldbjni.JniDBFactory.asString): 14
WriteBatch (org.iq80.leveldb.WriteBatch): 9
DBIterator (org.iq80.leveldb.DBIterator): 7
Map (java.util.Map): 5
Path (org.apache.hadoop.fs.Path): 5
Test (org.junit.Test): 5
ImmutableMap (com.google.common.collect.ImmutableMap): 4
ImmutableSortedMap (com.google.common.collect.ImmutableSortedMap): 4
NavigableMap (java.util.NavigableMap): 4
NativeDB (org.fusesource.leveldbjni.internal.NativeDB): 4
WriteOptions (org.iq80.leveldb.WriteOptions): 4
DB (com.codecademy.eventhub.base.DB): 3
Provides (com.google.inject.Provides): 3
ArrayList (java.util.ArrayList): 3