Example 11 with DB

Use of org.iq80.leveldb.DB in project hadoop by apache.

From the class LeveldbRMStateStore, method openDatabase.

protected DB openDatabase() throws Exception {
    Path storeRoot = createStorageDir();
    Options options = new Options();
    options.createIfMissing(false);
    options.logger(new LeveldbLogger());
    LOG.info("Using state database at " + storeRoot + " for recovery");
    File dbfile = new File(storeRoot.toString());
    try {
        db = JniDBFactory.factory.open(dbfile, options);
    } catch (NativeDB.DBException e) {
        if (e.isNotFound() || e.getMessage().contains(" does not exist ")) {
            LOG.info("Creating state database at " + dbfile);
            options.createIfMissing(true);
            try {
                db = JniDBFactory.factory.open(dbfile, options);
                // store version
                storeVersion();
            } catch (DBException dbErr) {
                throw new IOException(dbErr.getMessage(), dbErr);
            }
        } else {
            throw e;
        }
    }
    return db;
}
Also used: Path (org.apache.hadoop.fs.Path), Options (org.iq80.leveldb.Options), DBException (org.iq80.leveldb.DBException), IOException (java.io.IOException), NativeDB (org.fusesource.leveldbjni.internal.NativeDB), File (java.io.File)
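
The method above opens an existing store first (createIfMissing(false)) and only creates a fresh database when the open fails with a not-found error, so a genuine recovery is never confused with an accidental fresh start. A minimal standalone sketch of the same open-or-create fallback, assuming leveldbjni on the classpath (the class name OpenOrCreate is ours, not Hadoop's):

import java.io.File;
import java.io.IOException;

import org.fusesource.leveldbjni.JniDBFactory;
import org.fusesource.leveldbjni.internal.NativeDB;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.Options;

public final class OpenOrCreate {
    // open an existing store, creating it only when it provably does not exist
    public static DB openOrCreate(File dbfile) throws IOException {
        Options options = new Options();
        // the first attempt refuses creation so that missing state is detected
        options.createIfMissing(false);
        try {
            return JniDBFactory.factory.open(dbfile, options);
        } catch (NativeDB.DBException e) {
            boolean notFound = e.isNotFound()
                    || (e.getMessage() != null
                        && e.getMessage().contains(" does not exist "));
            if (!notFound) {
                throw e; // a real failure, not just a missing database
            }
            // nothing on disk yet: retry with creation enabled
            options.createIfMissing(true);
            return JniDBFactory.factory.open(dbfile, options);
        }
    }
}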

Example 12 with DB

Use of org.iq80.leveldb.DB in project hadoop by apache.

From the class LeveldbRMStateStore, method loadReservationState.

private void loadReservationState(RMState rmState) throws IOException {
    int numReservations = 0;
    LeveldbIterator iter = null;
    try {
        iter = new LeveldbIterator(db);
        iter.seek(bytes(RM_RESERVATION_KEY_PREFIX));
        while (iter.hasNext()) {
            Entry<byte[], byte[]> entry = iter.next();
            String key = asString(entry.getKey());
            String planReservationString = key.substring(RM_RESERVATION_KEY_PREFIX.length());
            String[] parts = planReservationString.split(SEPARATOR);
            if (parts.length != 2) {
                LOG.warn("Incorrect reservation state key " + key);
                continue;
            }
            String planName = parts[0];
            String reservationName = parts[1];
            ReservationAllocationStateProto allocationState = ReservationAllocationStateProto.parseFrom(entry.getValue());
            if (!rmState.getReservationState().containsKey(planName)) {
                rmState.getReservationState().put(planName, new HashMap<ReservationId, ReservationAllocationStateProto>());
            }
            ReservationId reservationId = ReservationId.parseReservationId(reservationName);
            rmState.getReservationState().get(planName).put(reservationId, allocationState);
            numReservations++;
        }
    } catch (DBException e) {
        throw new IOException(e);
    } finally {
        if (iter != null) {
            iter.close();
        }
    }
    LOG.info("Recovered " + numReservations + " reservations");
}
Also used: DBException (org.iq80.leveldb.DBException), LeveldbIterator (org.apache.hadoop.yarn.server.utils.LeveldbIterator), ReservationId (org.apache.hadoop.yarn.api.records.ReservationId), JniDBFactory.asString (org.fusesource.leveldbjni.JniDBFactory.asString), IOException (java.io.IOException), ReservationAllocationStateProto (org.apache.hadoop.yarn.proto.YarnProtos.ReservationAllocationStateProto)
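
Note that, as quoted, the loop never re-checks that the current key still starts with RM_RESERVATION_KEY_PREFIX, so it relies on nothing sorting after the reservation keys; Example 14 below adds the defensive break once the prefix no longer matches. A minimal sketch of that prefix-bounded scan using the raw org.iq80.leveldb API (the class name and the printing are illustrative, not Hadoop code):

import static org.fusesource.leveldbjni.JniDBFactory.asString;
import static org.fusesource.leveldbjni.JniDBFactory.bytes;

import java.io.IOException;
import java.util.Map;

import org.iq80.leveldb.DB;
import org.iq80.leveldb.DBIterator;

public final class PrefixScan {
    // visit every entry whose key starts with the given prefix
    public static void scan(DB db, String prefix) throws IOException {
        try (DBIterator iter = db.iterator()) {
            // keys are ordered, so seeking to the prefix lands on the
            // first key at or after it
            iter.seek(bytes(prefix));
            while (iter.hasNext()) {
                Map.Entry<byte[], byte[]> entry = iter.next();
                String key = asString(entry.getKey());
                if (!key.startsWith(prefix)) {
                    break; // past the prefix range, stop scanning
                }
                System.out.println(key + " -> " + entry.getValue().length + " bytes");
            }
        }
    }
}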

Example 13 with DB

Use of org.iq80.leveldb.DB in project hadoop by apache.

From the class TestNMLeveldbStateStoreService, method testContainerStorage.

@Test
public void testContainerStorage() throws IOException {
    // test empty when no state
    List<RecoveredContainerState> recoveredContainers = stateStore.loadContainersState();
    assertTrue(recoveredContainers.isEmpty());
    // create a container request
    ApplicationId appId = ApplicationId.newInstance(1234, 3);
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 4);
    ContainerId containerId = ContainerId.newContainerId(appAttemptId, 5);
    StartContainerRequest containerReq = createContainerRequest(containerId);
    // store a container and verify recovered
    stateStore.storeContainer(containerId, 0, containerReq);
    // verify the container version key is not stored for new containers
    DB db = stateStore.getDB();
    assertNull("version key present for new container", db.get(bytes(stateStore.getContainerVersionKey(containerId.toString()))));
    restartStateStore();
    recoveredContainers = stateStore.loadContainersState();
    assertEquals(1, recoveredContainers.size());
    RecoveredContainerState rcs = recoveredContainers.get(0);
    assertEquals(0, rcs.getVersion());
    assertEquals(RecoveredContainerStatus.REQUESTED, rcs.getStatus());
    assertEquals(ContainerExitStatus.INVALID, rcs.getExitCode());
    assertEquals(false, rcs.getKilled());
    assertEquals(containerReq, rcs.getStartRequest());
    assertTrue(rcs.getDiagnostics().isEmpty());
    // store a new container record without StartContainerRequest
    ContainerId containerId1 = ContainerId.newContainerId(appAttemptId, 6);
    stateStore.storeContainerLaunched(containerId1);
    recoveredContainers = stateStore.loadContainersState();
    // check whether the new container record is discarded
    assertEquals(1, recoveredContainers.size());
    // queue the container, and verify recovered
    stateStore.storeContainerQueued(containerId);
    restartStateStore();
    recoveredContainers = stateStore.loadContainersState();
    assertEquals(1, recoveredContainers.size());
    rcs = recoveredContainers.get(0);
    assertEquals(RecoveredContainerStatus.QUEUED, rcs.getStatus());
    assertEquals(ContainerExitStatus.INVALID, rcs.getExitCode());
    assertEquals(false, rcs.getKilled());
    assertEquals(containerReq, rcs.getStartRequest());
    assertTrue(rcs.getDiagnostics().isEmpty());
    // launch the container, add some diagnostics, and verify recovered
    StringBuilder diags = new StringBuilder();
    stateStore.storeContainerLaunched(containerId);
    diags.append("some diags for container");
    stateStore.storeContainerDiagnostics(containerId, diags);
    restartStateStore();
    recoveredContainers = stateStore.loadContainersState();
    assertEquals(1, recoveredContainers.size());
    rcs = recoveredContainers.get(0);
    assertEquals(RecoveredContainerStatus.LAUNCHED, rcs.getStatus());
    assertEquals(ContainerExitStatus.INVALID, rcs.getExitCode());
    assertEquals(false, rcs.getKilled());
    assertEquals(containerReq, rcs.getStartRequest());
    assertEquals(diags.toString(), rcs.getDiagnostics());
    // increase the container size, and verify recovered
    stateStore.storeContainerResourceChanged(containerId, 2, Resource.newInstance(2468, 4));
    restartStateStore();
    recoveredContainers = stateStore.loadContainersState();
    assertEquals(1, recoveredContainers.size());
    rcs = recoveredContainers.get(0);
    assertEquals(2, rcs.getVersion());
    assertEquals(RecoveredContainerStatus.LAUNCHED, rcs.getStatus());
    assertEquals(ContainerExitStatus.INVALID, rcs.getExitCode());
    assertEquals(false, rcs.getKilled());
    assertEquals(Resource.newInstance(2468, 4), rcs.getCapability());
    // mark the container killed, add some more diags, and verify recovered
    diags.append("some more diags for container");
    stateStore.storeContainerDiagnostics(containerId, diags);
    stateStore.storeContainerKilled(containerId);
    restartStateStore();
    recoveredContainers = stateStore.loadContainersState();
    assertEquals(1, recoveredContainers.size());
    rcs = recoveredContainers.get(0);
    assertEquals(RecoveredContainerStatus.LAUNCHED, rcs.getStatus());
    assertEquals(ContainerExitStatus.INVALID, rcs.getExitCode());
    assertTrue(rcs.getKilled());
    assertEquals(containerReq, rcs.getStartRequest());
    assertEquals(diags.toString(), rcs.getDiagnostics());
    // add yet more diags, mark container completed, and verify recovered
    diags.append("some final diags");
    stateStore.storeContainerDiagnostics(containerId, diags);
    stateStore.storeContainerCompleted(containerId, 21);
    restartStateStore();
    recoveredContainers = stateStore.loadContainersState();
    assertEquals(1, recoveredContainers.size());
    rcs = recoveredContainers.get(0);
    assertEquals(RecoveredContainerStatus.COMPLETED, rcs.getStatus());
    assertEquals(21, rcs.getExitCode());
    assertTrue(rcs.getKilled());
    assertEquals(containerReq, rcs.getStartRequest());
    assertEquals(diags.toString(), rcs.getDiagnostics());
    // store remainingRetryAttempts, workDir and logDir
    stateStore.storeContainerRemainingRetryAttempts(containerId, 6);
    stateStore.storeContainerWorkDir(containerId, "/test/workdir");
    stateStore.storeContainerLogDir(containerId, "/test/logdir");
    restartStateStore();
    recoveredContainers = stateStore.loadContainersState();
    assertEquals(1, recoveredContainers.size());
    rcs = recoveredContainers.get(0);
    assertEquals(6, rcs.getRemainingRetryAttempts());
    assertEquals("/test/workdir", rcs.getWorkDir());
    assertEquals("/test/logdir", rcs.getLogDir());
    // remove the container and verify not recovered
    stateStore.removeContainer(containerId);
    restartStateStore();
    recoveredContainers = stateStore.loadContainersState();
    assertTrue(recoveredContainers.isEmpty());
}
Also used: ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), RecoveredContainerState (org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredContainerState), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), DB (org.iq80.leveldb.DB), StartContainerRequest (org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest), Test (org.junit.Test)
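
Besides driving the state store's public API, the test reaches into the underlying DB with db.get(bytes(key)) to assert that no version key was written; get returns null for absent keys, so a presence check needs no exception handling. A tiny sketch of that direct probe (the helper name is ours):

import static org.fusesource.leveldbjni.JniDBFactory.bytes;

import org.iq80.leveldb.DB;

public final class KeyProbe {
    // true when the key is present; LevelDB's get returns null on a miss
    public static boolean hasKey(DB db, String key) {
        return db.get(bytes(key)) != null;
    }
}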

Example 14 with DB

Use of org.iq80.leveldb.DB in project hadoop by apache.

From the class HistoryServerLeveldbStateStoreService, method loadTokenMasterKeys.

private int loadTokenMasterKeys(HistoryServerState state) throws IOException {
    int numKeys = 0;
    LeveldbIterator iter = null;
    try {
        iter = new LeveldbIterator(db);
        iter.seek(bytes(TOKEN_MASTER_KEY_KEY_PREFIX));
        while (iter.hasNext()) {
            Entry<byte[], byte[]> entry = iter.next();
            String key = asString(entry.getKey());
            if (!key.startsWith(TOKEN_MASTER_KEY_KEY_PREFIX)) {
                break;
            }
            if (LOG.isDebugEnabled()) {
                LOG.debug("Loading master key from " + key);
            }
            try {
                loadTokenMasterKey(state, entry.getValue());
            } catch (IOException e) {
                throw new IOException("Error loading token master key from " + key, e);
            }
            ++numKeys;
        }
    } catch (DBException e) {
        throw new IOException(e);
    } finally {
        if (iter != null) {
            iter.close();
        }
    }
    return numKeys;
}
Also used: DBException (org.iq80.leveldb.DBException), LeveldbIterator (org.apache.hadoop.yarn.server.utils.LeveldbIterator), JniDBFactory.asString (org.fusesource.leveldbjni.JniDBFactory.asString), IOException (java.io.IOException)
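
LeveldbIterator is a Hadoop utility that wraps the raw iterator and translates its unchecked failures into DBException, which is why a single catch clause can funnel every iteration error into an IOException. The seek-then-startsWith idiom above also combines naturally with a WriteBatch when a scan feeds a bulk mutation; a hedged sketch that atomically deletes every key under a prefix with the raw iq80 API (class and method names are ours):

import static org.fusesource.leveldbjni.JniDBFactory.asString;
import static org.fusesource.leveldbjni.JniDBFactory.bytes;

import java.io.IOException;

import org.iq80.leveldb.DB;
import org.iq80.leveldb.DBIterator;
import org.iq80.leveldb.WriteBatch;

public final class PrefixDelete {
    // stage deletes for every key under the prefix, then commit atomically
    public static int deletePrefix(DB db, String prefix) throws IOException {
        int deleted = 0;
        try (DBIterator iter = db.iterator();
             WriteBatch batch = db.createWriteBatch()) {
            iter.seek(bytes(prefix));
            while (iter.hasNext()) {
                byte[] key = iter.next().getKey();
                if (!asString(key).startsWith(prefix)) {
                    break; // left the prefix range
                }
                batch.delete(key);
                deleted++;
            }
            // a batch is all-or-nothing: either every delete lands or none does
            db.write(batch);
        }
        return deleted;
    }
}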

Example 15 with DB

Use of org.iq80.leveldb.DB in project hadoop by apache.

From the class RollingLevelDBTimelineStore, method putEntities.

/**
   * Put a single entity. If there is an error, add a TimelinePutError to the
   * given response.
   *
   * @param entityUpdates
   *          a map containing all the scheduled writes for this put to the
   *          entity db
   * @param indexUpdates
   *          a map containing all the scheduled writes for this put to the
   *          index db
   * @param entity
   *          the timeline entity to store
   * @param response
   *          the response to which any put errors are added
   * @return the number of keys written to the entity and index dbs
   */
private long putEntities(TreeMap<Long, RollingWriteBatch> entityUpdates, TreeMap<Long, RollingWriteBatch> indexUpdates, TimelineEntity entity, TimelinePutResponse response) {
    long putCount = 0;
    List<EntityIdentifier> relatedEntitiesWithoutStartTimes = new ArrayList<EntityIdentifier>();
    byte[] revStartTime = null;
    Map<String, Set<Object>> primaryFilters = null;
    try {
        List<TimelineEvent> events = entity.getEvents();
        // look up the start time for the entity
        Long startTime = getAndSetStartTime(entity.getEntityId(), entity.getEntityType(), entity.getStartTime(), events);
        if (startTime == null) {
            // if no start time is found, add an error and return
            TimelinePutError error = new TimelinePutError();
            error.setEntityId(entity.getEntityId());
            error.setEntityType(entity.getEntityType());
            error.setErrorCode(TimelinePutError.NO_START_TIME);
            response.addError(error);
            return putCount;
        }
        // Must have a domain
        if (StringUtils.isEmpty(entity.getDomainId())) {
            TimelinePutError error = new TimelinePutError();
            error.setEntityId(entity.getEntityId());
            error.setEntityType(entity.getEntityType());
            error.setErrorCode(TimelinePutError.NO_DOMAIN);
            response.addError(error);
            return putCount;
        }
        revStartTime = writeReverseOrderedLong(startTime);
        long roundedStartTime = entitydb.computeCurrentCheckMillis(startTime);
        RollingWriteBatch rollingWriteBatch = entityUpdates.get(roundedStartTime);
        if (rollingWriteBatch == null) {
            DB db = entitydb.getDBForStartTime(startTime);
            if (db != null) {
                WriteBatch writeBatch = db.createWriteBatch();
                rollingWriteBatch = new RollingWriteBatch(db, writeBatch);
                entityUpdates.put(roundedStartTime, rollingWriteBatch);
            }
        }
        if (rollingWriteBatch == null) {
            // no entity db exists for this start time window, so the
            // entity has expired: record the error and return
            TimelinePutError error = new TimelinePutError();
            error.setEntityId(entity.getEntityId());
            error.setEntityType(entity.getEntityType());
            error.setErrorCode(TimelinePutError.EXPIRED_ENTITY);
            response.addError(error);
            return putCount;
        }
        WriteBatch writeBatch = rollingWriteBatch.getWriteBatch();
        // Save off the getBytes conversion to avoid unnecessary cost
        byte[] entityIdBytes = entity.getEntityId().getBytes(UTF_8);
        byte[] entityTypeBytes = entity.getEntityType().getBytes(UTF_8);
        byte[] domainIdBytes = entity.getDomainId().getBytes(UTF_8);
        // write entity marker
        byte[] markerKey = KeyBuilder.newInstance(3).add(entityTypeBytes, true).add(revStartTime).add(entityIdBytes, true).getBytesForLookup();
        writeBatch.put(markerKey, EMPTY_BYTES);
        ++putCount;
        // write domain id entry
        byte[] domainkey = KeyBuilder.newInstance(4).add(entityTypeBytes, true).add(revStartTime).add(entityIdBytes, true).add(DOMAIN_ID_COLUMN).getBytes();
        writeBatch.put(domainkey, domainIdBytes);
        ++putCount;
        // write event entries
        if (events != null) {
            for (TimelineEvent event : events) {
                byte[] revts = writeReverseOrderedLong(event.getTimestamp());
                byte[] key = KeyBuilder.newInstance().add(entityTypeBytes, true).add(revStartTime).add(entityIdBytes, true).add(EVENTS_COLUMN).add(revts).add(event.getEventType().getBytes(UTF_8)).getBytes();
                byte[] value = fstConf.asByteArray(event.getEventInfo());
                writeBatch.put(key, value);
                ++putCount;
            }
        }
        // write primary filter entries
        primaryFilters = entity.getPrimaryFilters();
        if (primaryFilters != null) {
            for (Entry<String, Set<Object>> primaryFilter : primaryFilters.entrySet()) {
                for (Object primaryFilterValue : primaryFilter.getValue()) {
                    byte[] key = KeyBuilder.newInstance(6).add(entityTypeBytes, true).add(revStartTime).add(entityIdBytes, true).add(PRIMARY_FILTERS_COLUMN).add(primaryFilter.getKey()).add(fstConf.asByteArray(primaryFilterValue)).getBytes();
                    writeBatch.put(key, EMPTY_BYTES);
                    ++putCount;
                }
            }
        }
        // write other info entries
        Map<String, Object> otherInfo = entity.getOtherInfo();
        if (otherInfo != null) {
            for (Entry<String, Object> info : otherInfo.entrySet()) {
                byte[] key = KeyBuilder.newInstance(5).add(entityTypeBytes, true).add(revStartTime).add(entityIdBytes, true).add(OTHER_INFO_COLUMN).add(info.getKey()).getBytes();
                byte[] value = fstConf.asByteArray(info.getValue());
                writeBatch.put(key, value);
                ++putCount;
            }
        }
        // write related entity entries
        Map<String, Set<String>> relatedEntities = entity.getRelatedEntities();
        if (relatedEntities != null) {
            for (Entry<String, Set<String>> relatedEntityList : relatedEntities.entrySet()) {
                String relatedEntityType = relatedEntityList.getKey();
                for (String relatedEntityId : relatedEntityList.getValue()) {
                    // look up start time of related entity
                    Long relatedStartTimeLong = getStartTimeLong(relatedEntityId, relatedEntityType);
                    // delay writing the related entity if no start time is found
                    if (relatedStartTimeLong == null) {
                        relatedEntitiesWithoutStartTimes.add(new EntityIdentifier(relatedEntityId, relatedEntityType));
                        continue;
                    }
                    byte[] relatedEntityStartTime = writeReverseOrderedLong(relatedStartTimeLong);
                    long relatedRoundedStartTime = entitydb.computeCurrentCheckMillis(relatedStartTimeLong);
                    RollingWriteBatch relatedRollingWriteBatch = entityUpdates.get(relatedRoundedStartTime);
                    if (relatedRollingWriteBatch == null) {
                        DB db = entitydb.getDBForStartTime(relatedStartTimeLong);
                        if (db != null) {
                            WriteBatch relatedWriteBatch = db.createWriteBatch();
                            relatedRollingWriteBatch = new RollingWriteBatch(db, relatedWriteBatch);
                            entityUpdates.put(relatedRoundedStartTime, relatedRollingWriteBatch);
                        }
                    }
                    if (relatedRollingWriteBatch == null) {
                        // no db covers the related entity's window: mark it
                        // expired and skip this relation
                        TimelinePutError error = new TimelinePutError();
                        error.setEntityId(entity.getEntityId());
                        error.setEntityType(entity.getEntityType());
                        error.setErrorCode(TimelinePutError.EXPIRED_ENTITY);
                        response.addError(error);
                        continue;
                    }
                    // the related entity already exists; read its domain id
                    // so the relation can be validated against it
                    byte[] relatedDomainIdBytes = relatedRollingWriteBatch.getDB().get(createDomainIdKey(relatedEntityId, relatedEntityType, relatedEntityStartTime));
                    // The timeline data created by the server before 2.6 won't have
                    // the domain field. We assume this timeline data is in the
                    // default timeline domain.
                    String domainId = null;
                    if (relatedDomainIdBytes == null) {
                        domainId = TimelineDataManager.DEFAULT_DOMAIN_ID;
                    } else {
                        domainId = new String(relatedDomainIdBytes, UTF_8);
                    }
                    if (!domainId.equals(entity.getDomainId())) {
                        // in this case the entity will be put, but the relation will be
                        // ignored
                        TimelinePutError error = new TimelinePutError();
                        error.setEntityId(entity.getEntityId());
                        error.setEntityType(entity.getEntityType());
                        error.setErrorCode(TimelinePutError.FORBIDDEN_RELATION);
                        response.addError(error);
                        continue;
                    }
                    // write "forward" entry (related entity -> entity)
                    byte[] key = createRelatedEntityKey(relatedEntityId, relatedEntityType, relatedEntityStartTime, entity.getEntityId(), entity.getEntityType());
                    WriteBatch relatedWriteBatch = relatedRollingWriteBatch.getWriteBatch();
                    relatedWriteBatch.put(key, EMPTY_BYTES);
                    ++putCount;
                }
            }
        }
        // write index entities
        RollingWriteBatch indexRollingWriteBatch = indexUpdates.get(roundedStartTime);
        if (indexRollingWriteBatch == null) {
            DB db = indexdb.getDBForStartTime(startTime);
            if (db != null) {
                WriteBatch indexWriteBatch = db.createWriteBatch();
                indexRollingWriteBatch = new RollingWriteBatch(db, indexWriteBatch);
                indexUpdates.put(roundedStartTime, indexRollingWriteBatch);
            }
        }
        if (indexRollingWriteBatch == null) {
            // no index db exists for this window, so the entity has
            // expired: record the error and return
            TimelinePutError error = new TimelinePutError();
            error.setEntityId(entity.getEntityId());
            error.setEntityType(entity.getEntityType());
            error.setErrorCode(TimelinePutError.EXPIRED_ENTITY);
            response.addError(error);
            return putCount;
        }
        WriteBatch indexWriteBatch = indexRollingWriteBatch.getWriteBatch();
        putCount += writePrimaryFilterEntries(indexWriteBatch, primaryFilters, markerKey, EMPTY_BYTES);
    } catch (IOException e) {
        LOG.error("Error putting entity " + entity.getEntityId() + " of type " + entity.getEntityType(), e);
        TimelinePutError error = new TimelinePutError();
        error.setEntityId(entity.getEntityId());
        error.setEntityType(entity.getEntityType());
        error.setErrorCode(TimelinePutError.IO_EXCEPTION);
        response.addError(error);
    }
    for (EntityIdentifier relatedEntity : relatedEntitiesWithoutStartTimes) {
        try {
            Long relatedEntityStartAndInsertTime = getAndSetStartTime(relatedEntity.getId(), relatedEntity.getType(), readReverseOrderedLong(revStartTime, 0), null);
            if (relatedEntityStartAndInsertTime == null) {
                throw new IOException("Error setting start time for related entity");
            }
            long relatedStartTimeLong = relatedEntityStartAndInsertTime;
            long relatedRoundedStartTime = entitydb.computeCurrentCheckMillis(relatedStartTimeLong);
            RollingWriteBatch relatedRollingWriteBatch = entityUpdates.get(relatedRoundedStartTime);
            if (relatedRollingWriteBatch == null) {
                DB db = entitydb.getDBForStartTime(relatedStartTimeLong);
                if (db != null) {
                    WriteBatch relatedWriteBatch = db.createWriteBatch();
                    relatedRollingWriteBatch = new RollingWriteBatch(db, relatedWriteBatch);
                    entityUpdates.put(relatedRoundedStartTime, relatedRollingWriteBatch);
                }
            }
            if (relatedRollingWriteBatch == null) {
                // no db covers the related entity's window: mark it
                // expired and skip it
                TimelinePutError error = new TimelinePutError();
                error.setEntityId(entity.getEntityId());
                error.setEntityType(entity.getEntityType());
                error.setErrorCode(TimelinePutError.EXPIRED_ENTITY);
                response.addError(error);
                continue;
            }
            WriteBatch relatedWriteBatch = relatedRollingWriteBatch.getWriteBatch();
            byte[] relatedEntityStartTime = writeReverseOrderedLong(relatedEntityStartAndInsertTime);
            // This is the new entity, the domain should be the same
            byte[] key = createDomainIdKey(relatedEntity.getId(), relatedEntity.getType(), relatedEntityStartTime);
            relatedWriteBatch.put(key, entity.getDomainId().getBytes(UTF_8));
            ++putCount;
            relatedWriteBatch.put(createRelatedEntityKey(relatedEntity.getId(), relatedEntity.getType(), relatedEntityStartTime, entity.getEntityId(), entity.getEntityType()), EMPTY_BYTES);
            ++putCount;
            relatedWriteBatch.put(createEntityMarkerKey(relatedEntity.getId(), relatedEntity.getType(), relatedEntityStartTime), EMPTY_BYTES);
            ++putCount;
        } catch (IOException e) {
            LOG.error("Error putting related entity " + relatedEntity.getId() + " of type " + relatedEntity.getType() + " for entity " + entity.getEntityId() + " of type " + entity.getEntityType(), e);
            TimelinePutError error = new TimelinePutError();
            error.setEntityId(entity.getEntityId());
            error.setEntityType(entity.getEntityType());
            error.setErrorCode(TimelinePutError.IO_EXCEPTION);
            response.addError(error);
        }
    }
    return putCount;
}
Also used: TimelineEvent (org.apache.hadoop.yarn.api.records.timeline.TimelineEvent), SortedSet (java.util.SortedSet), EnumSet (java.util.EnumSet), Set (java.util.Set), RollingWriteBatch (org.apache.hadoop.yarn.server.timeline.RollingLevelDB.RollingWriteBatch), ArrayList (java.util.ArrayList), IOException (java.io.IOException), TimelinePutError (org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse.TimelinePutError), GenericObjectMapper.writeReverseOrderedLong (org.apache.hadoop.yarn.server.timeline.GenericObjectMapper.writeReverseOrderedLong), GenericObjectMapper.readReverseOrderedLong (org.apache.hadoop.yarn.server.timeline.GenericObjectMapper.readReverseOrderedLong), WriteBatch (org.iq80.leveldb.WriteBatch), DB (org.iq80.leveldb.DB)
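
Notice that putEntities never writes a key directly: each put is staged into a WriteBatch paired with the DB for the entity's rolling time window (the RollingWriteBatch), keyed in the TreeMap by rounded start time, and the caller commits each window's batch afterwards. A simplified sketch of that stage-then-commit shape; DbAndBatch and StagedWrites are illustrative stand-ins, not the Hadoop classes:

import java.io.IOException;
import java.util.Map;
import java.util.TreeMap;

import org.iq80.leveldb.DB;
import org.iq80.leveldb.WriteBatch;

/** Pairs a DB with the batch of writes staged against it. */
final class DbAndBatch {
    final DB db;
    final WriteBatch batch;

    DbAndBatch(DB db) {
        this.db = db;
        this.batch = db.createWriteBatch();
    }
}

final class StagedWrites {
    // one pending batch per rolling time window, keyed by rounded start time
    private final TreeMap<Long, DbAndBatch> updates = new TreeMap<>();

    // stage a put into the window's batch, creating the batch lazily
    void stagePut(long window, DB db, byte[] key, byte[] value) {
        updates.computeIfAbsent(window, w -> new DbAndBatch(db)).batch.put(key, value);
    }

    // commit every window's batch; each db.write is atomic per batch
    void commitAll() throws IOException {
        for (Map.Entry<Long, DbAndBatch> e : updates.entrySet()) {
            DbAndBatch staged = e.getValue();
            try {
                staged.db.write(staged.batch);
            } finally {
                staged.batch.close();
            }
        }
    }
}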

Aggregations

IOException (java.io.IOException): 25
DB (org.iq80.leveldb.DB): 25
DBException (org.iq80.leveldb.DBException): 20
LeveldbIterator (org.apache.hadoop.yarn.server.utils.LeveldbIterator): 16
Options (org.iq80.leveldb.Options): 16
File (java.io.File): 15
JniDBFactory.asString (org.fusesource.leveldbjni.JniDBFactory.asString): 14
WriteBatch (org.iq80.leveldb.WriteBatch): 9
DBIterator (org.iq80.leveldb.DBIterator): 7
Map (java.util.Map): 5
Path (org.apache.hadoop.fs.Path): 5
Test (org.junit.Test): 5
ImmutableMap (com.google.common.collect.ImmutableMap): 4
ImmutableSortedMap (com.google.common.collect.ImmutableSortedMap): 4
NavigableMap (java.util.NavigableMap): 4
NativeDB (org.fusesource.leveldbjni.internal.NativeDB): 4
WriteOptions (org.iq80.leveldb.WriteOptions): 4
DB (com.codecademy.eventhub.base.DB): 3
Provides (com.google.inject.Provides): 3
ArrayList (java.util.ArrayList): 3