Search in sources:

Example 1 with InvalidRequestException

use of org.apache.hadoop.fs.InvalidRequestException in project hadoop by apache.

From class CacheManager, method listCacheDirectives:

/**
 * Lists cache directives whose id is greater than {@code prevId}, applying the
 * given filter, up to {@code maxListCacheDirectivesNumResponses} entries per batch.
 *
 * @param prevId the last id returned by a previous call (exclusive lower bound);
 *               callers pass 0 (or the prior batch's last id) to page through results
 * @param filter restricts results by path, pool, or a single id; filtering by
 *               replication is rejected
 * @param pc     permission checker; when non-null, directives in pools the caller
 *               cannot READ are silently omitted. May be null (no permission filtering).
 * @return a batch of matching entries; {@code hasMore} is true when the batch was
 *         truncated at the response limit
 * @throws InvalidRequestException if filtering by replication, or the requested id
 *         does not exist
 * @throws IOException if path validation fails
 */
public BatchedListEntries<CacheDirectiveEntry> listCacheDirectives(long prevId, CacheDirectiveInfo filter, FSPermissionChecker pc) throws IOException {
    assert namesystem.hasReadLock();
    final int NUM_PRE_ALLOCATED_ENTRIES = 16;
    String filterPath = null;
    if (filter.getPath() != null) {
        filterPath = validatePath(filter);
    }
    if (filter.getReplication() != null) {
        throw new InvalidRequestException("Filtering by replication is unsupported.");
    }
    // Querying for a single ID
    final Long id = filter.getId();
    if (id != null) {
        if (!directivesById.containsKey(id)) {
            throw new InvalidRequestException("Did not find requested id " + id);
        }
        // Since we use a tailMap on directivesById, setting prev to id-1 gets
        // us the directive with the id (if present)
        prevId = id - 1;
    }
    ArrayList<CacheDirectiveEntry> replies = new ArrayList<CacheDirectiveEntry>(NUM_PRE_ALLOCATED_ENTRIES);
    int numReplies = 0;
    SortedMap<Long, CacheDirective> tailMap = directivesById.tailMap(prevId + 1);
    for (Entry<Long, CacheDirective> cur : tailMap.entrySet()) {
        if (numReplies >= maxListCacheDirectivesNumResponses) {
            // Batch is full; signal the caller that more entries may remain.
            return new BatchedListEntries<CacheDirectiveEntry>(replies, true);
        }
        // Reuse curDirective below instead of re-calling cur.getValue().
        CacheDirective curDirective = cur.getValue();
        CacheDirectiveInfo info = curDirective.toInfo();
        // When querying a single id, the tailMap starts at that id; any other
        // id means we have passed the (only) matching item and should break out.
        if (id != null && !(info.getId().equals(id))) {
            break;
        }
        if (filter.getPool() != null && !info.getPool().equals(filter.getPool())) {
            continue;
        }
        if (filterPath != null && !info.getPath().toUri().getPath().equals(filterPath)) {
            continue;
        }
        // Omit (rather than fail on) directives the caller may not read.
        boolean hasPermission = true;
        if (pc != null) {
            try {
                pc.checkPermission(curDirective.getPool(), FsAction.READ);
            } catch (AccessControlException e) {
                hasPermission = false;
            }
        }
        if (hasPermission) {
            replies.add(new CacheDirectiveEntry(info, curDirective.toStats()));
            numReplies++;
        }
    }
    return new BatchedListEntries<CacheDirectiveEntry>(replies, false);
}
Also used : ArrayList(java.util.ArrayList) AccessControlException(org.apache.hadoop.security.AccessControlException) CacheDirectiveInfo(org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo) CacheDirective(org.apache.hadoop.hdfs.protocol.CacheDirective) BatchedListEntries(org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries) CacheDirectiveEntry(org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry) InvalidRequestException(org.apache.hadoop.fs.InvalidRequestException)

Example 2 with InvalidRequestException

use of org.apache.hadoop.fs.InvalidRequestException in project hadoop by apache.

From class CacheManager, method modifyDirective:

/**
 * Modifies an existing cache directive identified by {@code info.getId()}.
 * Validation runs against the directive merged with the previous entry's
 * defaults; on success the old directive is removed and a new one added
 * (possibly moving it to a different pool).
 *
 * @param info  the fields to change; must carry a non-null id
 * @param pc    permission checker; write access is required on the source pool,
 *              and on the destination pool when the pool changes
 * @param flags when moving pools without {@link CacheFlag#FORCE}, the
 *              destination pool's cache limit is enforced
 * @throws InvalidRequestException if no id is supplied (and, presumably, from
 *         the various validate* helpers on bad input — TODO confirm)
 * @throws IOException on any validation or permission failure (logged and rethrown)
 */
public void modifyDirective(CacheDirectiveInfo info, FSPermissionChecker pc, EnumSet<CacheFlag> flags) throws IOException {
    assert namesystem.hasWriteLock();
    // Capture a printable id up front so the catch block can log it even when
    // validation fails before the id is parsed.
    String idString = (info.getId() == null) ? "(null)" : info.getId().toString();
    try {
        // Check for invalid IDs.
        Long id = info.getId();
        if (id == null) {
            throw new InvalidRequestException("Must supply an ID.");
        }
        CacheDirective prevEntry = getById(id);
        checkWritePermission(pc, prevEntry.getPool());
        // Fill in defaults
        CacheDirectiveInfo infoWithDefaults = createFromInfoAndDefaults(info, prevEntry);
        CacheDirectiveInfo.Builder builder = new CacheDirectiveInfo.Builder(infoWithDefaults);
        // Do validation
        validatePath(infoWithDefaults);
        // NOTE(review): -1 looks like "no fallback replication" — confirm against
        // validateReplication's contract.
        validateReplication(infoWithDefaults, (short) -1);
        // Need to test the pool being set here to avoid rejecting a modify for a
        // directive that's already been forced into a pool
        CachePool srcPool = prevEntry.getPool();
        CachePool destPool = getCachePool(validatePoolName(infoWithDefaults));
        if (!srcPool.getPoolName().equals(destPool.getPoolName())) {
            // Moving pools: the caller must be able to write the destination, and
            // unless forced, the move must fit under the destination's limit.
            checkWritePermission(pc, destPool);
            if (!flags.contains(CacheFlag.FORCE)) {
                checkLimit(destPool, infoWithDefaults.getPath().toUri().getPath(), infoWithDefaults.getReplication());
            }
        }
        // Verify the expiration against the destination pool
        validateExpiryTime(infoWithDefaults, destPool.getMaxRelativeExpiryMs());
        // Indicate changes to the CRM
        setNeedsRescan();
        // Validation passed; apply as remove-then-add so indexes stay consistent.
        removeInternal(prevEntry);
        addInternal(new CacheDirective(builder.build()), destPool);
    } catch (IOException e) {
        LOG.warn("modifyDirective of " + idString + " failed: ", e);
        throw e;
    }
    LOG.info("modifyDirective of {} successfully applied {}.", idString, info);
}
Also used : CacheDirectiveInfo(org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo) CacheDirective(org.apache.hadoop.hdfs.protocol.CacheDirective) InvalidRequestException(org.apache.hadoop.fs.InvalidRequestException) IOException(java.io.IOException)

Example 3 with InvalidRequestException

use of org.apache.hadoop.fs.InvalidRequestException in project hadoop by apache.

From class CacheManager, method removeCachePool:

/**
 * Remove a cache pool.
 * 
 * Only the superuser should be able to call this function.
 *
 * @param poolName
 *          The name for the cache pool to remove.
 */
public void removeCachePool(String poolName) throws IOException {
    assert namesystem.hasWriteLock();
    try {
        CachePoolInfo.validateName(poolName);
        // remove() both looks up and deletes the pool in one step.
        CachePool removed = cachePools.remove(poolName);
        if (removed == null) {
            throw new InvalidRequestException("Cannot remove non-existent cache pool " + poolName);
        }
        // Deleting the pool orphans its directives; purge each one from the
        // path and id indexes as well as from the pool's own list.
        Iterator<CacheDirective> it = removed.getDirectiveList().iterator();
        while (it.hasNext()) {
            CacheDirective d = it.next();
            directivesByPath.remove(d.getPath());
            directivesById.remove(d.getId());
            it.remove();
        }
        // Tell the cache replication monitor that state changed.
        setNeedsRescan();
    } catch (IOException e) {
        LOG.info("removeCachePool of " + poolName + " failed: ", e);
        throw e;
    }
    LOG.info("removeCachePool of " + poolName + " successful.");
}
Also used : CacheDirective(org.apache.hadoop.hdfs.protocol.CacheDirective) InvalidRequestException(org.apache.hadoop.fs.InvalidRequestException) IOException(java.io.IOException)

Example 4 with InvalidRequestException

use of org.apache.hadoop.fs.InvalidRequestException in project hadoop by apache.

From class CacheManager, method modifyDirectiveFromEditLog:

/**
 * Modifies a directive, skipping most error checking. This is for careful
 * internal use only. modifyDirective can be non-deterministic since its error
 * checking depends on current system time, which poses a problem for edit log
 * replay.
 */
void modifyDirectiveFromEditLog(CacheDirectiveInfo info) throws InvalidRequestException {
    // The id is the one piece of input we cannot do without.
    final Long id = info.getId();
    if (id == null) {
        throw new InvalidRequestException("Must supply an ID.");
    }
    // Merge the requested changes over the existing directive's values,
    // then swap the old directive out for the merged one.
    final CacheDirective existing = getById(id);
    final CacheDirectiveInfo merged = createFromInfoAndDefaults(info, existing);
    removeInternal(existing);
    addInternal(new CacheDirective(merged), getCachePool(merged.getPool()));
}
Also used : CacheDirectiveInfo(org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo) CacheDirective(org.apache.hadoop.hdfs.protocol.CacheDirective) InvalidRequestException(org.apache.hadoop.fs.InvalidRequestException)

Example 5 with InvalidRequestException

use of org.apache.hadoop.fs.InvalidRequestException in project hadoop by apache.

From class ShortCircuitRegistry, method registerSlot:

/**
 * Registers a slot in an already-registered shared memory segment for the
 * given block, marking it anchorable according to {@code isCached}.
 *
 * @param blockId the block the slot refers to
 * @param slotId  identifies both the shared memory segment and the slot index
 * @param isCached whether the block is cached (anchorable) or not
 * @throws UnsupportedOperationException if the registry is disabled
 * @throws InvalidRequestException if the segment is not registered
 */
public synchronized void registerSlot(ExtendedBlockId blockId, SlotId slotId, boolean isCached) throws InvalidRequestException {
    if (!enabled) {
        if (LOG.isTraceEnabled()) {
            LOG.trace(this + " can't register a slot because the " + "ShortCircuitRegistry is not enabled.");
        }
        throw new UnsupportedOperationException();
    }
    // Look up the segment this slot belongs to; it must have been registered.
    final ShmId segmentId = slotId.getShmId();
    final RegisteredShm segment = segments.get(segmentId);
    if (segment == null) {
        throw new InvalidRequestException("there is no shared memory segment " + "registered with shmId " + segmentId);
    }
    Slot newSlot = segment.registerSlot(slotId.getSlotIdx(), blockId);
    // Anchorability follows the cache state of the block.
    if (!isCached) {
        newSlot.makeUnanchorable();
    } else {
        newSlot.makeAnchorable();
    }
    // Each (block, slot) pair must be new; a duplicate indicates a bug.
    boolean inserted = slots.put(blockId, newSlot);
    Preconditions.checkState(inserted);
    if (LOG.isTraceEnabled()) {
        LOG.trace(this + ": registered " + blockId + " with slot " + slotId + " (isCached=" + isCached + ")");
    }
}
Also used : ShmId(org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId) Slot(org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot) InvalidRequestException(org.apache.hadoop.fs.InvalidRequestException)

Aggregations

InvalidRequestException (org.apache.hadoop.fs.InvalidRequestException)11 CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo)6 Path (org.apache.hadoop.fs.Path)4 CacheDirective (org.apache.hadoop.hdfs.protocol.CacheDirective)4 CacheDirectiveEntry (org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry)4 CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo)4 Test (org.junit.Test)4 IOException (java.io.IOException)3 Date (java.util.Date)3 CachePoolEntry (org.apache.hadoop.hdfs.protocol.CachePoolEntry)2 ShmId (org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId)2 Slot (org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot)2 AccessControlException (org.apache.hadoop.security.AccessControlException)2 ArrayList (java.util.ArrayList)1 BatchedListEntries (org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries)1 CacheFlag (org.apache.hadoop.fs.CacheFlag)1 FsPermission (org.apache.hadoop.fs.permission.FsPermission)1 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)1 Expiration (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration)1 CachePoolStats (org.apache.hadoop.hdfs.protocol.CachePoolStats)1