Use of org.apache.hadoop.fs.InvalidRequestException in project hadoop by apache: class CacheManager, method listCacheDirectives.
public BatchedListEntries<CacheDirectiveEntry> listCacheDirectives(long prevId, CacheDirectiveInfo filter, FSPermissionChecker pc) throws IOException {
  assert namesystem.hasReadLock();
  final int NUM_PRE_ALLOCATED_ENTRIES = 16;
  String filterPath = null;
  if (filter.getPath() != null) {
    filterPath = validatePath(filter);
  }
  if (filter.getReplication() != null) {
    throw new InvalidRequestException("Filtering by replication is unsupported.");
  }
  // Querying for a single ID
  final Long id = filter.getId();
  if (id != null) {
    if (!directivesById.containsKey(id)) {
      throw new InvalidRequestException("Did not find requested id " + id);
    }
    // Since we use a tailMap on directivesById, setting prev to id-1 gets
    // us the directive with the id (if present)
    prevId = id - 1;
  }
  ArrayList<CacheDirectiveEntry> replies = new ArrayList<CacheDirectiveEntry>(NUM_PRE_ALLOCATED_ENTRIES);
  int numReplies = 0;
  SortedMap<Long, CacheDirective> tailMap = directivesById.tailMap(prevId + 1);
  for (Entry<Long, CacheDirective> cur : tailMap.entrySet()) {
    if (numReplies >= maxListCacheDirectivesNumResponses) {
      return new BatchedListEntries<CacheDirectiveEntry>(replies, true);
    }
    CacheDirective curDirective = cur.getValue();
    CacheDirectiveInfo info = cur.getValue().toInfo();
    // When filtering by ID, the matching directive (if any) is the first
    // item and we should break out as soon as the IDs no longer match.
    if (id != null && !(info.getId().equals(id))) {
      break;
    }
    if (filter.getPool() != null && !info.getPool().equals(filter.getPool())) {
      continue;
    }
    if (filterPath != null && !info.getPath().toUri().getPath().equals(filterPath)) {
      continue;
    }
    boolean hasPermission = true;
    if (pc != null) {
      try {
        pc.checkPermission(curDirective.getPool(), FsAction.READ);
      } catch (AccessControlException e) {
        hasPermission = false;
      }
    }
    if (hasPermission) {
      replies.add(new CacheDirectiveEntry(info, cur.getValue().toStats()));
      numReplies++;
    }
  }
  return new BatchedListEntries<CacheDirectiveEntry>(replies, false);
}
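For context, clients reach this method through DistributedFileSystem#listCacheDirectives, which wraps the batched replies in a RemoteIterator and re-issues the call with the last returned ID, which is how prevId pages through directivesById above. A minimal sketch, assuming a NameNode at hdfs://nn:8020 and an existing pool named pool1 (both placeholders):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.RemoteIterator;
  import org.apache.hadoop.hdfs.DistributedFileSystem;
  import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
  import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

  public class ListDirectivesExample {
    public static void main(String[] args) throws Exception {
      DistributedFileSystem dfs = (DistributedFileSystem)
          new Path("hdfs://nn:8020/").getFileSystem(new Configuration());
      // Filter by pool only; a filter that sets replication would be
      // rejected with InvalidRequestException, as the server code shows.
      CacheDirectiveInfo filter = new CacheDirectiveInfo.Builder()
          .setPool("pool1")
          .build();
      RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(filter);
      while (it.hasNext()) {
        CacheDirectiveEntry entry = it.next();
        System.out.println(entry.getInfo() + " -> " + entry.getStats());
      }
    }
  }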
Use of org.apache.hadoop.fs.InvalidRequestException in project hadoop by apache: class CacheManager, method modifyDirective.
public void modifyDirective(CacheDirectiveInfo info, FSPermissionChecker pc, EnumSet<CacheFlag> flags) throws IOException {
  assert namesystem.hasWriteLock();
  String idString = (info.getId() == null) ? "(null)" : info.getId().toString();
  try {
    // Check for invalid IDs.
    Long id = info.getId();
    if (id == null) {
      throw new InvalidRequestException("Must supply an ID.");
    }
    CacheDirective prevEntry = getById(id);
    checkWritePermission(pc, prevEntry.getPool());
    // Fill in defaults
    CacheDirectiveInfo infoWithDefaults = createFromInfoAndDefaults(info, prevEntry);
    CacheDirectiveInfo.Builder builder = new CacheDirectiveInfo.Builder(infoWithDefaults);
    // Do validation
    validatePath(infoWithDefaults);
    validateReplication(infoWithDefaults, (short) -1);
    // Need to test the pool being set here to avoid rejecting a modify for a
    // directive that's already been forced into a pool
    CachePool srcPool = prevEntry.getPool();
    CachePool destPool = getCachePool(validatePoolName(infoWithDefaults));
    if (!srcPool.getPoolName().equals(destPool.getPoolName())) {
      checkWritePermission(pc, destPool);
      if (!flags.contains(CacheFlag.FORCE)) {
        checkLimit(destPool, infoWithDefaults.getPath().toUri().getPath(), infoWithDefaults.getReplication());
      }
    }
    // Verify the expiration against the destination pool
    validateExpiryTime(infoWithDefaults, destPool.getMaxRelativeExpiryMs());
    // Indicate changes to the CRM
    setNeedsRescan();
    // Validation passed
    removeInternal(prevEntry);
    addInternal(new CacheDirective(builder.build()), destPool);
  } catch (IOException e) {
    LOG.warn("modifyDirective of " + idString + " failed: ", e);
    throw e;
  }
  LOG.info("modifyDirective of {} successfully applied {}.", idString, info);
}
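The corresponding public API is DistributedFileSystem#modifyCacheDirective. A short sketch, reusing the dfs handle from the listing example above; the ID 42 and pool name pool2 are placeholders. Passing CacheFlag.FORCE mirrors the flags.contains(CacheFlag.FORCE) branch above and skips the destination pool's limit check:

  import java.util.EnumSet;
  import org.apache.hadoop.fs.CacheFlag;
  import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

  // Move an existing directive into another pool; fields left unset
  // (path, replication, expiration) keep their previous values, filled
  // in server-side by createFromInfoAndDefaults().
  CacheDirectiveInfo change = new CacheDirectiveInfo.Builder()
      .setId(42L)
      .setPool("pool2")
      .build();
  dfs.modifyCacheDirective(change, EnumSet.of(CacheFlag.FORCE));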
Use of org.apache.hadoop.fs.InvalidRequestException in project hadoop by apache: class CacheManager, method removeCachePool.
/**
 * Remove a cache pool.
 *
 * Only the superuser should be able to call this function.
 *
 * @param poolName
 *          The name for the cache pool to remove.
 */
public void removeCachePool(String poolName) throws IOException {
  assert namesystem.hasWriteLock();
  try {
    CachePoolInfo.validateName(poolName);
    CachePool pool = cachePools.remove(poolName);
    if (pool == null) {
      throw new InvalidRequestException("Cannot remove non-existent cache pool " + poolName);
    }
    // Remove all directives in this pool.
    Iterator<CacheDirective> iter = pool.getDirectiveList().iterator();
    while (iter.hasNext()) {
      CacheDirective directive = iter.next();
      directivesByPath.remove(directive.getPath());
      directivesById.remove(directive.getId());
      iter.remove();
    }
    setNeedsRescan();
  } catch (IOException e) {
    LOG.info("removeCachePool of " + poolName + " failed: ", e);
    throw e;
  }
  LOG.info("removeCachePool of " + poolName + " successful.");
}
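Client side, removal goes through DistributedFileSystem#removeCachePool. Over RPC the server's InvalidRequestException arrives wrapped in a RemoteException, which can be unwrapped as sketched below (again reusing dfs; pool1 is a placeholder):

  import java.io.IOException;
  import org.apache.hadoop.fs.InvalidRequestException;
  import org.apache.hadoop.ipc.RemoteException;

  try {
    dfs.removeCachePool("pool1");
  } catch (RemoteException e) {
    IOException unwrapped = e.unwrapRemoteException(InvalidRequestException.class);
    if (unwrapped instanceof InvalidRequestException) {
      // Matches the "Cannot remove non-existent cache pool" path above.
      System.err.println("No such pool: " + unwrapped.getMessage());
    } else {
      throw e;
    }
  }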
Use of org.apache.hadoop.fs.InvalidRequestException in project hadoop by apache: class CacheManager, method modifyDirectiveFromEditLog.
/**
 * Modifies a directive, skipping most error checking. This is for careful
 * internal use only. modifyDirective can be non-deterministic since its error
 * checking depends on current system time, which poses a problem for edit log
 * replay.
 */
void modifyDirectiveFromEditLog(CacheDirectiveInfo info) throws InvalidRequestException {
  // Check for invalid IDs.
  Long id = info.getId();
  if (id == null) {
    throw new InvalidRequestException("Must supply an ID.");
  }
  CacheDirective prevEntry = getById(id);
  CacheDirectiveInfo newInfo = createFromInfoAndDefaults(info, prevEntry);
  removeInternal(prevEntry);
  addInternal(new CacheDirective(newInfo), getCachePool(newInfo.getPool()));
}
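The time dependence the javadoc warns about comes from expiry validation: modifyDirective (above) calls validateExpiryTime against the current clock, so replaying an old edit could fail even though the original call succeeded. A contrived illustration with made-up timestamps:

  // The edit was logged at t=1000 for a directive expiring at t=2000, so
  // the original call passed validation. Replaying the identical edit at
  // t=3000 would fail the same check; hence replay skips it.
  long loggedAt = 1000L, expiresAt = 2000L, replayedAt = 3000L;
  boolean validWhenLogged = expiresAt > loggedAt;     // true
  boolean validWhenReplayed = expiresAt > replayedAt; // false: same edit, new answer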
Use of org.apache.hadoop.fs.InvalidRequestException in project hadoop by apache: class ShortCircuitRegistry, method registerSlot.
public synchronized void registerSlot(ExtendedBlockId blockId, SlotId slotId, boolean isCached) throws InvalidRequestException {
  if (!enabled) {
    if (LOG.isTraceEnabled()) {
      LOG.trace(this + " can't register a slot because the ShortCircuitRegistry is not enabled.");
    }
    throw new UnsupportedOperationException();
  }
  ShmId shmId = slotId.getShmId();
  RegisteredShm shm = segments.get(shmId);
  if (shm == null) {
    throw new InvalidRequestException("there is no shared memory segment registered with shmId " + shmId);
  }
  Slot slot = shm.registerSlot(slotId.getSlotIdx(), blockId);
  if (isCached) {
    slot.makeAnchorable();
  } else {
    slot.makeUnanchorable();
  }
  boolean added = slots.put(blockId, slot);
  Preconditions.checkState(added);
  if (LOG.isTraceEnabled()) {
    LOG.trace(this + ": registered " + blockId + " with slot " + slotId + " (isCached=" + isCached + ")");
  }
}
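On the DataNode this runs while setting up a short-circuit read, after the client's shared-memory segment has been registered; a slot can only refer to a segment the registry already knows. A rough error-handling sketch, where registry, blockId, slotId, and isCached stand in for objects the caller already holds:

  try {
    registry.registerSlot(blockId, slotId, isCached);
  } catch (UnsupportedOperationException e) {
    // Short-circuit shared memory is disabled on this DataNode.
  } catch (InvalidRequestException e) {
    // slotId.getShmId() does not name a registered segment, e.g. the
    // client's segment was closed before the slot could be registered.
  }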