Use of org.apache.hadoop.hdfs.protocol.CacheDirective in project hadoop by apache.
The class CacheManager, method listCacheDirectives.
public BatchedListEntries<CacheDirectiveEntry> listCacheDirectives(long prevId,
    CacheDirectiveInfo filter, FSPermissionChecker pc) throws IOException {
  assert namesystem.hasReadLock();
  final int NUM_PRE_ALLOCATED_ENTRIES = 16;
  String filterPath = null;
  if (filter.getPath() != null) {
    filterPath = validatePath(filter);
  }
  if (filter.getReplication() != null) {
    throw new InvalidRequestException(
        "Filtering by replication is unsupported.");
  }
  // Querying for a single ID
  final Long id = filter.getId();
  if (id != null) {
    if (!directivesById.containsKey(id)) {
      throw new InvalidRequestException("Did not find requested id " + id);
    }
    // Since we use a tailMap on directivesById, setting prev to id-1 gets
    // us the directive with the id (if present)
    prevId = id - 1;
  }
  ArrayList<CacheDirectiveEntry> replies =
      new ArrayList<CacheDirectiveEntry>(NUM_PRE_ALLOCATED_ENTRIES);
  int numReplies = 0;
  SortedMap<Long, CacheDirective> tailMap = directivesById.tailMap(prevId + 1);
  for (Entry<Long, CacheDirective> cur : tailMap.entrySet()) {
    if (numReplies >= maxListCacheDirectivesNumResponses) {
      return new BatchedListEntries<CacheDirectiveEntry>(replies, true);
    }
    CacheDirective curDirective = cur.getValue();
    CacheDirectiveInfo info = cur.getValue().toInfo();
    // If we are filtering by a single ID, a non-matching entry means we
    // have passed the requested item and should break out.
    if (id != null && !(info.getId().equals(id))) {
      break;
    }
    if (filter.getPool() != null && !info.getPool().equals(filter.getPool())) {
      continue;
    }
    if (filterPath != null &&
        !info.getPath().toUri().getPath().equals(filterPath)) {
      continue;
    }
    boolean hasPermission = true;
    if (pc != null) {
      try {
        pc.checkPermission(curDirective.getPool(), FsAction.READ);
      } catch (AccessControlException e) {
        hasPermission = false;
      }
    }
    if (hasPermission) {
      replies.add(new CacheDirectiveEntry(info, cur.getValue().toStats()));
      numReplies++;
    }
  }
  return new BatchedListEntries<CacheDirectiveEntry>(replies, false);
}
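The prevId parameter and the BatchedListEntries return type implement server-side paging: each call returns at most maxListCacheDirectivesNumResponses entries plus a has-more flag. Client code normally does not drive this loop directly; it goes through the public DistributedFileSystem API, whose RemoteIterator fetches one batch at a time. A minimal sketch, assuming a reachable HDFS cluster in fs.defaultFS and an existing pool whose name ("pool1") is purely illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

public class ListDirectivesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS points at an HDFS NameNode.
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    // Filter by pool; a replication filter would be rejected, as seen above.
    CacheDirectiveInfo filter = new CacheDirectiveInfo.Builder()
        .setPool("pool1") // illustrative pool name
        .build();
    RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(filter);
    // Each hasNext() may trigger another batched RPC, advancing prevId.
    while (it.hasNext()) {
      CacheDirectiveEntry entry = it.next();
      System.out.println(entry.getInfo() + " -> " + entry.getStats());
    }
    dfs.close();
  }
}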
Use of org.apache.hadoop.hdfs.protocol.CacheDirective in project hadoop by apache.
The class CacheManager, method removeDirective.
public void removeDirective(long id, FSPermissionChecker pc)
    throws IOException {
  assert namesystem.hasWriteLock();
  try {
    CacheDirective directive = getById(id);
    checkWritePermission(pc, directive.getPool());
    removeInternal(directive);
  } catch (IOException e) {
    LOG.warn("removeDirective of " + id + " failed: ", e);
    throw e;
  }
  LOG.info("removeDirective of " + id + " successful.");
}
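The method is a thin permission-checked wrapper around removeInternal; the corresponding client call is DistributedFileSystem.removeCacheDirective(long). A hedged sketch, assuming the caller kept the ID that was returned when the directive was added:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class RemoveDirectiveSketch {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    long id = Long.parseLong(args[0]); // ID returned by addCacheDirective
    // Fails with InvalidRequestException (surfaced to the client as a
    // RemoteException) if the ID is unknown, or with AccessControlException
    // if the caller lacks write permission on the directive's pool.
    dfs.removeCacheDirective(id);
    dfs.close();
  }
}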
Use of org.apache.hadoop.hdfs.protocol.CacheDirective in project hadoop by apache.
The class CacheManager, method modifyDirective.
public void modifyDirective(CacheDirectiveInfo info, FSPermissionChecker pc,
    EnumSet<CacheFlag> flags) throws IOException {
  assert namesystem.hasWriteLock();
  String idString =
      (info.getId() == null) ? "(null)" : info.getId().toString();
  try {
    // Check for invalid IDs.
    Long id = info.getId();
    if (id == null) {
      throw new InvalidRequestException("Must supply an ID.");
    }
    CacheDirective prevEntry = getById(id);
    checkWritePermission(pc, prevEntry.getPool());
    // Fill in defaults
    CacheDirectiveInfo infoWithDefaults =
        createFromInfoAndDefaults(info, prevEntry);
    CacheDirectiveInfo.Builder builder =
        new CacheDirectiveInfo.Builder(infoWithDefaults);
    // Do validation
    validatePath(infoWithDefaults);
    validateReplication(infoWithDefaults, (short) -1);
    // Need to test the pool being set here to avoid rejecting a modify for a
    // directive that's already been forced into a pool
    CachePool srcPool = prevEntry.getPool();
    CachePool destPool = getCachePool(validatePoolName(infoWithDefaults));
    if (!srcPool.getPoolName().equals(destPool.getPoolName())) {
      checkWritePermission(pc, destPool);
      if (!flags.contains(CacheFlag.FORCE)) {
        checkLimit(destPool, infoWithDefaults.getPath().toUri().getPath(),
            infoWithDefaults.getReplication());
      }
    }
    // Verify the expiration against the destination pool
    validateExpiryTime(infoWithDefaults, destPool.getMaxRelativeExpiryMs());
    // Indicate changes to the CRM
    setNeedsRescan();
    // Validation passed
    removeInternal(prevEntry);
    addInternal(new CacheDirective(builder.build()), destPool);
  } catch (IOException e) {
    LOG.warn("modifyDirective of " + idString + " failed: ", e);
    throw e;
  }
  LOG.info("modifyDirective of {} successfully applied {}.", idString, info);
}
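Because createFromInfoAndDefaults backfills every unset field from the previous entry, a caller only has to supply the directive ID plus the fields it wants to change. A sketch against the public client API; the ID (42) and pool name ("coldPool") are stand-ins:

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

public class ModifyDirectiveSketch {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    // Move directive 42 into "coldPool"; path, replication, and expiry
    // keep their previous values.
    CacheDirectiveInfo change = new CacheDirectiveInfo.Builder()
        .setId(42L)
        .setPool("coldPool")
        .build();
    // CacheFlag.FORCE skips the checkLimit() quota test on the destination
    // pool, mirroring the flags.contains(CacheFlag.FORCE) check above.
    dfs.modifyCacheDirective(change, EnumSet.of(CacheFlag.FORCE));
    dfs.close();
  }
}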
Use of org.apache.hadoop.hdfs.protocol.CacheDirective in project hadoop by apache.
The class CacheManager, method addDirective.
public CacheDirectiveInfo addDirective(CacheDirectiveInfo info,
    FSPermissionChecker pc, EnumSet<CacheFlag> flags) throws IOException {
  assert namesystem.hasWriteLock();
  CacheDirective directive;
  try {
    CachePool pool = getCachePool(validatePoolName(info));
    checkWritePermission(pc, pool);
    String path = validatePath(info);
    short replication =
        validateReplication(info, pool.getDefaultReplication());
    long expiryTime = validateExpiryTime(info, pool.getMaxRelativeExpiryMs());
    // Do quota validation if required
    if (!flags.contains(CacheFlag.FORCE)) {
      checkLimit(pool, path, replication);
    }
    // All validation passed
    // Add a new entry with the next available ID.
    long id = getNextDirectiveId();
    directive = new CacheDirective(id, path, replication, expiryTime);
    addInternal(directive, pool);
  } catch (IOException e) {
    LOG.warn("addDirective of " + info + " failed: ", e);
    throw e;
  }
  LOG.info("addDirective of {} successful.", info);
  return directive.toInfo();
}
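On the client side this corresponds to DistributedFileSystem.addCacheDirective, which returns the ID produced by getNextDirectiveId(). A sketch in which the path, pool name, replication, and expiry are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

public class AddDirectiveSketch {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    CacheDirectiveInfo info = new CacheDirectiveInfo.Builder()
        .setPath(new Path("/data/hot-table")) // illustrative path
        .setPool("pool1") // illustrative pool; must exist and be writable
        .setReplication((short) 3) // replication for the cached blocks
        .setExpiration(
            CacheDirectiveInfo.Expiration.newRelative(24 * 60 * 60 * 1000L))
        .build();
    long id = dfs.addCacheDirective(info);
    System.out.println("Added cache directive " + id);
    dfs.close();
  }
}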
Use of org.apache.hadoop.hdfs.protocol.CacheDirective in project hadoop by apache.
The class CacheManager, method removeCachePool.
/**
 * Remove a cache pool.
 *
 * Only the superuser should be able to call this function.
 *
 * @param poolName
 *          The name for the cache pool to remove.
 */
public void removeCachePool(String poolName) throws IOException {
  assert namesystem.hasWriteLock();
  try {
    CachePoolInfo.validateName(poolName);
    CachePool pool = cachePools.remove(poolName);
    if (pool == null) {
      throw new InvalidRequestException(
          "Cannot remove non-existent cache pool " + poolName);
    }
    // Remove all directives in this pool.
    Iterator<CacheDirective> iter = pool.getDirectiveList().iterator();
    while (iter.hasNext()) {
      CacheDirective directive = iter.next();
      directivesByPath.remove(directive.getPath());
      directivesById.remove(directive.getId());
      iter.remove();
    }
    setNeedsRescan();
  } catch (IOException e) {
    LOG.info("removeCachePool of " + poolName + " failed: ", e);
    throw e;
  }
  LOG.info("removeCachePool of " + poolName + " successful.");
}
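Removing a pool also drops every directive inside it, as the iterator loop above shows. The matching client call is DistributedFileSystem.removeCachePool(String), which must run as the HDFS superuser per the javadoc. A sketch with an illustrative pool name, paired with the addCachePool call that creates it:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

public class RemovePoolSketch {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    String poolName = "pool1"; // illustrative
    dfs.addCachePool(new CachePoolInfo(poolName));
    // ... any directives added to the pool are removed along with it ...
    dfs.removeCachePool(poolName);
    dfs.close();
  }
}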