Use of org.apache.hadoop.hdfs.protocol.CacheDirective in project hadoop by apache.
The class CacheManager, method modifyDirectiveFromEditLog.
/**
 * Modifies a directive, skipping most error checking. This is for careful
 * internal use only. modifyDirective can be non-deterministic since its error
 * checking depends on current system time, which poses a problem for edit log
 * replay.
 */
void modifyDirectiveFromEditLog(CacheDirectiveInfo info)
    throws InvalidRequestException {
  // Check for invalid IDs.
  Long id = info.getId();
  if (id == null) {
    throw new InvalidRequestException("Must supply an ID.");
  }
  CacheDirective prevEntry = getById(id);
  CacheDirectiveInfo newInfo = createFromInfoAndDefaults(info, prevEntry);
  removeInternal(prevEntry);
  addInternal(new CacheDirective(newInfo), getCachePool(newInfo.getPool()));
}
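For context, this replay path has a client-facing counterpart: an application modifies a directive through the public HDFS API, the NameNode validates it in modifyDirective, and the change is written to the edit log that this method later replays. A minimal client-side sketch, assuming a running cluster with caching enabled; the directive ID 42 and the path are hypothetical placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

public class ModifyDirectiveExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS points at an HDFS cluster.
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    CacheDirectiveInfo update = new CacheDirectiveInfo.Builder()
        .setId(42L)                       // ID of an existing directive
        .setPath(new Path("/data/hot"))   // fields left unset keep old values
        .setReplication((short) 3)
        .build();
    // Validated by CacheManager.modifyDirective, logged, and replayed via
    // modifyDirectiveFromEditLog if the NameNode restarts.
    dfs.modifyCacheDirective(update);
  }
}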
Use of org.apache.hadoop.hdfs.protocol.CacheDirective in project hadoop by apache.
The class CacheReplicationMonitor, method rescanCacheDirectives.
/**
 * Scan all CacheDirectives. Use the information to figure out
 * what cache replication factor each block should have.
 */
private void rescanCacheDirectives() {
  FSDirectory fsDir = namesystem.getFSDirectory();
  final long now = new Date().getTime();
  for (CacheDirective directive : cacheManager.getCacheDirectives()) {
    scannedDirectives++;
    // Skip processing this entry if it has expired
    if (directive.getExpiryTime() > 0 && directive.getExpiryTime() <= now) {
      LOG.debug("Directive {}: the directive expired at {} (now = {})",
          directive.getId(), directive.getExpiryTime(), now);
      continue;
    }
    String path = directive.getPath();
    INode node;
    try {
      node = fsDir.getINode(path, DirOp.READ);
    } catch (IOException e) {
      // We don't cache through symlinks or invalid paths
      LOG.debug("Directive {}: Failed to resolve path {} ({})",
          directive.getId(), path, e.getMessage());
      continue;
    }
    if (node == null) {
      LOG.debug("Directive {}: No inode found at {}",
          directive.getId(), path);
    } else if (node.isDirectory()) {
      INodeDirectory dir = node.asDirectory();
      ReadOnlyList<INode> children =
          dir.getChildrenList(Snapshot.CURRENT_STATE_ID);
      for (INode child : children) {
        if (child.isFile()) {
          rescanFile(directive, child.asFile());
        }
      }
    } else if (node.isFile()) {
      rescanFile(directive, node.asFile());
    } else {
      LOG.debug("Directive {}: ignoring non-directory, non-file inode {}",
          directive.getId(), node);
    }
  }
}
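The expiry check at the top of the loop acts on whatever expiration the directive was created with: a relative expiration is converted to an absolute expiry time when the directive is stored, and once getExpiryTime() <= now the rescan skips it. A short sketch of building a directive this monitor would stop caching after one hour; the pool name and path are hypothetical:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

public class ExpiringDirectiveExample {
  public static void main(String[] args) {
    // newRelative takes milliseconds from now; here, one hour.
    CacheDirectiveInfo info = new CacheDirectiveInfo.Builder()
        .setPath(new Path("/data/hot"))
        .setPool("pool1")
        .setReplication((short) 2)
        .setExpiration(
            CacheDirectiveInfo.Expiration.newRelative(60L * 60L * 1000L))
        .build();
    System.out.println(info);
  }
}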
Use of org.apache.hadoop.hdfs.protocol.CacheDirective in project hadoop by apache.
The class CacheManager, method addDirectiveFromEditLog.
/**
 * Adds a directive, skipping most error checking. This should only be called
 * internally in special scenarios like edit log replay.
 */
CacheDirectiveInfo addDirectiveFromEditLog(CacheDirectiveInfo directive)
    throws InvalidRequestException {
  long id = directive.getId();
  CacheDirective entry = new CacheDirective(directive);
  CachePool pool = cachePools.get(directive.getPool());
  addInternal(entry, pool);
  if (nextDirectiveId <= id) {
    nextDirectiveId = id + 1;
  }
  return entry.toInfo();
}
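The two lines after addInternal keep the directive ID counter ahead of every ID seen during replay; without them, a directive added after replay could reuse an ID already present in the log. A minimal standalone sketch of that high-watermark rule (not HDFS code; names are illustrative):

public class IdWatermarkSketch {
  private long nextDirectiveId = 1;

  void replay(long idFromEditLog) {
    // Replayed directives keep their original IDs, so the counter must
    // advance past each one to keep future assignments unique.
    if (nextDirectiveId <= idFromEditLog) {
      nextDirectiveId = idFromEditLog + 1;
    }
  }

  long assignFresh() {
    return nextDirectiveId++;   // ID for a brand-new directive
  }

  public static void main(String[] args) {
    IdWatermarkSketch w = new IdWatermarkSketch();
    w.replay(5);                              // edit log contained ID 5
    System.out.println(w.assignFresh());      // prints 6, not a reused ID
  }
}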
Use of org.apache.hadoop.hdfs.protocol.CacheDirective in project hadoop by apache.
The class CacheManager, method addInternal.
// RPC handlers
private void addInternal(CacheDirective directive, CachePool pool) {
  boolean addedDirective = pool.getDirectiveList().add(directive);
  assert addedDirective;
  directivesById.put(directive.getId(), directive);
  String path = directive.getPath();
  List<CacheDirective> directives = directivesByPath.get(path);
  if (directives == null) {
    directives = new ArrayList<CacheDirective>(1);
    directivesByPath.put(path, directives);
  }
  directives.add(directive);
  // Fix up pool stats
  CacheDirectiveStats stats =
      computeNeeded(directive.getPath(), directive.getReplication());
  directive.addBytesNeeded(stats.getBytesNeeded());
  directive.addFilesNeeded(stats.getFilesNeeded());
  setNeedsRescan();
}
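The get/null-check/put sequence above maintains a per-path list because several directives may target the same path. A standalone sketch of the same index pattern (not HDFS code; directive IDs stand in for CacheDirective objects, and modern Java can collapse the dance into computeIfAbsent):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class PathIndexSketch {
  private final Map<String, List<Long>> directivesByPath = new HashMap<>();

  void index(String path, long directiveId) {
    // One-call equivalent of: get, create-if-absent, put, then add.
    directivesByPath
        .computeIfAbsent(path, k -> new ArrayList<>(1))
        .add(directiveId);
  }

  public static void main(String[] args) {
    PathIndexSketch idx = new PathIndexSketch();
    idx.index("/data/hot", 1L);
    idx.index("/data/hot", 2L);                 // second directive, same path
    System.out.println(idx.directivesByPath);   // {/data/hot=[1, 2]}
  }
}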
Use of org.apache.hadoop.hdfs.protocol.CacheDirective in project hadoop by apache.
The class CacheManager, method loadState.
public void loadState(PersistState s) throws IOException {
  nextDirectiveId = s.section.getNextDirectiveId();
  for (CachePoolInfoProto p : s.pools) {
    CachePoolInfo info = new CachePoolInfo(p.getPoolName());
    if (p.hasOwnerName()) {
      info.setOwnerName(p.getOwnerName());
    }
    if (p.hasGroupName()) {
      info.setGroupName(p.getGroupName());
    }
    if (p.hasMode()) {
      info.setMode(new FsPermission((short) p.getMode()));
    }
    if (p.hasDefaultReplication()) {
      info.setDefaultReplication((short) p.getDefaultReplication());
    }
    if (p.hasLimit()) {
      info.setLimit(p.getLimit());
    }
    addCachePool(info);
  }
  for (CacheDirectiveInfoProto p : s.directives) {
    // Get pool reference by looking it up in the map
    final String poolName = p.getPool();
    CacheDirective directive = new CacheDirective(p.getId(),
        new Path(p.getPath()).toUri().getPath(),
        (short) p.getReplication(), p.getExpiration().getMillis());
    addCacheDirective(poolName, directive);
  }
}
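Each has*/set* pair above restores one optional cache-pool field from the fsimage protobufs. On a live cluster those same fields are populated through the public API when the pool is created. A minimal client-side sketch, assuming a running cluster; the pool name, owner, group, and limit are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

public class CreatePoolExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    // Each setter corresponds to one optional protobuf field that
    // loadState reads back when the fsimage is loaded.
    CachePoolInfo pool = new CachePoolInfo("pool1")
        .setOwnerName("hdfs")
        .setGroupName("hadoop")
        .setMode(new FsPermission((short) 0755))
        .setLimit(64L * 1024 * 1024);           // pool byte limit
    dfs.addCachePool(pool);
    long id = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/data/hot"))
        .setPool("pool1")
        .build());
    System.out.println("Created directive " + id);
  }
}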