Usage of org.apache.hadoop.hdfs.protocol.CacheDirective in the Apache Hadoop project.

The saveState method of the CacheManager class serializes the manager's cache pools and cache directives into their protobuf representations so they can be persisted in the fsimage:
public PersistState saveState() throws IOException {
  ArrayList<CachePoolInfoProto> pools = Lists
      .newArrayListWithCapacity(cachePools.size());
  ArrayList<CacheDirectiveInfoProto> directives = Lists
      .newArrayListWithCapacity(directivesById.size());
  // Serialize each cache pool, setting only the optional fields that are
  // actually present on the pool's info.
  for (CachePool pool : cachePools.values()) {
    CachePoolInfo p = pool.getInfo(true);
    CachePoolInfoProto.Builder b = CachePoolInfoProto.newBuilder()
        .setPoolName(p.getPoolName());
    if (p.getOwnerName() != null)
      b.setOwnerName(p.getOwnerName());
    if (p.getGroupName() != null)
      b.setGroupName(p.getGroupName());
    if (p.getMode() != null)
      b.setMode(p.getMode().toShort());
    if (p.getLimit() != null)
      b.setLimit(p.getLimit());
    pools.add(b.build());
  }
  // Serialize each cache directive the same way, null-checking every
  // optional field before setting it on the protobuf builder.
  for (CacheDirective directive : directivesById.values()) {
    CacheDirectiveInfo info = directive.toInfo();
    CacheDirectiveInfoProto.Builder b = CacheDirectiveInfoProto.newBuilder()
        .setId(info.getId());
    if (info.getPath() != null) {
      b.setPath(info.getPath().toUri().getPath());
    }
    if (info.getReplication() != null) {
      b.setReplication(info.getReplication());
    }
    if (info.getPool() != null) {
      b.setPool(info.getPool());
    }
    Expiration expiry = info.getExpiration();
    if (expiry != null) {
      // Relative expirations are converted to absolute times when a
      // directive is added, so only absolute times reach the image.
      assert (!expiry.isRelative());
      b.setExpiration(PBHelperClient.convert(expiry));
    }
    directives.add(b.build());
  }
  // The section header records the next directive ID and the counts the
  // loader uses to read the pools and directives back.
  CacheManagerSection s = CacheManagerSection.newBuilder()
      .setNextDirectiveId(nextDirectiveId)
      .setNumPools(pools.size())
      .setNumDirectives(directives.size())
      .build();
  return new PersistState(s, pools, directives);
}
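
Loading the image reverses this mapping. The sketch below is illustrative rather than Hadoop's actual loader (the real conversion lives in PBHelperClient); fromProto is a hypothetical helper name, and the sketch assumes the standard proto2 hasX()/getX() accessors that protoc generates for CacheDirectiveInfoProto's optional fields:

// Hypothetical helper sketching the inverse of the directive-serialization
// loop above; Hadoop's own converter is a PBHelperClient.convert overload.
private static CacheDirectiveInfo fromProto(CacheDirectiveInfoProto proto) {
  CacheDirectiveInfo.Builder builder = new CacheDirectiveInfo.Builder();
  if (proto.hasId()) {
    builder.setId(proto.getId());
  }
  if (proto.hasPath()) {
    // The save path stored only the URI path component, so a Path can be
    // rebuilt directly from the string.
    builder.setPath(new Path(proto.getPath()));
  }
  if (proto.hasReplication()) {
    // The proto field is an integer; CacheDirectiveInfo uses a Short.
    builder.setReplication((short) proto.getReplication());
  }
  if (proto.hasPool()) {
    builder.setPool(proto.getPool());
  }
  if (proto.hasExpiration()) {
    builder.setExpiration(PBHelperClient.convert(proto.getExpiration()));
  }
  return builder.build();
}

Mirroring the has-checks on the way in keeps unset optional fields unset across the round trip, rather than silently materializing protobuf defaults such as empty strings or zero replication.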