
Example 1 with Expiration

Use of org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration in project hadoop by apache, from the class CacheManager, method saveState:

public PersistState saveState() throws IOException {
    ArrayList<CachePoolInfoProto> pools =
        Lists.newArrayListWithCapacity(cachePools.size());
    ArrayList<CacheDirectiveInfoProto> directives =
        Lists.newArrayListWithCapacity(directivesById.size());
    // Serialize each cache pool, copying only the fields that are set.
    for (CachePool pool : cachePools.values()) {
        CachePoolInfo p = pool.getInfo(true);
        CachePoolInfoProto.Builder b =
            CachePoolInfoProto.newBuilder().setPoolName(p.getPoolName());
        if (p.getOwnerName() != null) {
            b.setOwnerName(p.getOwnerName());
        }
        if (p.getGroupName() != null) {
            b.setGroupName(p.getGroupName());
        }
        if (p.getMode() != null) {
            b.setMode(p.getMode().toShort());
        }
        if (p.getLimit() != null) {
            b.setLimit(p.getLimit());
        }
        pools.add(b.build());
    }
    // Serialize each cache directive.
    for (CacheDirective directive : directivesById.values()) {
        CacheDirectiveInfo info = directive.toInfo();
        CacheDirectiveInfoProto.Builder b =
            CacheDirectiveInfoProto.newBuilder().setId(info.getId());
        if (info.getPath() != null) {
            b.setPath(info.getPath().toUri().getPath());
        }
        if (info.getReplication() != null) {
            b.setReplication(info.getReplication());
        }
        if (info.getPool() != null) {
            b.setPool(info.getPool());
        }
        Expiration expiry = info.getExpiration();
        if (expiry != null) {
            // Stored directives always carry absolute expirations;
            // relative ones are converted on creation (see Example 2).
            assert (!expiry.isRelative());
            b.setExpiration(PBHelperClient.convert(expiry));
        }
        directives.add(b.build());
    }
    CacheManagerSection s = CacheManagerSection.newBuilder()
        .setNextDirectiveId(nextDirectiveId)
        .setNumPools(pools.size())
        .setNumDirectives(directives.size())
        .build();
    return new PersistState(s, pools, directives);
}
Also used: CachePoolInfoProto (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto), Expiration (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration), CacheDirectiveInfoProto (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto), CacheManagerSection (org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection), CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo), CacheDirective (org.apache.hadoop.hdfs.protocol.CacheDirective), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo)
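
The directives persisted above always carry absolute expirations, which is why the assertion in saveState holds: relative expirations are converted when a directive is created (see Example 2). As a hedged illustration of the client side, the sketch below builds a directive with a relative Expiration and submits it via addCacheDirective; the NameNode URI hdfs://namenode:8020, the path /data/hot, and the pool name hotPool are illustrative placeholders, not values from the example above.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration;

public class AddDirectiveExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder NameNode URI; substitute your cluster's address.
        DistributedFileSystem dfs = (DistributedFileSystem)
            FileSystem.get(URI.create("hdfs://namenode:8020"), conf);
        // A relative Expiration: cache the path for one hour from now.
        CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
            .setPath(new Path("/data/hot"))
            .setPool("hotPool")
            .setExpiration(Expiration.newRelative(60 * 60 * 1000L))
            .build();
        // The NameNode converts the relative expiry into an absolute
        // timestamp (see validateExpiryTime in Example 2) before storing it.
        long id = dfs.addCacheDirective(directive);
        System.out.println("Added cache directive " + id);
    }
}

Expiration also offers newAbsolute factories for cases where a fixed deadline is wanted instead of a duration.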

Example 2 with Expiration

Use of org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration in project hadoop by apache, from the class CacheManager, method validateExpiryTime:

/**
   * Calculates the absolute expiry time of the directive from the
   * {@link CacheDirectiveInfo.Expiration}. This converts a relative Expiration
   * into an absolute time based on the local clock.
   *
   * @param info the CacheDirectiveInfo to validate.
   * @param maxRelativeExpiryTime the maximum relative expiry time of the
   *        info's pool, in milliseconds.
   * @return the absolute expiration time, or the pool's max absolute
   *         expiration if the info's expiration was not set.
   * @throws InvalidRequestException if the info's Expiration is invalid.
   */
private static long validateExpiryTime(CacheDirectiveInfo info, long maxRelativeExpiryTime) throws InvalidRequestException {
    LOG.trace("Validating directive {} pool maxRelativeExpiryTime {}", info, maxRelativeExpiryTime);
    final long now = new Date().getTime();
    final long maxAbsoluteExpiryTime = now + maxRelativeExpiryTime;
    if (info == null || info.getExpiration() == null) {
        return maxAbsoluteExpiryTime;
    }
    Expiration expiry = info.getExpiration();
    if (expiry.getMillis() < 0L) {
        throw new InvalidRequestException("Cannot set a negative expiration: " + expiry.getMillis());
    }
    long relExpiryTime, absExpiryTime;
    if (expiry.isRelative()) {
        relExpiryTime = expiry.getMillis();
        absExpiryTime = now + relExpiryTime;
    } else {
        absExpiryTime = expiry.getMillis();
        relExpiryTime = absExpiryTime - now;
    }
    // Need to cap the expiry so we don't overflow a long when doing math
    if (relExpiryTime > Expiration.MAX_RELATIVE_EXPIRY_MS) {
        throw new InvalidRequestException("Expiration " + expiry.toString() + " is too far in the future!");
    }
    // Fail if the requested expiry is greater than the max
    if (relExpiryTime > maxRelativeExpiryTime) {
        throw new InvalidRequestException("Expiration " + expiry.toString() + " exceeds the max relative expiration time of " + maxRelativeExpiryTime + " ms.");
    }
    return absExpiryTime;
}
Also used: Expiration (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration), InvalidRequestException (org.apache.hadoop.fs.InvalidRequestException), Date (java.util.Date)
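
To make the conversion arithmetic concrete, here is a small self-contained sketch (not Hadoop code) that mirrors validateExpiryTime's relative-to-absolute conversion and its two cap checks. The constant is copied to keep the sketch standalone and is assumed to match Expiration.MAX_RELATIVE_EXPIRY_MS (Long.MAX_VALUE / 4 in the Hadoop source); the helper name toAbsoluteExpiry is hypothetical.

import java.util.Date;

public class ExpiryMath {
    // Assumed to mirror Expiration.MAX_RELATIVE_EXPIRY_MS.
    static final long MAX_RELATIVE_EXPIRY_MS = Long.MAX_VALUE / 4;

    // Hypothetical standalone version of the conversion performed by
    // validateExpiryTime: turns a (relative?, millis) pair into an
    // absolute expiry timestamp, applying the same two cap checks.
    static long toAbsoluteExpiry(boolean relative, long millis,
                                 long maxRelativeExpiryTime) {
        final long now = new Date().getTime();
        if (millis < 0L) {
            throw new IllegalArgumentException("negative expiration");
        }
        // Derive both forms so the caps can be checked in relative terms.
        long relExpiry = relative ? millis : millis - now;
        long absExpiry = relative ? now + millis : millis;
        if (relExpiry > MAX_RELATIVE_EXPIRY_MS) {
            throw new IllegalArgumentException("too far in the future");
        }
        if (relExpiry > maxRelativeExpiryTime) {
            throw new IllegalArgumentException("exceeds pool maximum");
        }
        return absExpiry;
    }

    public static void main(String[] args) {
        long oneHour = 60 * 60 * 1000L;
        // Relative: expires one hour from now.
        System.out.println(toAbsoluteExpiry(true, oneHour, 24 * oneHour));
        // Absolute: a fixed timestamp one hour from now gives the same result.
        long stamp = new Date().getTime() + oneHour;
        System.out.println(toAbsoluteExpiry(false, stamp, 24 * oneHour));
    }
}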

Aggregations

Expiration (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration): 2
Date (java.util.Date): 1
InvalidRequestException (org.apache.hadoop.fs.InvalidRequestException): 1
CacheDirective (org.apache.hadoop.hdfs.protocol.CacheDirective): 1
CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo): 1
CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo): 1
CacheDirectiveInfoProto (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto): 1
CachePoolInfoProto (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto): 1
CacheManagerSection (org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection): 1