
Example 1 with CacheEntryWithPayload

Use of org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload in project hadoop by apache.

From the class NameNodeRpcServer, the method addCacheDirective:

// ClientProtocol
@Override
public long addCacheDirective(CacheDirectiveInfo path, EnumSet<CacheFlag> flags) throws IOException {
    checkNNStartup();
    namesystem.checkOperation(OperationCategory.WRITE);
    // If a previous attempt of this retried RPC already succeeded, replay its cached result.
    CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache, null);
    if (cacheEntry != null && cacheEntry.isSuccess()) {
        return (Long) cacheEntry.getPayload();
    }
    boolean success = false;
    long ret = 0;
    try {
        ret = namesystem.addCacheDirective(path, flags, cacheEntry != null);
        success = true;
    } finally {
        // Record the outcome and payload so later retries of this call can be short-circuited.
        RetryCache.setState(cacheEntry, success, ret);
    }
    return ret;
}
Also used: CacheEntryWithPayload (org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload)
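
All five examples share the same retry-cache pattern: look up a CacheEntryWithPayload for the incoming RPC, replay the cached payload if a previous attempt already succeeded, and otherwise perform the operation and record its outcome. The sketch below pulls that pattern out into a hypothetical helper; RetryCacheSketch, IdempotentOp and runWithRetryCache are illustrative names and are not part of Hadoop. It uses only the RetryCache calls that appear in the examples (waitForCompletion, isSuccess, getPayload, setState).

import java.io.IOException;

import org.apache.hadoop.ipc.RetryCache;
import org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload;

public class RetryCacheSketch {

    /** Stand-in for an operation whose result must survive RPC retries. */
    public interface IdempotentOp<T> {
        T run(boolean logRetryCache) throws IOException;
    }

    /**
     * Runs the operation at most once per retried RPC: if an earlier attempt
     * of the same call already succeeded, its cached payload is replayed.
     */
    public static <T> T runWithRetryCache(RetryCache cache, IdempotentOp<T> op)
            throws IOException {
        // May return null when retry caching is not in effect for this call.
        CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(cache, null);
        if (cacheEntry != null && cacheEntry.isSuccess()) {
            // A retry of a call that already completed: replay the stored result.
            @SuppressWarnings("unchecked")
            T cached = (T) cacheEntry.getPayload();
            return cached;
        }
        T result = null;
        boolean success = false;
        try {
            // Mirrors the "cacheEntry != null" argument the examples forward to the namesystem.
            result = op.run(cacheEntry != null);
            success = true;
        } finally {
            // Record outcome and payload so later retries of this call short-circuit.
            RetryCache.setState(cacheEntry, success, result);
        }
        return result;
    }
}

Note that NameNodeRpcServer inlines this logic in each method rather than using such a helper; the examples below repeat the same steps around different namesystem calls.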

Example 2 with CacheEntryWithPayload

Use of org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload in project hadoop by apache.

From the class NameNodeRpcServer, the method create:

// ClientProtocol
@Override
public HdfsFileStatus create(String src, FsPermission masked, String clientName, EnumSetWritable<CreateFlag> flag, boolean createParent, short replication, long blockSize, CryptoProtocolVersion[] supportedVersions) throws IOException {
    checkNNStartup();
    String clientMachine = getClientMachine();
    if (stateChangeLog.isDebugEnabled()) {
        stateChangeLog.debug("*DIR* NameNode.create: file " + src + " for " + clientName + " at " + clientMachine);
    }
    if (!checkPathLength(src)) {
        throw new IOException("create: Pathname too long.  Limit " + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
    }
    namesystem.checkOperation(OperationCategory.WRITE);
    CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache, null);
    if (cacheEntry != null && cacheEntry.isSuccess()) {
        return (HdfsFileStatus) cacheEntry.getPayload();
    }
    HdfsFileStatus status = null;
    try {
        PermissionStatus perm = new PermissionStatus(getRemoteUser().getShortUserName(), null, masked);
        status = namesystem.startFile(src, perm, clientName, clientMachine, flag.get(), createParent, replication, blockSize, supportedVersions, cacheEntry != null);
    } finally {
        RetryCache.setState(cacheEntry, status != null, status);
    }
    metrics.incrFilesCreated();
    metrics.incrCreateFileOps();
    return status;
}
Also used: HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), IOException (java.io.IOException), CacheEntryWithPayload (org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload), PermissionStatus (org.apache.hadoop.fs.permission.PermissionStatus)

Example 3 with CacheEntryWithPayload

Use of org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload in project hadoop by apache.

From the class NameNodeRpcServer, the method startCheckpoint:

// NamenodeProtocol
@Override
public NamenodeCommand startCheckpoint(NamenodeRegistration registration) throws IOException {
    checkNNStartup();
    namesystem.checkSuperuserPrivilege();
    verifyRequest(registration);
    if (!nn.isRole(NamenodeRole.NAMENODE))
        throw new IOException("Only an ACTIVE node can invoke startCheckpoint.");
    CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache, null);
    if (cacheEntry != null && cacheEntry.isSuccess()) {
        return (NamenodeCommand) cacheEntry.getPayload();
    }
    NamenodeCommand ret = null;
    try {
        ret = namesystem.startCheckpoint(registration, nn.setRegistration());
    } finally {
        RetryCache.setState(cacheEntry, ret != null, ret);
    }
    return ret;
}
Also used: IOException (java.io.IOException), CacheEntryWithPayload (org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload), NamenodeCommand (org.apache.hadoop.hdfs.server.protocol.NamenodeCommand)

Example 4 with CacheEntryWithPayload

Use of org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload in project hadoop by apache.

From the class NameNodeRpcServer, the method createSnapshot:

@Override
public String createSnapshot(String snapshotRoot, String snapshotName) throws IOException {
    checkNNStartup();
    if (!checkPathLength(snapshotRoot)) {
        throw new IOException("createSnapshot: Pathname too long.  Limit " + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
    }
    namesystem.checkOperation(OperationCategory.WRITE);
    CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache, null);
    if (cacheEntry != null && cacheEntry.isSuccess()) {
        return (String) cacheEntry.getPayload();
    }
    metrics.incrCreateSnapshotOps();
    String ret = null;
    try {
        ret = namesystem.createSnapshot(snapshotRoot, snapshotName, cacheEntry != null);
    } finally {
        RetryCache.setState(cacheEntry, ret != null, ret);
    }
    return ret;
}
Also used: IOException (java.io.IOException), CacheEntryWithPayload (org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload)

Example 5 with CacheEntryWithPayload

Use of org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload in project hadoop by apache.

From the class NameNodeRpcServer, the method append:

// ClientProtocol
@Override
public LastBlockWithStatus append(String src, String clientName, EnumSetWritable<CreateFlag> flag) throws IOException {
    checkNNStartup();
    String clientMachine = getClientMachine();
    if (stateChangeLog.isDebugEnabled()) {
        stateChangeLog.debug("*DIR* NameNode.append: file " + src + " for " + clientName + " at " + clientMachine);
    }
    namesystem.checkOperation(OperationCategory.WRITE);
    CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache, null);
    if (cacheEntry != null && cacheEntry.isSuccess()) {
        return (LastBlockWithStatus) cacheEntry.getPayload();
    }
    LastBlockWithStatus info = null;
    boolean success = false;
    try {
        info = namesystem.appendFile(src, clientName, clientMachine, flag.get(), cacheEntry != null);
        success = true;
    } finally {
        RetryCache.setState(cacheEntry, success, info);
    }
    metrics.incrFilesAppended();
    return info;
}
Also used: LastBlockWithStatus (org.apache.hadoop.hdfs.protocol.LastBlockWithStatus), CacheEntryWithPayload (org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload)
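
For comparison, the append example above could be expressed with the hypothetical runWithRetryCache helper sketched after Example 1. This is illustrative only, not Hadoop code; the debug logging and metrics update are omitted for brevity, and the namesystem.appendFile call is taken from the example.

// ClientProtocol
@Override
public LastBlockWithStatus append(String src, String clientName, EnumSetWritable<CreateFlag> flag) throws IOException {
    checkNNStartup();
    String clientMachine = getClientMachine();
    namesystem.checkOperation(OperationCategory.WRITE);
    // Replays the cached LastBlockWithStatus on a successful retry; otherwise calls
    // appendFile and records the result in the retry cache.
    return RetryCacheSketch.runWithRetryCache(retryCache,
        logRetryCache -> namesystem.appendFile(src, clientName, clientMachine, flag.get(), logRetryCache));
}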

Aggregations

CacheEntryWithPayload (org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload): 5 uses
IOException (java.io.IOException): 3 uses
PermissionStatus (org.apache.hadoop.fs.permission.PermissionStatus): 1 use
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 1 use
LastBlockWithStatus (org.apache.hadoop.hdfs.protocol.LastBlockWithStatus): 1 use
NamenodeCommand (org.apache.hadoop.hdfs.server.protocol.NamenodeCommand): 1 use