Use of org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload in project hadoop by apache.
Example: the addCacheDirective method of the NameNodeRpcServer class.
// ClientProtocol
@Override
public long addCacheDirective(CacheDirectiveInfo path,
    EnumSet<CacheFlag> flags) throws IOException {
  checkNNStartup();
  namesystem.checkOperation(OperationCategory.WRITE);
  CacheEntryWithPayload cacheEntry =
      RetryCache.waitForCompletion(retryCache, null);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return (Long) cacheEntry.getPayload();
  }
  boolean success = false;
  long ret = 0;
  try {
    ret = namesystem.addCacheDirective(path, flags, cacheEntry != null);
    success = true;
  } finally {
    RetryCache.setState(cacheEntry, success, ret);
  }
  return ret;
}
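All of the examples on this page follow the same template: look the call up in the retry cache, replay the cached payload if an earlier attempt already succeeded, otherwise execute the operation and record its result as the new payload. A minimal sketch of that template follows; retriedRpc, doWork, Args and Result are illustrative placeholders rather than Hadoop APIs, while the RetryCache and CacheEntryWithPayload calls are exactly the ones used above.

// Generic shape of a payload-returning, retry-safe RPC handler as used in
// NameNodeRpcServer. doWork() stands in for the actual namesystem operation.
public Result retriedRpc(Args args) throws IOException {
  // Look up (or register) this call by its client ID and call ID. A retry
  // of a call that already completed successfully gets the finished entry.
  CacheEntryWithPayload cacheEntry =
      RetryCache.waitForCompletion(retryCache, null);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    // Replay the previously computed result instead of re-executing.
    return (Result) cacheEntry.getPayload();
  }
  Result ret = null;
  try {
    // cacheEntry is null when the retry cache is disabled; the flag is
    // passed down so the namesystem knows whether retries are tracked.
    ret = doWork(args, cacheEntry != null);
  } finally {
    // Record the outcome and stash the result as the payload so a later
    // retry of the same call returns the identical value.
    RetryCache.setState(cacheEntry, ret != null, ret);
  }
  return ret;
}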
Use of org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload in project hadoop by apache.
Example: the create method of the NameNodeRpcServer class.
// ClientProtocol
@Override
public HdfsFileStatus create(String src, FsPermission masked,
    String clientName, EnumSetWritable<CreateFlag> flag, boolean createParent,
    short replication, long blockSize,
    CryptoProtocolVersion[] supportedVersions) throws IOException {
  checkNNStartup();
  String clientMachine = getClientMachine();
  if (stateChangeLog.isDebugEnabled()) {
    stateChangeLog.debug("*DIR* NameNode.create: file " + src + " for "
        + clientName + " at " + clientMachine);
  }
  if (!checkPathLength(src)) {
    throw new IOException("create: Pathname too long. Limit "
        + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
  }
  namesystem.checkOperation(OperationCategory.WRITE);
  CacheEntryWithPayload cacheEntry =
      RetryCache.waitForCompletion(retryCache, null);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return (HdfsFileStatus) cacheEntry.getPayload();
  }
  HdfsFileStatus status = null;
  try {
    PermissionStatus perm = new PermissionStatus(
        getRemoteUser().getShortUserName(), null, masked);
    status = namesystem.startFile(src, perm, clientName, clientMachine,
        flag.get(), createParent, replication, blockSize, supportedVersions,
        cacheEntry != null);
  } finally {
    RetryCache.setState(cacheEntry, status != null, status);
  }
  metrics.incrFilesCreated();
  metrics.incrCreateFileOps();
  return status;
}
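The cached HdfsFileStatus is what makes a retried create idempotent: if the first response is lost and Hadoop's IPC layer retries the call with the same client ID and call ID, the NameNode replays the original status instead of failing because the file now exists. None of this is visible to client code, which goes through the ordinary FileSystem API; a minimal client-side sketch, with the URI and path chosen purely for illustration:

// Ordinary HDFS client code. Retries of the underlying create RPC are made
// idempotent by the NameNode retry cache shown above; no client-side
// changes are needed.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CreateExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs =
             FileSystem.get(URI.create("hdfs://namenode:8020/"), conf);
         FSDataOutputStream out = fs.create(new Path("/tmp/example.txt"))) {
      out.writeBytes("hello");
    }
  }
}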
Use of org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload in project hadoop by apache.
Example: the startCheckpoint method of the NameNodeRpcServer class.
// NamenodeProtocol
@Override
public NamenodeCommand startCheckpoint(NamenodeRegistration registration)
    throws IOException {
  checkNNStartup();
  namesystem.checkSuperuserPrivilege();
  verifyRequest(registration);
  if (!nn.isRole(NamenodeRole.NAMENODE))
    throw new IOException("Only an ACTIVE node can invoke startCheckpoint.");
  CacheEntryWithPayload cacheEntry =
      RetryCache.waitForCompletion(retryCache, null);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return (NamenodeCommand) cacheEntry.getPayload();
  }
  NamenodeCommand ret = null;
  try {
    ret = namesystem.startCheckpoint(registration, nn.setRegistration());
  } finally {
    RetryCache.setState(cacheEntry, ret != null, ret);
  }
  return ret;
}
Use of org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload in project hadoop by apache.
Example: the createSnapshot method of the NameNodeRpcServer class.
@Override
public String createSnapshot(String snapshotRoot, String snapshotName)
    throws IOException {
  checkNNStartup();
  if (!checkPathLength(snapshotRoot)) {
    throw new IOException("createSnapshot: Pathname too long. Limit "
        + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
  }
  namesystem.checkOperation(OperationCategory.WRITE);
  CacheEntryWithPayload cacheEntry =
      RetryCache.waitForCompletion(retryCache, null);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return (String) cacheEntry.getPayload();
  }
  metrics.incrCreateSnapshotOps();
  String ret = null;
  try {
    ret = namesystem.createSnapshot(snapshotRoot, snapshotName,
        cacheEntry != null);
  } finally {
    RetryCache.setState(cacheEntry, ret != null, ret);
  }
  return ret;
}
Use of org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload in project hadoop by apache.
Example: the append method of the NameNodeRpcServer class.
// ClientProtocol
@Override
public LastBlockWithStatus append(String src, String clientName,
    EnumSetWritable<CreateFlag> flag) throws IOException {
  checkNNStartup();
  String clientMachine = getClientMachine();
  if (stateChangeLog.isDebugEnabled()) {
    stateChangeLog.debug("*DIR* NameNode.append: file " + src + " for "
        + clientName + " at " + clientMachine);
  }
  namesystem.checkOperation(OperationCategory.WRITE);
  CacheEntryWithPayload cacheEntry =
      RetryCache.waitForCompletion(retryCache, null);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return (LastBlockWithStatus) cacheEntry.getPayload();
  }
  LastBlockWithStatus info = null;
  boolean success = false;
  try {
    info = namesystem.appendFile(src, clientName, clientMachine, flag.get(),
        cacheEntry != null);
    success = true;
  } finally {
    RetryCache.setState(cacheEntry, success, info);
  }
  metrics.incrFilesAppended();
  return info;
}
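For contrast, NameNodeRpcServer methods whose result does not need to be replayed (delete, for example) use the plain CacheEntry variant: no payload, and the two-argument RetryCache.setState overload. A minimal sketch of that variant, where retriedBooleanRpc, booleanOp and Args are illustrative placeholders:

// Payload-less counterpart of the pattern above: a retried call is simply
// reported as successful, since there is no result value to replay.
public boolean retriedBooleanRpc(Args args) throws IOException {
  CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return true;
  }
  boolean ret = false;
  try {
    ret = booleanOp(args, cacheEntry != null);
  } finally {
    RetryCache.setState(cacheEntry, ret);
  }
  return ret;
}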