Use of org.apache.hadoop.nfs.nfs3.FileHandle in project hadoop by apache.
The class RpcProgramNfs3, method pathconf:
@VisibleForTesting
PATHCONF3Response pathconf(XDR xdr, SecurityHandler securityHandler,
    SocketAddress remoteAddress) {
  PATHCONF3Response response = new PATHCONF3Response(Nfs3Status.NFS3_OK);
  if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
    response.setStatus(Nfs3Status.NFS3ERR_ACCES);
    return response;
  }
  DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
  if (dfsClient == null) {
    response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
    return response;
  }
  PATHCONF3Request request;
  try {
    request = PATHCONF3Request.deserialize(xdr);
  } catch (IOException e) {
    LOG.error("Invalid PATHCONF request");
    return new PATHCONF3Response(Nfs3Status.NFS3ERR_INVAL);
  }
  FileHandle handle = request.getHandle();
  Nfs3FileAttributes attrs;
  if (LOG.isDebugEnabled()) {
    LOG.debug("NFS PATHCONF fileId: " + handle.getFileId() + " client: "
        + remoteAddress);
  }
  try {
    attrs = Nfs3Utils.getFileAttr(dfsClient, Nfs3Utils.getFileIdPath(handle),
        iug);
    if (attrs == null) {
      LOG.info("Can't get path for fileId: " + handle.getFileId());
      return new PATHCONF3Response(Nfs3Status.NFS3ERR_STALE);
    }
    return new PATHCONF3Response(Nfs3Status.NFS3_OK, attrs, 0,
        HdfsServerConstants.MAX_PATH_LENGTH, true, false, false, true);
  } catch (IOException e) {
    LOG.warn("Exception ", e);
    int status = mapErrorStatus(e);
    return new PATHCONF3Response(status);
  }
}
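The handler turns the FileHandle into an HDFS path via Nfs3Utils.getFileIdPath, which maps the NFS fileId onto HDFS's reserved inode namespace, so the gateway never needs the file's full name. A minimal standalone sketch of that mapping; the handle value 16386 is just an example inode id, and the exact path prefix shown in the comment is the /.reserved/.inodes convention used by HDFS:

import org.apache.hadoop.hdfs.nfs.nfs3.Nfs3Utils;
import org.apache.hadoop.nfs.nfs3.FileHandle;

public class FileIdPathDemo {
  public static void main(String[] args) {
    FileHandle handle = new FileHandle(16386); // example inode id
    // Resolves to the reserved inode path, e.g. "/.reserved/.inodes/16386",
    // which DFSClient can open directly by inode id
    System.out.println(Nfs3Utils.getFileIdPath(handle));
  }
}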
Use of org.apache.hadoop.nfs.nfs3.FileHandle in project hadoop by apache.
The class WriteManager, method handleWrite:
void handleWrite(DFSClient dfsClient, WRITE3Request request, Channel channel,
    int xid, Nfs3FileAttributes preOpAttr) throws IOException {
  int count = request.getCount();
  byte[] data = request.getData().array();
  if (data.length < count) {
    WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_INVAL);
    Nfs3Utils.writeChannel(channel,
        response.serialize(new XDR(), xid, new VerifierNone()), xid);
    return;
  }
  FileHandle handle = request.getHandle();
  if (LOG.isDebugEnabled()) {
    LOG.debug("handleWrite " + request);
  }
  // Check if there is a stream to write
  FileHandle fileHandle = request.getHandle();
  OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
  if (openFileCtx == null) {
    LOG.info("No opened stream for fileId: " + fileHandle.getFileId());
    String fileIdPath = Nfs3Utils.getFileIdPath(fileHandle.getFileId());
    HdfsDataOutputStream fos = null;
    Nfs3FileAttributes latestAttr = null;
    try {
      int bufferSize = config.getInt(
          CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
          CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
      fos = dfsClient.append(fileIdPath, bufferSize,
          EnumSet.of(CreateFlag.APPEND), null, null);
      latestAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
    } catch (RemoteException e) {
      IOException io = e.unwrapRemoteException();
      if (io instanceof AlreadyBeingCreatedException) {
        LOG.warn("Can't append file: " + fileIdPath
            + ". Possibly the file is being closed. Drop the request: "
            + request + ", wait for the client to retry...");
        return;
      }
      throw e;
    } catch (IOException e) {
      LOG.error("Can't append to file: " + fileIdPath, e);
      if (fos != null) {
        fos.close();
      }
      WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr),
          preOpAttr);
      WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO,
          fileWcc, count, request.getStableHow(),
          Nfs3Constant.WRITE_COMMIT_VERF);
      Nfs3Utils.writeChannel(channel,
          response.serialize(new XDR(), xid, new VerifierNone()), xid);
      return;
    }
    // Add open stream
    String writeDumpDir = config.get(NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_KEY,
        NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_DEFAULT);
    openFileCtx = new OpenFileCtx(fos, latestAttr,
        writeDumpDir + "/" + fileHandle.getFileId(), dfsClient, iug,
        aixCompatMode, config);
    if (!addOpenFileStream(fileHandle, openFileCtx)) {
      LOG.info("Can't add new stream. Close it. Tell client to retry.");
      try {
        fos.close();
      } catch (IOException e) {
        LOG.error("Can't close stream for fileId: " + handle.getFileId(), e);
      }
      // Notify client to retry
      WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
      WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_JUKEBOX,
          fileWcc, 0, request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF);
      Nfs3Utils.writeChannel(channel,
          response.serialize(new XDR(), xid, new VerifierNone()), xid);
      return;
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug("Opened stream for appending file: "
          + fileHandle.getFileId());
    }
  }
  // Add write into the async job queue
  openFileCtx.receivedNewWrite(dfsClient, request, channel, xid,
      asyncDataService, iug);
}
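Note that handle and fileHandle above are both obtained from request.getHandle(). The lookup in fileContextCache works because FileHandle behaves as a value object: two handles built from the same fileId compare equal, so the fresh handle carried by each WRITE RPC finds the OpenFileCtx cached by an earlier one. A minimal sketch of that property; the plain ConcurrentHashMap here stands in for fileContextCache and is not the gateway's actual cache class:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.hadoop.nfs.nfs3.FileHandle;

public class HandleAsKeyDemo {
  public static void main(String[] args) {
    ConcurrentMap<FileHandle, String> streams = new ConcurrentHashMap<>();
    streams.put(new FileHandle(16386), "open stream for inode 16386");
    // A fresh FileHandle with the same fileId locates the same entry
    System.out.println(streams.get(new FileHandle(16386)));
  }
}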
Use of org.apache.hadoop.nfs.nfs3.FileHandle in project hadoop by apache.
The class WriteManager, method getFileAttr:
Nfs3FileAttributes getFileAttr(DFSClient client, FileHandle dirHandle,
    String fileName) throws IOException {
  String fileIdPath = Nfs3Utils.getFileIdPath(dirHandle) + "/" + fileName;
  Nfs3FileAttributes attr = Nfs3Utils.getFileAttr(client, fileIdPath, iug);
  if ((attr != null) && (attr.getType() == NfsFileType.NFSREG.toValue())) {
    OpenFileCtx openFileCtx = fileContextCache.get(
        new FileHandle(attr.getFileId()));
    if (openFileCtx != null) {
      attr.setSize(openFileCtx.getNextOffset());
      attr.setUsed(openFileCtx.getNextOffset());
    }
  }
  return attr;
}
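The size override exists because writes can still be buffered in the gateway: the length the NameNode reports lags behind what the NFS client has already written, so while a stream is open the stream's next offset is the size the client should see. An illustrative restatement of that rule with made-up values; this is a sketch of the idea, not gateway code:

public class SizeOverrideDemo {
  public static void main(String[] args) {
    long namenodeSize = 1024;     // length HDFS currently reports
    long nextOffset = 4096;       // bytes accepted by the open write stream
    boolean hasOpenStream = true; // i.e. fileContextCache hit in the real code
    // With an open stream, the stream's offset is authoritative
    long sizeToReport = hasOpenStream ? nextOffset : namenodeSize;
    System.out.println(sizeToReport); // 4096
  }
}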
Use of org.apache.hadoop.nfs.nfs3.FileHandle in project hadoop by apache.
The class WriteManager, method handleCommit:
void handleCommit(DFSClient dfsClient, FileHandle fileHandle,
    long commitOffset, Channel channel, int xid,
    Nfs3FileAttributes preOpAttr) {
  long startTime = System.nanoTime();
  int status;
  OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
  if (openFileCtx == null) {
    LOG.info("No opened stream for fileId: " + fileHandle.getFileId()
        + " commitOffset=" + commitOffset + ". Return success in this case.");
    status = Nfs3Status.NFS3_OK;
  } else {
    COMMIT_STATUS ret = openFileCtx.checkCommit(dfsClient, commitOffset,
        channel, xid, preOpAttr, false);
    switch (ret) {
    case COMMIT_FINISHED:
    case COMMIT_INACTIVE_CTX:
      status = Nfs3Status.NFS3_OK;
      break;
    case COMMIT_INACTIVE_WITH_PENDING_WRITE:
    case COMMIT_ERROR:
      status = Nfs3Status.NFS3ERR_IO;
      break;
    case COMMIT_WAIT:
      // Do nothing. Commit is async now.
      return;
    case COMMIT_SPECIAL_WAIT:
      status = Nfs3Status.NFS3ERR_JUKEBOX;
      break;
    case COMMIT_SPECIAL_SUCCESS:
      status = Nfs3Status.NFS3_OK;
      break;
    default:
      LOG.error("Should not get commit return code: " + ret.name());
      throw new RuntimeException("Should not get commit return code: "
          + ret.name());
    }
  }
  // Send out the response
  Nfs3FileAttributes postOpAttr = null;
  try {
    postOpAttr = getFileAttr(dfsClient, new FileHandle(preOpAttr.getFileId()),
        iug);
  } catch (IOException e1) {
    LOG.info("Can't get postOpAttr for fileId: " + preOpAttr.getFileId(), e1);
  }
  WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr), postOpAttr);
  COMMIT3Response response = new COMMIT3Response(status, fileWcc,
      Nfs3Constant.WRITE_COMMIT_VERF);
  RpcProgramNfs3.metrics.addCommit(Nfs3Utils.getElapsedTime(startTime));
  Nfs3Utils.writeChannelCommit(channel,
      response.serialize(new XDR(), xid, new VerifierNone()), xid);
}
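The switch above is the whole contract between checkCommit and the RPC layer; note that COMMIT_WAIT produces no reply here because the response is sent asynchronously once the flush completes. Restated as a standalone pure function, a sketch only: the enum mirrors OpenFileCtx.COMMIT_STATUS but is redeclared locally, and a null return stands for "reply deferred":

import org.apache.hadoop.nfs.nfs3.Nfs3Status;

public class CommitStatusMapping {
  enum CommitStatus {
    COMMIT_FINISHED, COMMIT_INACTIVE_CTX, COMMIT_INACTIVE_WITH_PENDING_WRITE,
    COMMIT_ERROR, COMMIT_WAIT, COMMIT_SPECIAL_WAIT, COMMIT_SPECIAL_SUCCESS
  }

  // Returns null for COMMIT_WAIT: nothing goes out on the wire until the
  // async flush finishes and sends the response itself
  static Integer toNfs3Status(CommitStatus ret) {
    switch (ret) {
    case COMMIT_FINISHED:
    case COMMIT_INACTIVE_CTX:
    case COMMIT_SPECIAL_SUCCESS:
      return Nfs3Status.NFS3_OK;
    case COMMIT_INACTIVE_WITH_PENDING_WRITE:
    case COMMIT_ERROR:
      return Nfs3Status.NFS3ERR_IO;
    case COMMIT_WAIT:
      return null;
    case COMMIT_SPECIAL_WAIT:
      return Nfs3Status.NFS3ERR_JUKEBOX;
    default:
      throw new IllegalStateException("Unexpected status: " + ret);
    }
  }

  public static void main(String[] args) {
    // Prints the NFS3ERR_JUKEBOX code, telling the client to retry later
    System.out.println(toNfs3Status(CommitStatus.COMMIT_SPECIAL_WAIT));
  }
}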
Use of org.apache.hadoop.nfs.nfs3.FileHandle in project hadoop by apache.
The class TestOpenFileCtxCache, method testEviction:
@Test
public void testEviction() throws IOException, InterruptedException {
  NfsConfiguration conf = new NfsConfiguration();
  // Only two entries will be in the cache
  conf.setInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY, 2);
  DFSClient dfsClient = Mockito.mock(DFSClient.class);
  Nfs3FileAttributes attr = new Nfs3FileAttributes();
  HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);
  Mockito.when(fos.getPos()).thenReturn((long) 0);
  OpenFileCtx context1 = new OpenFileCtx(fos, attr, "/dumpFilePath",
      dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
  OpenFileCtx context2 = new OpenFileCtx(fos, attr, "/dumpFilePath",
      dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
  OpenFileCtx context3 = new OpenFileCtx(fos, attr, "/dumpFilePath",
      dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
  OpenFileCtx context4 = new OpenFileCtx(fos, attr, "/dumpFilePath",
      dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
  OpenFileCtx context5 = new OpenFileCtx(fos, attr, "/dumpFilePath",
      dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
  OpenFileCtxCache cache = new OpenFileCtxCache(conf, 10 * 60 * 100);
  boolean ret = cache.put(new FileHandle(1), context1);
  assertTrue(ret);
  Thread.sleep(1000);
  ret = cache.put(new FileHandle(2), context2);
  assertTrue(ret);
  ret = cache.put(new FileHandle(3), context3);
  assertFalse(ret);
  assertTrue(cache.size() == 2);
  // Wait for the oldest stream to be evict-able, insert again
  Thread.sleep(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT);
  assertTrue(cache.size() == 2);
  ret = cache.put(new FileHandle(3), context3);
  assertTrue(ret);
  assertTrue(cache.size() == 2);
  assertTrue(cache.get(new FileHandle(1)) == null);
  // Test that an inactive entry is evicted immediately
  context3.setActiveStatusForTest(false);
  ret = cache.put(new FileHandle(4), context4);
  assertTrue(ret);
  // Now the cache has context2 and context4
  // Test eviction failure if all entries have pending work
  context2.getPendingWritesForTest().put(new OffsetRange(0, 100),
      new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
  context4.getPendingCommitsForTest().put(Long.valueOf(100),
      new CommitCtx(0, null, 0, attr));
  Thread.sleep(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT);
  ret = cache.put(new FileHandle(5), context5);
  assertFalse(ret);
}
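The test pins down the cache contract: capacity is DFS_NFS_MAX_OPEN_FILES_KEY; an entry becomes evictable only once it is older than the stream timeout or marked inactive; and entries with pending writes or commits are never evicted, which is why the final put fails. A sketch of the two knobs as a gateway operator would set them; the values are illustrative, and the timeout key name assumes NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_KEY, the key the cache reads in current Hadoop:

import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;

public class GatewayCacheConfigDemo {
  public static void main(String[] args) {
    NfsConfiguration conf = new NfsConfiguration();
    // Cap on concurrently cached open streams (the test above uses 2)
    conf.setInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY, 256);
    // Idle time in ms before a stream becomes a candidate for eviction
    conf.setLong(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_KEY, 10 * 60 * 1000L);
  }
}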