Use of org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes in project hadoop by apache.
The class WriteManager, method handleWrite.
void handleWrite(DFSClient dfsClient, WRITE3Request request, Channel channel,
    int xid, Nfs3FileAttributes preOpAttr) throws IOException {
  int count = request.getCount();
  byte[] data = request.getData().array();
  if (data.length < count) {
    WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_INVAL);
    Nfs3Utils.writeChannel(channel,
        response.serialize(new XDR(), xid, new VerifierNone()), xid);
    return;
  }

  FileHandle handle = request.getHandle();
  if (LOG.isDebugEnabled()) {
    LOG.debug("handleWrite " + request);
  }

  // Check if there is a stream to write
  FileHandle fileHandle = request.getHandle();
  OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
  if (openFileCtx == null) {
    LOG.info("No opened stream for fileId: " + fileHandle.getFileId());

    String fileIdPath = Nfs3Utils.getFileIdPath(fileHandle.getFileId());
    HdfsDataOutputStream fos = null;
    Nfs3FileAttributes latestAttr = null;
    try {
      int bufferSize = config.getInt(
          CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
          CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
      fos = dfsClient.append(fileIdPath, bufferSize,
          EnumSet.of(CreateFlag.APPEND), null, null);
      latestAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
    } catch (RemoteException e) {
      IOException io = e.unwrapRemoteException();
      if (io instanceof AlreadyBeingCreatedException) {
        LOG.warn("Can't append file: " + fileIdPath
            + ". Possibly the file is being closed. Drop the request: "
            + request + ", wait for the client to retry...");
        return;
      }
      throw e;
    } catch (IOException e) {
      LOG.error("Can't append to file: " + fileIdPath, e);
      if (fos != null) {
        fos.close();
      }
      WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr),
          preOpAttr);
      WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO,
          fileWcc, count, request.getStableHow(),
          Nfs3Constant.WRITE_COMMIT_VERF);
      Nfs3Utils.writeChannel(channel,
          response.serialize(new XDR(), xid, new VerifierNone()), xid);
      return;
    }

    // Add open stream
    String writeDumpDir = config.get(NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_KEY,
        NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_DEFAULT);
    openFileCtx = new OpenFileCtx(fos, latestAttr,
        writeDumpDir + "/" + fileHandle.getFileId(), dfsClient, iug,
        aixCompatMode, config);
    if (!addOpenFileStream(fileHandle, openFileCtx)) {
      LOG.info("Can't add new stream. Close it. Tell client to retry.");
      try {
        fos.close();
      } catch (IOException e) {
        LOG.error("Can't close stream for fileId: " + handle.getFileId(), e);
      }
      // Notify client to retry
      WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
      WRITE3Response response = new WRITE3Response(
          Nfs3Status.NFS3ERR_JUKEBOX, fileWcc, 0, request.getStableHow(),
          Nfs3Constant.WRITE_COMMIT_VERF);
      Nfs3Utils.writeChannel(channel,
          response.serialize(new XDR(), xid, new VerifierNone()), xid);
      return;
    }

    if (LOG.isDebugEnabled()) {
      LOG.debug("Opened stream for appending file: "
          + fileHandle.getFileId());
    }
  }

  // Add write into the async job queue
  openFileCtx.receivedNewWrite(dfsClient, request, channel, xid,
      asyncDataService, iug);
  return;
}
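The method relies on the caller to capture the file's attributes before the operation, so that failure responses can carry weak cache consistency (WCC) data. A minimal caller-side sketch, assuming a WriteManager instance named writeManager and request, channel, xid, dfsClient, and iug values already in scope (these variable names are illustrative, not from the source):

// Hypothetical dispatch sketch: capture pre-op attributes, then hand off the WRITE.
String path = Nfs3Utils.getFileIdPath(request.getHandle().getFileId());
Nfs3FileAttributes preOpAttr = Nfs3Utils.getFileAttr(dfsClient, path, iug);
writeManager.handleWrite(dfsClient, request, channel, xid, preOpAttr);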
Use of org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes in project hadoop by apache.
The class WriteManager, method getFileAttr.
Nfs3FileAttributes getFileAttr(DFSClient client, FileHandle dirHandle,
    String fileName) throws IOException {
  String fileIdPath = Nfs3Utils.getFileIdPath(dirHandle) + "/" + fileName;
  Nfs3FileAttributes attr = Nfs3Utils.getFileAttr(client, fileIdPath, iug);
  if ((attr != null) && (attr.getType() == NfsFileType.NFSREG.toValue())) {
    // If the file has an open write stream, report the in-progress size
    // rather than the size the NameNode currently knows about.
    OpenFileCtx openFileCtx = fileContextCache.get(
        new FileHandle(attr.getFileId()));
    if (openFileCtx != null) {
      attr.setSize(openFileCtx.getNextOffset());
      attr.setUsed(openFileCtx.getNextOffset());
    }
  }
  return attr;
}
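A hedged usage sketch (the directory file id and file name below are made up for illustration): resolving a child through this method yields a size that includes data still buffered in an open write stream, which a raw Nfs3Utils.getFileAttr call would miss.

// Hypothetical caller: LOOKUP-style resolution of a child file's attributes.
FileHandle dirHandle = new FileHandle(16385); // example directory fileId
Nfs3FileAttributes attr =
    writeManager.getFileAttr(dfsClient, dirHandle, "part-00000");
if (attr != null) {
  LOG.debug("Visible size including buffered writes: " + attr.getSize());
}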
Use of org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes in project hadoop by apache.
The class WriteManager, method handleCommit.
void handleCommit(DFSClient dfsClient, FileHandle fileHandle,
    long commitOffset, Channel channel, int xid,
    Nfs3FileAttributes preOpAttr) {
  long startTime = System.nanoTime();
  int status;
  OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
  if (openFileCtx == null) {
    LOG.info("No opened stream for fileId: " + fileHandle.getFileId()
        + " commitOffset=" + commitOffset + ". Return success in this case.");
    status = Nfs3Status.NFS3_OK;
  } else {
    COMMIT_STATUS ret = openFileCtx.checkCommit(dfsClient, commitOffset,
        channel, xid, preOpAttr, false);
    switch (ret) {
      case COMMIT_FINISHED:
      case COMMIT_INACTIVE_CTX:
        status = Nfs3Status.NFS3_OK;
        break;
      case COMMIT_INACTIVE_WITH_PENDING_WRITE:
      case COMMIT_ERROR:
        status = Nfs3Status.NFS3ERR_IO;
        break;
      case COMMIT_WAIT:
        // Do nothing. Commit is async now.
        return;
      case COMMIT_SPECIAL_WAIT:
        status = Nfs3Status.NFS3ERR_JUKEBOX;
        break;
      case COMMIT_SPECIAL_SUCCESS:
        status = Nfs3Status.NFS3_OK;
        break;
      default:
        LOG.error("Should not get commit return code: " + ret.name());
        throw new RuntimeException(
            "Should not get commit return code: " + ret.name());
    }
  }

  // Send out the response
  Nfs3FileAttributes postOpAttr = null;
  try {
    postOpAttr = getFileAttr(dfsClient,
        new FileHandle(preOpAttr.getFileId()), iug);
  } catch (IOException e1) {
    LOG.info("Can't get postOpAttr for fileId: " + preOpAttr.getFileId(), e1);
  }
  WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr), postOpAttr);
  COMMIT3Response response = new COMMIT3Response(status, fileWcc,
      Nfs3Constant.WRITE_COMMIT_VERF);
  RpcProgramNfs3.metrics.addCommit(Nfs3Utils.getElapsedTime(startTime));
  Nfs3Utils.writeChannelCommit(channel,
      response.serialize(new XDR(), xid, new VerifierNone()), xid);
}
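The switch collapses the internal COMMIT_STATUS values into three wire-level outcomes; only COMMIT_WAIT skips the reply, because the response is sent later once the pending writes drain. A condensed view of that mapping (a summary of the code above, not additional behavior):

// Summary of the mapping implemented by the switch above:
//   COMMIT_FINISHED, COMMIT_INACTIVE_CTX, COMMIT_SPECIAL_SUCCESS -> NFS3_OK
//   COMMIT_INACTIVE_WITH_PENDING_WRITE, COMMIT_ERROR             -> NFS3ERR_IO
//   COMMIT_SPECIAL_WAIT                                          -> NFS3ERR_JUKEBOX (client retries)
//   COMMIT_WAIT                                                  -> no reply here; sent asynchronously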
Use of org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes in project hadoop by apache.
The class TestNfs3Utils, method testGetAccessRightsForUserGroup.
@Test
public void testGetAccessRightsForUserGroup() throws IOException {
  Nfs3FileAttributes attr = Mockito.mock(Nfs3FileAttributes.class);
  Mockito.when(attr.getUid()).thenReturn(2);
  Mockito.when(attr.getGid()).thenReturn(3);
  Mockito.when(attr.getMode()).thenReturn(448); // 700
  Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSREG.toValue());
  assertEquals("No access should be allowed as UID does not match attribute over mode 700",
      0, Nfs3Utils.getAccessRightsForUserGroup(3, 3, null, attr));

  Mockito.when(attr.getUid()).thenReturn(2);
  Mockito.when(attr.getGid()).thenReturn(3);
  Mockito.when(attr.getMode()).thenReturn(56); // 070
  Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSREG.toValue());
  assertEquals("No access should be allowed as GID does not match attribute over mode 070",
      0, Nfs3Utils.getAccessRightsForUserGroup(2, 4, null, attr));

  Mockito.when(attr.getUid()).thenReturn(2);
  Mockito.when(attr.getGid()).thenReturn(3);
  Mockito.when(attr.getMode()).thenReturn(7); // 007
  Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSREG.toValue());
  assertEquals("Access should be allowed as mode is 007 and UID/GID do not match",
      61 /* RWX */,
      Nfs3Utils.getAccessRightsForUserGroup(1, 4, new int[] { 5, 6 }, attr));

  Mockito.when(attr.getUid()).thenReturn(2);
  Mockito.when(attr.getGid()).thenReturn(10);
  Mockito.when(attr.getMode()).thenReturn(288); // 440
  Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSREG.toValue());
  assertEquals("Access should be allowed as mode is 440 and Aux GID does match",
      1 /* R */,
      Nfs3Utils.getAccessRightsForUserGroup(3, 4, new int[] { 5, 16, 10 }, attr));

  Mockito.when(attr.getUid()).thenReturn(2);
  Mockito.when(attr.getGid()).thenReturn(10);
  Mockito.when(attr.getMode()).thenReturn(448); // 700
  Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSDIR.toValue());
  assertEquals("Access should be allowed for dir as mode is 700 and UID does match",
      31 /* Lookup */,
      Nfs3Utils.getAccessRightsForUserGroup(2, 4, new int[] { 5, 16, 10 }, attr));
  assertEquals("No access should be allowed for dir as mode is 700 even though GID does match",
      0, Nfs3Utils.getAccessRightsForUserGroup(3, 10, new int[] { 5, 16, 4 }, attr));
  assertEquals("No access should be allowed for dir as mode is 700 even though AuxGID does match",
      0, Nfs3Utils.getAccessRightsForUserGroup(3, 20, new int[] { 5, 10 }, attr));

  Mockito.when(attr.getUid()).thenReturn(2);
  Mockito.when(attr.getGid()).thenReturn(10);
  Mockito.when(attr.getMode()).thenReturn(457); // 711
  Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSDIR.toValue());
  assertEquals("Access should be allowed for dir as mode is 711 and GID matches",
      2 /* Lookup */,
      Nfs3Utils.getAccessRightsForUserGroup(3, 10, new int[] { 5, 16, 11 }, attr));
}
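The decimal mode values stubbed via thenReturn are ordinary octal permission bits, and the expected results are NFS3 ACCESS bitmasks; a quick reference, assuming the standard ACCESS3_* bit values from RFC 1813:

// Decimal mode constants used above, as octal permission bits:
//   448 == 0700 (rwx------)    56 == 0070 (---rwx---)
//     7 == 0007 (------rwx)   288 == 0440 (r--r-----)
//   457 == 0711 (rwx--x--x)
// Expected access masks combine the NFS3 ACCESS bits (READ=0x01, LOOKUP=0x02,
// MODIFY=0x04, EXTEND=0x08, DELETE=0x10, EXECUTE=0x20):
//   61 == 0x3D -> READ|MODIFY|EXTEND|DELETE|EXECUTE (regular file, no LOOKUP)
//   31 == 0x1F -> READ|LOOKUP|MODIFY|EXTEND|DELETE  (directory, no EXECUTE)
//    2 == 0x02 -> LOOKUP only
//    1 == 0x01 -> READ only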
Use of org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes in project hadoop by apache.
The class TestOpenFileCtxCache, method testEviction.
@Test
public void testEviction() throws IOException, InterruptedException {
  NfsConfiguration conf = new NfsConfiguration();
  // Only two entries will be in the cache
  conf.setInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY, 2);
  DFSClient dfsClient = Mockito.mock(DFSClient.class);
  Nfs3FileAttributes attr = new Nfs3FileAttributes();
  HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);
  Mockito.when(fos.getPos()).thenReturn((long) 0);

  OpenFileCtx context1 = new OpenFileCtx(fos, attr, "/dumpFilePath",
      dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
  OpenFileCtx context2 = new OpenFileCtx(fos, attr, "/dumpFilePath",
      dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
  OpenFileCtx context3 = new OpenFileCtx(fos, attr, "/dumpFilePath",
      dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
  OpenFileCtx context4 = new OpenFileCtx(fos, attr, "/dumpFilePath",
      dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
  OpenFileCtx context5 = new OpenFileCtx(fos, attr, "/dumpFilePath",
      dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));

  OpenFileCtxCache cache = new OpenFileCtxCache(conf, 10 * 60 * 100);

  boolean ret = cache.put(new FileHandle(1), context1);
  assertTrue(ret);
  Thread.sleep(1000);
  ret = cache.put(new FileHandle(2), context2);
  assertTrue(ret);
  ret = cache.put(new FileHandle(3), context3);
  assertFalse(ret);
  assertTrue(cache.size() == 2);

  // Wait for the oldest stream to be evictable, insert again
  Thread.sleep(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT);
  assertTrue(cache.size() == 2);
  ret = cache.put(new FileHandle(3), context3);
  assertTrue(ret);
  assertTrue(cache.size() == 2);
  assertTrue(cache.get(new FileHandle(1)) == null);

  // Test inactive entry is evicted immediately
  context3.setActiveStatusForTest(false);
  ret = cache.put(new FileHandle(4), context4);
  assertTrue(ret);

  // Now the cache has context2 and context4
  // Test eviction failure if all entries have pending work.
  context2.getPendingWritesForTest().put(new OffsetRange(0, 100),
      new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
  context4.getPendingCommitsForTest().put(new Long(100),
      new CommitCtx(0, null, 0, attr));
  Thread.sleep(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT);
  ret = cache.put(new FileHandle(5), context5);
  assertFalse(ret);
}
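Condensed, the policy these assertions exercise is: a full cache first evicts inactive entries, otherwise the oldest entry that has both exceeded the stream timeout and has no pending writes or commits; if nothing qualifies, put returns false and the NFS client is asked to retry. A self-contained sketch of that decision logic, where the Entry class and its fields are hypothetical stand-ins, not the real OpenFileCtx/OpenFileCtxCache API:

import java.util.LinkedHashMap;
import java.util.Map;

class EvictionSketch {
  // Hypothetical stand-in for an open-file context; not the real OpenFileCtx.
  static final class Entry {
    boolean active;          // inactive entries may be evicted immediately
    boolean hasPendingWork;  // pending writes/commits block eviction
    long lastAccessMs;       // compared against the stream timeout
  }

  static boolean tryPut(LinkedHashMap<Long, Entry> cache, int maxEntries,
      long id, Entry e, long streamTimeoutMs) {
    if (cache.size() >= maxEntries) {
      Long victim = null;
      // Iterate in insertion order, so the oldest entries are checked first.
      for (Map.Entry<Long, Entry> it : cache.entrySet()) {
        Entry c = it.getValue();
        boolean timedOut =
            System.currentTimeMillis() - c.lastAccessMs >= streamTimeoutMs;
        if (!c.active || (timedOut && !c.hasPendingWork)) {
          victim = it.getKey();
          break;
        }
      }
      if (victim == null) {
        return false; // nothing evictable: caller tells the client to retry
      }
      cache.remove(victim);
    }
    cache.put(id, e);
    return true;
  }
}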