
Example 26 with Nfs3FileAttributes

Use of org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes in project hadoop by apache, in class TestOpenFileCtxCache, method testScan.

@Test
public void testScan() throws IOException, InterruptedException {
    NfsConfiguration conf = new NfsConfiguration();
    // Only two entries will be in the cache
    conf.setInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY, 2);
    DFSClient dfsClient = Mockito.mock(DFSClient.class);
    Nfs3FileAttributes attr = new Nfs3FileAttributes();
    HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);
    Mockito.when(fos.getPos()).thenReturn((long) 0);
    OpenFileCtx context1 = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
    OpenFileCtx context2 = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
    OpenFileCtx context3 = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
    OpenFileCtx context4 = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
    OpenFileCtxCache cache = new OpenFileCtxCache(conf, 10 * 60 * 100);
    // Test cleaning expired entry
    boolean ret = cache.put(new FileHandle(1), context1);
    assertTrue(ret);
    ret = cache.put(new FileHandle(2), context2);
    assertTrue(ret);
    Thread.sleep(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT + 1);
    cache.scan(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT);
    assertTrue(cache.size() == 0);
    // Test cleaning inactive entry
    ret = cache.put(new FileHandle(3), context3);
    assertTrue(ret);
    ret = cache.put(new FileHandle(4), context4);
    assertTrue(ret);
    context3.setActiveStatusForTest(false);
    cache.scan(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_DEFAULT);
    assertTrue(cache.size() == 1);
    assertTrue(cache.get(new FileHandle(3)) == null);
    assertTrue(cache.get(new FileHandle(4)) != null);
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) Nfs3FileAttributes(org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes) NfsConfiguration(org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration) ShellBasedIdMapping(org.apache.hadoop.security.ShellBasedIdMapping) HdfsDataOutputStream(org.apache.hadoop.hdfs.client.HdfsDataOutputStream) Test(org.junit.Test)
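
The test above relies on two eviction rules in OpenFileCtxCache.scan(): entries whose stream has been idle longer than the timeout are dropped, and entries flagged inactive are dropped even before the timeout. Below is a minimal, self-contained sketch of that scan pattern; it is written for this page, not Hadoop's actual OpenFileCtxCache implementation, and the Entry fields are hypothetical stand-ins for the real per-stream state.

import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class CacheScanSketch {
    // Hypothetical per-stream state: last access time plus an "active" flag.
    static class Entry {
        volatile long lastAccessMillis = System.currentTimeMillis();
        volatile boolean active = true;
    }

    private final Map<Integer, Entry> entries = new ConcurrentHashMap<>();

    void put(int handle, Entry e) {
        entries.put(handle, e);
    }

    // Drop entries that are idle past the timeout or no longer active.
    void scan(long streamTimeoutMillis) {
        long now = System.currentTimeMillis();
        for (Iterator<Entry> it = entries.values().iterator(); it.hasNext();) {
            Entry e = it.next();
            if (!e.active || now - e.lastAccessMillis > streamTimeoutMillis) {
                it.remove();
            }
        }
    }

    int size() {
        return entries.size();
    }
}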

Example 27 with Nfs3FileAttributes

Use of org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes in project hadoop by apache, in class Nfs3Utils, method getNfs3FileAttrFromFileStatus.

public static Nfs3FileAttributes getNfs3FileAttrFromFileStatus(HdfsFileStatus fs, IdMappingServiceProvider iug) {
    /**
     * Some 32-bit Linux clients have a problem with the 64-bit fileId: the
     * client appears to take only the lower 32 bits of the fileId and treat
     * them as a signed int. When bit 31 (the 32nd bit) is 1, the client
     * considers the fileId invalid.
     */
    NfsFileType fileType = fs.isDir() ? NfsFileType.NFSDIR : NfsFileType.NFSREG;
    fileType = fs.isSymlink() ? NfsFileType.NFSLNK : fileType;
    int nlink = (fileType == NfsFileType.NFSDIR) ? fs.getChildrenNum() + 2 : 1;
    long size = (fileType == NfsFileType.NFSDIR) ? getDirSize(fs.getChildrenNum()) : fs.getLen();
    return new Nfs3FileAttributes(fileType, nlink, fs.getPermission().toShort(),
        iug.getUidAllowingUnknown(fs.getOwner()),
        iug.getGidAllowingUnknown(fs.getGroup()), size, 0 /* fsid */,
        fs.getFileId(), fs.getModificationTime(), fs.getAccessTime(),
        new Nfs3FileAttributes.Specdata3());
}
Also used : NfsFileType(org.apache.hadoop.nfs.NfsFileType) Nfs3FileAttributes(org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes)
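
A standalone demo (written for this page, not taken from Hadoop) of the issue the comment above describes: when a 32-bit client keeps only the lower 32 bits of a 64-bit fileId and interprets them as a signed int, any fileId with bit 31 set becomes negative, which such clients reject as invalid. The fileId value is a made-up example.

public class FileIdTruncationDemo {
    public static void main(String[] args) {
        long fileId = 0x180000001L;   // hypothetical 64-bit fileId with bit 31 set
        int lower32 = (int) fileId;   // what a 32-bit client ends up holding
        System.out.println("64-bit fileId:        " + fileId);   // 6442450945
        System.out.println("signed lower 32 bits: " + lower32);  // -2147483647
    }
}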

Example 28 with Nfs3FileAttributes

Use of org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes in project hadoop by apache, in class ACCESS3Response, method deserialize.

public static ACCESS3Response deserialize(XDR xdr) {
    int status = xdr.readInt();
    Nfs3FileAttributes postOpAttr = null;
    int access = 0;
    if (status == Nfs3Status.NFS3_OK) {
        postOpAttr = Nfs3FileAttributes.deserialize(xdr);
        access = xdr.readInt();
    }
    return new ACCESS3Response(status, postOpAttr, access);
}
Also used : Nfs3FileAttributes(org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes)
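
For context, a hedged usage sketch of the constructor this decoder calls, ACCESS3Response(status, postOpAttr, access). The sketch is written for this page: the access bitmask is built from the ACCESS3 bit values defined in RFC 1813 written as raw hex rather than referencing named constants from the Hadoop codebase, and the attribute object is just a default instance.

import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.nfs.nfs3.response.ACCESS3Response;

public class Access3ResponseSketch {
    public static void main(String[] args) {
        int accessBits = 0x0001     // ACCESS3_READ (RFC 1813)
                       | 0x0002     // ACCESS3_LOOKUP
                       | 0x0020;    // ACCESS3_EXECUTE
        ACCESS3Response reply =
            new ACCESS3Response(Nfs3Status.NFS3_OK, new Nfs3FileAttributes(), accessBits);
        System.out.println("built ACCESS3Response with status=" + Nfs3Status.NFS3_OK
            + " access bits=0x" + Integer.toHexString(accessBits));
    }
}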

Example 29 with Nfs3FileAttributes

Use of org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes in project hadoop by apache, in class CREATE3Response, method deserialize.

public static CREATE3Response deserialize(XDR xdr) {
    int status = xdr.readInt();
    FileHandle objHandle = new FileHandle();
    Nfs3FileAttributes postOpObjAttr = null;
    if (status == Nfs3Status.NFS3_OK) {
        xdr.readBoolean();
        objHandle.deserialize(xdr);
        xdr.readBoolean();
        postOpObjAttr = Nfs3FileAttributes.deserialize(xdr);
    }
    WccData dirWcc = WccData.deserialize(xdr);
    return new CREATE3Response(status, objHandle, postOpObjAttr, dirWcc);
}
Also used : FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) Nfs3FileAttributes(org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes)
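
The two xdr.readBoolean() calls above skip XDR "value follows" discriminants: optional fields such as the post-op file handle (post_op_fh3) and post-op attributes (post_op_attr) are encoded as a 4-byte boolean followed by the value only when the boolean is TRUE. Here is a self-contained sketch of that layout using plain java.nio instead of Hadoop's XDR class; the 8-byte "handle" is a stand-in, not a real NFSv3 file handle.

import java.nio.ByteBuffer;

public class OptionalXdrFieldSketch {
    public static void main(String[] args) {
        ByteBuffer wire = ByteBuffer.allocate(12);
        wire.putInt(1);      // handle_follows = TRUE (XDR booleans are 4 bytes)
        wire.putLong(42L);   // stand-in for the opaque file-handle bytes
        wire.flip();

        boolean handleFollows = wire.getInt() != 0;
        long handle = handleFollows ? wire.getLong() : -1L;
        System.out.println("handleFollows=" + handleFollows + " handle=" + handle);
    }
}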

Example 30 with Nfs3FileAttributes

Use of org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes in project hadoop by apache, in class FSINFO3Response, method deserialize.

public static FSINFO3Response deserialize(XDR xdr) {
    int status = xdr.readInt();
    xdr.readBoolean();
    Nfs3FileAttributes postOpObjAttr = Nfs3FileAttributes.deserialize(xdr);
    int rtmax = 0;
    int rtpref = 0;
    int rtmult = 0;
    int wtmax = 0;
    int wtpref = 0;
    int wtmult = 0;
    int dtpref = 0;
    long maxFileSize = 0;
    NfsTime timeDelta = null;
    int properties = 0;
    if (status == Nfs3Status.NFS3_OK) {
        rtmax = xdr.readInt();
        rtpref = xdr.readInt();
        rtmult = xdr.readInt();
        wtmax = xdr.readInt();
        wtpref = xdr.readInt();
        wtmult = xdr.readInt();
        dtpref = xdr.readInt();
        maxFileSize = xdr.readHyper();
        timeDelta = NfsTime.deserialize(xdr);
        properties = xdr.readInt();
    }
    return new FSINFO3Response(status, postOpObjAttr, rtmax, rtpref, rtmult, wtmax, wtpref, wtmult, dtpref, maxFileSize, timeDelta, properties);
}
Also used : Nfs3FileAttributes(org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes) NfsTime(org.apache.hadoop.nfs.NfsTime)
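
A hedged construction example showing how the twelve parameters read above map onto FSINFO3Response. The constructor signature is taken from the return statement in the decoder, but the limit values are made-up examples and the NfsTime millisecond constructor is an assumption, not a documented default of the Hadoop NFS gateway.

import org.apache.hadoop.nfs.NfsTime;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.nfs.nfs3.response.FSINFO3Response;

public class Fsinfo3ResponseSketch {
    public static void main(String[] args) {
        Nfs3FileAttributes attrs = new Nfs3FileAttributes();
        NfsTime timeDelta = new NfsTime(1);          // assumed millisecond constructor
        FSINFO3Response reply = new FSINFO3Response(
            Nfs3Status.NFS3_OK, attrs,
            64 * 1024, 64 * 1024, 4096,              // rtmax, rtpref, rtmult (example values)
            64 * 1024, 64 * 1024, 4096,              // wtmax, wtpref, wtmult (example values)
            64 * 1024,                               // dtpref (example value)
            Long.MAX_VALUE, timeDelta, 0);           // maxFileSize, timeDelta, properties
        System.out.println("built FSINFO3Response with status=" + Nfs3Status.NFS3_OK);
    }
}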

Aggregations

Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes): 50
FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle): 30
DFSClient (org.apache.hadoop.hdfs.DFSClient): 27
IOException (java.io.IOException): 22
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 18
WccData (org.apache.hadoop.nfs.nfs3.response.WccData): 11
HdfsDataOutputStream (org.apache.hadoop.hdfs.client.HdfsDataOutputStream): 10
Test (org.junit.Test): 9
NfsConfiguration (org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration): 8
ShellBasedIdMapping (org.apache.hadoop.security.ShellBasedIdMapping): 8
COMMIT_STATUS (org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.COMMIT_STATUS): 6
CommitCtx (org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.CommitCtx): 5
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 5
RemoteException (org.apache.hadoop.ipc.RemoteException): 4
Channel (org.jboss.netty.channel.Channel): 4
ArrayList (java.util.ArrayList): 2
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 2
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 2
DirectoryListing (org.apache.hadoop.hdfs.protocol.DirectoryListing): 2
NfsTime (org.apache.hadoop.nfs.NfsTime): 2