Example 41 with FSDataInputStream

use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.

the class RpcProgramNfs3 method read.

@VisibleForTesting
READ3Response read(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
    READ3Response response = new READ3Response(Nfs3Status.NFS3_OK);
    final String userName = securityHandler.getUser();
    if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
        response.setStatus(Nfs3Status.NFS3ERR_ACCES);
        return response;
    }
    DFSClient dfsClient = clientCache.getDfsClient(userName);
    if (dfsClient == null) {
        response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
        return response;
    }
    READ3Request request;
    try {
        request = READ3Request.deserialize(xdr);
    } catch (IOException e) {
        LOG.error("Invalid READ request");
        return new READ3Response(Nfs3Status.NFS3ERR_INVAL);
    }
    long offset = request.getOffset();
    int count = request.getCount();
    FileHandle handle = request.getHandle();
    if (LOG.isDebugEnabled()) {
        LOG.debug("NFS READ fileId: " + handle.getFileId() + " offset: " + offset + " count: " + count + " client: " + remoteAddress);
    }
    Nfs3FileAttributes attrs;
    boolean eof;
    if (count == 0) {
        // Only do access check.
        try {
            // Don't read from cache. Client may not have read permission.
            attrs = Nfs3Utils.getFileAttr(dfsClient, Nfs3Utils.getFileIdPath(handle), iug);
        } catch (IOException e) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Get error accessing file, fileId: " + handle.getFileId(), e);
            }
            return new READ3Response(Nfs3Status.NFS3ERR_IO);
        }
        if (attrs == null) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Can't get path for fileId: " + handle.getFileId());
            }
            return new READ3Response(Nfs3Status.NFS3ERR_NOENT);
        }
        int access = Nfs3Utils.getAccessRightsForUserGroup(securityHandler.getUid(), securityHandler.getGid(), securityHandler.getAuxGids(), attrs);
        if ((access & Nfs3Constant.ACCESS3_READ) != 0) {
            eof = offset >= attrs.getSize();
            return new READ3Response(Nfs3Status.NFS3_OK, attrs, 0, eof, ByteBuffer.wrap(new byte[0]));
        } else {
            return new READ3Response(Nfs3Status.NFS3ERR_ACCES);
        }
    }
    // In case there is buffered data for the same file, flush it. This can be
    // optimized later by reading from the cache.
    int ret = writeManager.commitBeforeRead(dfsClient, handle, offset + count);
    if (ret != Nfs3Status.NFS3_OK) {
        LOG.warn("commitBeforeRead didn't succeed with ret=" + ret + ". Read may not get most recent data.");
    }
    try {
        int rtmax = config.getInt(NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_KEY, NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_DEFAULT);
        int buffSize = Math.min(rtmax, count);
        byte[] readbuffer = new byte[buffSize];
        int readCount = 0;
        /*
         * Retry exactly once because the DFSInputStream can be stale.
         */
        for (int i = 0; i < 2; ++i) {
            FSDataInputStream fis = clientCache.getDfsInputStream(userName, Nfs3Utils.getFileIdPath(handle));
            if (fis == null) {
                return new READ3Response(Nfs3Status.NFS3ERR_ACCES);
            }
            try {
                readCount = fis.read(offset, readbuffer, 0, count);
                metrics.incrBytesRead(readCount);
                // Success: leave the loop instead of retrying.
                break;
            } catch (IOException e) {
                // TODO: a cleaner way is to throw a new type of exception,
                // which requires incompatible changes.
                if (e.getMessage().equals("Stream closed")) {
                    clientCache.invalidateDfsInputStream(userName, Nfs3Utils.getFileIdPath(handle));
                    continue;
                } else {
                    throw e;
                }
            }
        }
        attrs = Nfs3Utils.getFileAttr(dfsClient, Nfs3Utils.getFileIdPath(handle), iug);
        if (readCount < count) {
            LOG.info("Partial read. Asked offset: " + offset + " count: " + count + " and read back: " + readCount + " file size: " + attrs.getSize());
        }
        // HDFS returns -1 for read beyond file size.
        if (readCount < 0) {
            readCount = 0;
        }
        eof = (offset + readCount) >= attrs.getSize();
        return new READ3Response(Nfs3Status.NFS3_OK, attrs, readCount, eof, ByteBuffer.wrap(readbuffer));
    } catch (IOException e) {
        LOG.warn("Read error: " + e.getClass() + " offset: " + offset + " count: " + count, e);
        int status = mapErrorStatus(e);
        return new READ3Response(status);
    }
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) READ3Request(org.apache.hadoop.nfs.nfs3.request.READ3Request) FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) Nfs3FileAttributes(org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) IOException(java.io.IOException) READ3Response(org.apache.hadoop.nfs.nfs3.response.READ3Response) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
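
The READ handler above leans on FSDataInputStream's positional read, read(position, buffer, offset, length), which fetches bytes at an absolute offset without moving the stream's seek pointer; that property is what lets the gateway share one cached stream across concurrent READ requests. Below is a minimal, self-contained sketch of the same pattern; the file path and offset are hypothetical, not taken from the handler.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PositionalReadSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // Hypothetical path; point this at a file that exists in your cluster.
        try (FSDataInputStream in = fs.open(new Path("/tmp/example.dat"))) {
            byte[] buf = new byte[4096];
            // Reads up to buf.length bytes at absolute offset 1024 without
            // changing the stream position; returns -1 at or beyond EOF,
            // which matches the readCount < 0 handling in the handler above.
            int n = in.read(1024L, buf, 0, buf.length);
            System.out.println("bytes read: " + n);
        }
    }
}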

Example 42 with FSDataInputStream

use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.

the class TestRpcProgramNfs3 method getFileContentsUsingDfs.

private byte[] getFileContentsUsingDfs(String fileName, int len) throws Exception {
    final FSDataInputStream in = hdfs.open(new Path(fileName));
    final byte[] ret = new byte[len];
    in.readFully(ret);
    try {
        in.readByte();
        Assert.fail("expected end of file");
    } catch (EOFException e) {
    // expected. Unfortunately there is no associated message to check
    }
    in.close();
    return ret;
}
Also used : Path(org.apache.hadoop.fs.Path) EOFException(java.io.EOFException) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream)
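
One caveat in the helper above: in.close() is skipped if readFully or the failed assertion throws. A try-with-resources variant closes the stream on every path; this is a sketch for the same test class, with the FileSystem handle passed in rather than taken from the hdfs field.

import java.io.EOFException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.Assert;

private static byte[] getFileContentsUsingDfsSafely(FileSystem fs, String fileName, int len) throws Exception {
    try (FSDataInputStream in = fs.open(new Path(fileName))) {
        final byte[] ret = new byte[len];
        in.readFully(ret);
        try {
            in.readByte();
            Assert.fail("expected end of file");
        } catch (EOFException e) {
            // Expected: reading past the last byte throws EOFException.
        }
        return ret;
    }
}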

Example 43 with FSDataInputStream

use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.

the class AppendTestUtil method check.

public static void check(FileSystem fs, Path p, long length) throws IOException {
    int i = -1;
    try {
        final FileStatus status = fs.getFileStatus(p);
        FSDataInputStream in = fs.open(p);
        if (in.getWrappedStream() instanceof DFSInputStream) {
            long len = ((DFSInputStream) in.getWrappedStream()).getFileLength();
            assertEquals(length, len);
        } else {
            assertEquals(length, status.getLen());
        }
        for (i++; i < length; i++) {
            assertEquals((byte) i, (byte) in.read());
        }
        i = -(int) length;
        // Expect EOF: the whole file has been consumed.
        assertEquals(-1, in.read());
        in.close();
    } catch (IOException ioe) {
        throw new IOException("p=" + p + ", length=" + length + ", i=" + i, ioe);
    }
}
Also used : FileStatus(org.apache.hadoop.fs.FileStatus) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) IOException(java.io.IOException)
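
The instanceof check above exists because DFSInputStream.getFileLength() reports the length visible to the reader, including bytes in a last block that is still under construction, whereas FileStatus.getLen() reflects only what the NameNode has finalized. A compact sketch of the unwrapping idiom, with a non-HDFS fallback (the method name is made up):

import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSInputStream;

static long visibleLength(FileSystem fs, Path p) throws IOException {
    try (FSDataInputStream in = fs.open(p)) {
        if (in.getWrappedStream() instanceof DFSInputStream) {
            // HDFS-specific: counts visible bytes of an under-construction block.
            return ((DFSInputStream) in.getWrappedStream()).getFileLength();
        }
        // Any other file system: fall back to the catalog length.
        return fs.getFileStatus(p).getLen();
    }
}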

Example 44 with FSDataInputStream

use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.

the class DFSTestUtil method checkFiles.

    /** Check whether the files have been copied correctly. */
public boolean checkFiles(FileSystem fs, String topdir) throws IOException {
    Path root = new Path(topdir);
    for (int idx = 0; idx < nFiles; idx++) {
        Path fPath = new Path(root, files[idx].getName());
        try (FSDataInputStream in = fs.open(fPath)) {
            byte[] toRead = new byte[files[idx].getSize()];
            byte[] toCompare = new byte[files[idx].getSize()];
            Random rb = new Random(files[idx].getSeed());
            rb.nextBytes(toCompare);
            in.readFully(0, toRead);
            for (int i = 0; i < toRead.length; i++) {
                if (toRead[i] != toCompare[i]) {
                    return false;
                }
            }
        }
    }
    return true;
}
Also used : Path(org.apache.hadoop.fs.Path) Random(java.util.Random) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream)
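
checkFiles works because java.util.Random is deterministic: a writer that filled each file from new Random(seed) lets the checker regenerate the identical byte stream for comparison, with no expected data stored anywhere. A round-trip sketch of that idiom (path, seed, and size are arbitrary):

import java.io.IOException;
import java.util.Arrays;
import java.util.Random;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

static boolean writeAndVerify(FileSystem fs, Path p, long seed, int size) throws IOException {
    byte[] data = new byte[size];
    new Random(seed).nextBytes(data);
    try (FSDataOutputStream out = fs.create(p)) {
        out.write(data);
    }
    byte[] readBack = new byte[size];
    try (FSDataInputStream in = fs.open(p)) {
        in.readFully(0, readBack);
    }
    // The same seed reproduces the same bytes, so no copy of 'data' is needed.
    byte[] expected = new byte[size];
    new Random(seed).nextBytes(expected);
    return Arrays.equals(expected, readBack);
}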

Example 45 with FSDataInputStream

use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.

the class AppendTestUtil method checkFullFile.

public static void checkFullFile(FileSystem fs, Path name, int len, final byte[] compareContent, String message, boolean checkFileStatus) throws IOException {
    if (checkFileStatus) {
        final FileStatus status = fs.getFileStatus(name);
        assertEquals("len=" + len + " but status.getLen()=" + status.getLen(), len, status.getLen());
    }
    FSDataInputStream stm = fs.open(name);
    byte[] actual = new byte[len];
    stm.readFully(0, actual);
    checkData(actual, 0, compareContent, message);
    stm.close();
}
Also used : FileStatus(org.apache.hadoop.fs.FileStatus) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream)
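
Unlike read(), the positional readFully(position, buffer) used here either fills the entire buffer or throws EOFException; there is no short-read case, which is what keeps checkFullFile free of retry loops. A small sketch of that contract (the helper name and error wrapping are illustrative):

import java.io.EOFException;
import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

static byte[] readExactly(FileSystem fs, Path p, int len) throws IOException {
    byte[] buf = new byte[len];
    try (FSDataInputStream in = fs.open(p)) {
        // Fills buf completely from offset 0 or throws EOFException if the
        // file holds fewer than len bytes; partial results never escape.
        in.readFully(0, buf);
    } catch (EOFException e) {
        throw new IOException("file " + p + " is shorter than " + len + " bytes", e);
    }
    return buf;
}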

Aggregations

FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 431
Path (org.apache.hadoop.fs.Path): 271
FileSystem (org.apache.hadoop.fs.FileSystem): 143
Test (org.junit.Test): 135
IOException (java.io.IOException): 125
Configuration (org.apache.hadoop.conf.Configuration): 94
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 93
FileStatus (org.apache.hadoop.fs.FileStatus): 62
InputStreamReader (java.io.InputStreamReader): 37
BufferedReader (java.io.BufferedReader): 36
FileNotFoundException (java.io.FileNotFoundException): 26
IgfsPath (org.apache.ignite.igfs.IgfsPath): 26
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 21
ArrayList (java.util.ArrayList): 20
Random (java.util.Random): 19
EOFException (java.io.EOFException): 18
HashMap (java.util.HashMap): 16
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 15
URI (java.net.URI): 14
File (java.io.File): 13