Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class RpcProgramNfs3, method read.
@VisibleForTesting
READ3Response read(XDR xdr, SecurityHandler securityHandler,
    SocketAddress remoteAddress) {
  READ3Response response = new READ3Response(Nfs3Status.NFS3_OK);
  final String userName = securityHandler.getUser();
  if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
    response.setStatus(Nfs3Status.NFS3ERR_ACCES);
    return response;
  }

  DFSClient dfsClient = clientCache.getDfsClient(userName);
  if (dfsClient == null) {
    response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
    return response;
  }

  READ3Request request;
  try {
    request = READ3Request.deserialize(xdr);
  } catch (IOException e) {
    LOG.error("Invalid READ request");
    return new READ3Response(Nfs3Status.NFS3ERR_INVAL);
  }

  long offset = request.getOffset();
  int count = request.getCount();
  FileHandle handle = request.getHandle();
  if (LOG.isDebugEnabled()) {
    LOG.debug("NFS READ fileId: " + handle.getFileId() + " offset: " + offset
        + " count: " + count + " client: " + remoteAddress);
  }

  Nfs3FileAttributes attrs;
  boolean eof;
  if (count == 0) {
    // Only do access check.
    try {
      // Don't read from cache. Client may not have read permission.
      attrs = Nfs3Utils.getFileAttr(dfsClient,
          Nfs3Utils.getFileIdPath(handle), iug);
    } catch (IOException e) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Get error accessing file, fileId: " + handle.getFileId(), e);
      }
      return new READ3Response(Nfs3Status.NFS3ERR_IO);
    }
    if (attrs == null) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Can't get path for fileId: " + handle.getFileId());
      }
      return new READ3Response(Nfs3Status.NFS3ERR_NOENT);
    }
    int access = Nfs3Utils.getAccessRightsForUserGroup(
        securityHandler.getUid(), securityHandler.getGid(),
        securityHandler.getAuxGids(), attrs);
    if ((access & Nfs3Constant.ACCESS3_READ) != 0) {
      eof = offset >= attrs.getSize();
      return new READ3Response(Nfs3Status.NFS3_OK, attrs, 0, eof,
          ByteBuffer.wrap(new byte[0]));
    } else {
      return new READ3Response(Nfs3Status.NFS3ERR_ACCES);
    }
  }

  // In case there is buffered data for the same file, flush it. This can be
  // optimized later by reading from the cache.
  int ret = writeManager.commitBeforeRead(dfsClient, handle, offset + count);
  if (ret != Nfs3Status.NFS3_OK) {
    LOG.warn("commitBeforeRead didn't succeed with ret=" + ret
        + ". Read may not get most recent data.");
  }

  try {
    int rtmax = config.getInt(NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_KEY,
        NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_DEFAULT);
    int buffSize = Math.min(rtmax, count);
    byte[] readbuffer = new byte[buffSize];

    int readCount = 0;
    /*
     * Retry exactly once because the DFSInputStream can be stale.
     */
    for (int i = 0; i < 1; ++i) {
      FSDataInputStream fis = clientCache.getDfsInputStream(userName,
          Nfs3Utils.getFileIdPath(handle));
      if (fis == null) {
        return new READ3Response(Nfs3Status.NFS3ERR_ACCES);
      }

      try {
        readCount = fis.read(offset, readbuffer, 0, count);
        metrics.incrBytesRead(readCount);
      } catch (IOException e) {
        // TODO: A cleaner way is to throw a new type of exception
        // which requires incompatible changes.
        if (e.getMessage().equals("Stream closed")) {
          clientCache.invalidateDfsInputStream(userName,
              Nfs3Utils.getFileIdPath(handle));
          continue;
        } else {
          throw e;
        }
      }
    }

    attrs = Nfs3Utils.getFileAttr(dfsClient, Nfs3Utils.getFileIdPath(handle),
        iug);
    if (readCount < count) {
      LOG.info("Partial read. Asked offset: " + offset + " count: " + count
          + " and read back: " + readCount + " file size: " + attrs.getSize());
    }
    // HDFS returns -1 for read beyond file size.
    if (readCount < 0) {
      readCount = 0;
    }
    eof = (offset + readCount) >= attrs.getSize();
    return new READ3Response(Nfs3Status.NFS3_OK, attrs, readCount, eof,
        ByteBuffer.wrap(readbuffer));
  } catch (IOException e) {
    LOG.warn("Read error: " + e.getClass() + " offset: " + offset
        + " count: " + count, e);
    int status = mapErrorStatus(e);
    return new READ3Response(status);
  }
}
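The key call above is fis.read(offset, readbuffer, 0, count): this is FSDataInputStream's positional read, from the PositionedReadable interface. It reads at an absolute file offset without moving the stream's seek pointer, which is what lets the NFS gateway serve out-of-order READ requests from a single cached stream. A minimal sketch of those semantics, assuming a hypothetical path that already exists and is larger than 1 KB:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PreadDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Hypothetical file, assumed to exist with more than 1 KB of data.
    try (FSDataInputStream in = fs.open(new Path("/tmp/pread-demo"))) {
      byte[] buf = new byte[512];
      // Positional read: fetches up to 512 bytes starting at offset 1024
      // and returns the count actually read (-1 past end of file) ...
      int n = in.read(1024L, buf, 0, buf.length);
      // ... without advancing the stream's own position.
      System.out.println("read " + n + " bytes, pos still " + in.getPos());
    }
  }
}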
Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class TestRpcProgramNfs3, method getFileContentsUsingDfs.
private byte[] getFileContentsUsingDfs(String fileName, int len)
    throws Exception {
  final FSDataInputStream in = hdfs.open(new Path(fileName));
  final byte[] ret = new byte[len];
  in.readFully(ret);
  try {
    in.readByte();
    Assert.fail("expected end of file");
  } catch (EOFException e) {
    // expected. Unfortunately there is no associated message to check
  }
  in.close();
  return ret;
}
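The helper reads exactly len bytes with readFully, which throws EOFException if the file is shorter, and then probes for trailing data with readByte(), whose EOFException confirms the file is exactly len bytes long. An exception-free variant of the same end-of-file probe, sketched as a hypothetical helper: InputStream.read() returns -1 only at end of file.

import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical variant of the helper above: same contract, but the
// end-of-file probe uses read()'s -1 return value instead of EOFException.
private static byte[] readExactly(FileSystem fs, String fileName, int len)
    throws IOException {
  try (FSDataInputStream in = fs.open(new Path(fileName))) {
    final byte[] ret = new byte[len];
    in.readFully(ret); // still throws EOFException if fewer than len bytes
    if (in.read() != -1) {
      throw new IOException(fileName + " is longer than " + len + " bytes");
    }
    return ret;
  }
}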
Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class AppendTestUtil, method check.
public static void check(FileSystem fs, Path p, long length)
    throws IOException {
  int i = -1;
  try {
    final FileStatus status = fs.getFileStatus(p);
    FSDataInputStream in = fs.open(p);
    if (in.getWrappedStream() instanceof DFSInputStream) {
      long len = ((DFSInputStream) in.getWrappedStream()).getFileLength();
      assertEquals(length, len);
    } else {
      assertEquals(length, status.getLen());
    }

    for (i++; i < length; i++) {
      assertEquals((byte) i, (byte) in.read());
    }
    i = -(int) length;
    //EOF
    assertEquals(-1, in.read());
    in.close();
  } catch (IOException ioe) {
    throw new IOException("p=" + p + ", length=" + length + ", i=" + i, ioe);
  }
}
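check compares against DFSInputStream.getFileLength() when the stream is backed by HDFS because the client-side length includes data in the last, possibly still-being-written block, whereas FileStatus.getLen() reflects only the NameNode's view and can lag during an append. The same pattern can be distilled into a helper, sketched here with a hypothetical name:

import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSInputStream;

// Hypothetical helper distilling the pattern above: prefer the client-side
// length from DFSInputStream (visible even mid-append), fall back to the
// NameNode's FileStatus for other filesystems.
static long visibleLength(FileSystem fs, Path p) throws IOException {
  try (FSDataInputStream in = fs.open(p)) {
    if (in.getWrappedStream() instanceof DFSInputStream) {
      return ((DFSInputStream) in.getWrappedStream()).getFileLength();
    }
  }
  return fs.getFileStatus(p).getLen();
}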
Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class DFSTestUtil, method checkFiles.
/** check if the files have been copied correctly. */
public boolean checkFiles(FileSystem fs, String topdir) throws IOException {
  Path root = new Path(topdir);

  for (int idx = 0; idx < nFiles; idx++) {
    Path fPath = new Path(root, files[idx].getName());
    try (FSDataInputStream in = fs.open(fPath)) {
      byte[] toRead = new byte[files[idx].getSize()];
      byte[] toCompare = new byte[files[idx].getSize()];
      Random rb = new Random(files[idx].getSeed());
      rb.nextBytes(toCompare);
      in.readFully(0, toRead);
      for (int i = 0; i < toRead.length; i++) {
        if (toRead[i] != toCompare[i]) {
          return false;
        }
      }
    }
  }
  return true;
}
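in.readFully(0, toRead) is the positional readFully overload: it keeps reading at the given offset until the buffer is full and throws EOFException if the file ends first, so a successful return guarantees toRead holds exactly getSize() bytes. The byte-by-byte comparison can equivalently use java.util.Arrays.equals, as in this sketch of a hypothetical single-file variant:

import java.io.IOException;
import java.util.Arrays;
import java.util.Random;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical single-file variant of the check above; seed and size are
// whatever was used when the file was generated.
static boolean matchesSeededContent(FileSystem fs, Path p, long seed, int size)
    throws IOException {
  byte[] expected = new byte[size];
  new Random(seed).nextBytes(expected);
  byte[] actual = new byte[size];
  try (FSDataInputStream in = fs.open(p)) {
    // Positional readFully: fills 'actual' from offset 0 or throws
    // EOFException if the file is shorter than 'size' bytes.
    in.readFully(0, actual);
  }
  return Arrays.equals(expected, actual);
}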
Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class AppendTestUtil, method checkFullFile.
public static void checkFullFile(FileSystem fs, Path name, int len,
    final byte[] compareContent, String message,
    boolean checkFileStatus) throws IOException {
  if (checkFileStatus) {
    final FileStatus status = fs.getFileStatus(name);
    assertEquals("len=" + len + " but status.getLen()=" + status.getLen(),
        len, status.getLen());
  }

  FSDataInputStream stm = fs.open(name);
  byte[] actual = new byte[len];
  stm.readFully(0, actual);
  checkData(actual, 0, compareContent, message);
  stm.close();
}
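A hedged usage sketch of checkFullFile, assuming AppendTestUtil is on the classpath: write known bytes, then verify both the contents and the reported length in one call. The path and seed are hypothetical.

import java.util.Random;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.AppendTestUtil;

// Hypothetical caller: create a file with known random content, then
// verify contents and FileStatus length together.
static void writeAndVerify(FileSystem fs) throws Exception {
  byte[] expected = new byte[1024];
  new Random(0xCAFE).nextBytes(expected);
  Path p = new Path("/tmp/append-check");
  try (FSDataOutputStream out = fs.create(p)) {
    out.write(expected);
  }
  AppendTestUtil.checkFullFile(fs, p, expected.length, expected,
      "content after create+write", true);
}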