Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.
The class TestFsck, method testFsckMissingReplicas.
/**
* Tests that the number of missing block replicas and the number of
* expected replicas are reported correctly.
* @throws IOException
*/
@Test
public void testFsckMissingReplicas() throws IOException {
// Desired replication factor
// Set this higher than numReplicas so it's under-replicated
final short replFactor = 2;
// Number of replicas to actually start
final short numReplicas = 1;
// Number of blocks to write
final short numBlocks = 3;
// Set a small-ish blocksize
final long blockSize = 512;
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
DistributedFileSystem dfs = null;
// Startup a minicluster
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numReplicas).build();
assertNotNull("Failed Cluster Creation", cluster);
cluster.waitClusterUp();
dfs = cluster.getFileSystem();
assertNotNull("Failed to get FileSystem", dfs);
// Create a file that will be intentionally under-replicated
final String pathString = "/testfile";
final Path path = new Path(pathString);
long fileLen = blockSize * numBlocks;
DFSTestUtil.createFile(dfs, path, fileLen, replFactor, 1);
// The file is now under-replicated; gather what fsck needs from the NameNode
NameNode namenode = cluster.getNameNode();
NetworkTopology nettop = cluster.getNamesystem().getBlockManager().getDatanodeManager().getNetworkTopology();
Map<String, String[]> pmap = new HashMap<String, String[]>();
Writer result = new StringWriter();
PrintWriter out = new PrintWriter(result, true);
InetAddress remoteAddress = InetAddress.getLocalHost();
NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, numReplicas, remoteAddress);
// Run the fsck and check the Result
final HdfsFileStatus file = namenode.getRpcServer().getFileInfo(pathString);
assertNotNull(file);
Result replRes = new ReplicationResult(conf);
Result ecRes = new ErasureCodingResult(conf);
fsck.check(pathString, file, replRes, ecRes);
// Also print the output from the fsck, for ex post facto sanity checks
System.out.println(result.toString());
assertEquals(replRes.missingReplicas, (numBlocks * replFactor) - (numBlocks * numReplicas));
assertEquals(replRes.numExpectedReplicas, numBlocks * replFactor);
}
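The assertions above reduce to arithmetic on metadata that HdfsFileStatus already carries. As a minimal standalone sketch (not part of the Hadoop test suite), assuming a reachable NameNode at a hypothetical hdfs://localhost:8020 and a hypothetical /testfile path, one could compute the expected replica count directly from a file's status:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

import java.io.IOException;
import java.net.URI;

public class ExpectedReplicaSketch {
  public static void main(String[] args) throws IOException {
    // Hypothetical NameNode URI and path; adjust for the target cluster.
    Configuration conf = new Configuration();
    try (DFSClient client = new DFSClient(URI.create("hdfs://localhost:8020"), conf)) {
      HdfsFileStatus status = client.getFileInfo("/testfile");
      if (status == null) {
        System.err.println("File does not exist");
        return;
      }
      // Number of blocks, rounded up from file length and block size.
      long blocks = (status.getLen() + status.getBlockSize() - 1) / status.getBlockSize();
      // Expected replicas across the whole file, matching numBlocks * replFactor in the test.
      long expectedReplicas = blocks * status.getReplication();
      System.out.println("blocks=" + blocks + " expectedReplicas=" + expectedReplicas);
    }
  }
}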
Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.
The class TestHDFSConcat, method testConcatNotCompleteBlock.
// Test case where the final block is not of full length
@Test
public void testConcatNotCompleteBlock() throws IOException {
long trgFileLen = blockSize * 3;
// block at the end - not full
long srcFileLen = blockSize * 3 + 20;
// create first file
String name1 = "/trg", name2 = "/src";
Path filePath1 = new Path(name1);
DFSTestUtil.createFile(dfs, filePath1, trgFileLen, REPL_FACTOR, 1);
HdfsFileStatus fStatus = nn.getFileInfo(name1);
long fileLen = fStatus.getLen();
assertEquals(fileLen, trgFileLen);
//read the file
FSDataInputStream stm = dfs.open(filePath1);
byte[] byteFile1 = new byte[(int) trgFileLen];
stm.readFully(0, byteFile1);
stm.close();
LocatedBlocks lb1 = nn.getBlockLocations(name1, 0, trgFileLen);
Path filePath2 = new Path(name2);
DFSTestUtil.createFile(dfs, filePath2, srcFileLen, REPL_FACTOR, 1);
fStatus = nn.getFileInfo(name2);
fileLen = fStatus.getLen();
assertEquals(srcFileLen, fileLen);
// read the file
stm = dfs.open(filePath2);
byte[] byteFile2 = new byte[(int) srcFileLen];
stm.readFully(0, byteFile2);
stm.close();
LocatedBlocks lb2 = nn.getBlockLocations(name2, 0, srcFileLen);
System.out.println("trg len=" + trgFileLen + "; src len=" + srcFileLen);
// move the blocks
dfs.concat(filePath1, new Path[] { filePath2 });
long totalLen = trgFileLen + srcFileLen;
fStatus = nn.getFileInfo(name1);
fileLen = fStatus.getLen();
// read the resulting file
stm = dfs.open(filePath1);
byte[] byteFileConcat = new byte[(int) fileLen];
stm.readFully(0, byteFileConcat);
stm.close();
LocatedBlocks lbConcat = nn.getBlockLocations(name1, 0, fileLen);
//verifications
// 1. number of blocks
assertEquals(lbConcat.locatedBlockCount(), lb1.locatedBlockCount() + lb2.locatedBlockCount());
// 2. file lengths
System.out.println("file1 len=" + fileLen + "; total len=" + totalLen);
assertEquals(fileLen, totalLen);
// 3. removal of the src file
fStatus = nn.getFileInfo(name2);
// file shouldn't exist
assertNull("File " + name2 + "still exists", fStatus);
// 4. content
checkFileContent(byteFileConcat, new byte[][] { byteFile1, byteFile2 });
}
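The same HdfsFileStatus-based length and existence checks can back a concat outside of a test. The following is a rough sketch, not the project's code: it assumes an existing DistributedFileSystem handle, hypothetical trg/src paths, and that both files already exist and satisfy concat's preconditions.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

import java.io.IOException;

public class ConcatLengthCheck {
  // Concatenates src onto trg and re-checks the NameNode-side metadata.
  static void concatAndVerify(DistributedFileSystem dfs, Path trg, Path src)
      throws IOException {
    DFSClient client = dfs.getClient();
    long trgLen = client.getFileInfo(trg.toUri().getPath()).getLen();
    long srcLen = client.getFileInfo(src.toUri().getPath()).getLen();
    dfs.concat(trg, new Path[] { src });
    // The target's new length should be the sum of both inputs.
    HdfsFileStatus after = client.getFileInfo(trg.toUri().getPath());
    if (after.getLen() != trgLen + srcLen) {
      throw new IOException("Unexpected length after concat: " + after.getLen());
    }
    // The source file must be gone once its blocks have been moved.
    if (client.getFileInfo(src.toUri().getPath()) != null) {
      throw new IOException("Source still exists after concat: " + src);
    }
  }
}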
Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.
The class TestINodeFile, method testInodeId.
/**
* This test verifies inode ID counter and inode map functionality.
*/
@Test
public void testInodeId() throws IOException {
Configuration conf = new Configuration();
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
FSNamesystem fsn = cluster.getNamesystem();
long lastId = fsn.dir.getLastInodeId();
// Ensure root has the correct inode ID
// Last inode ID should be root inode ID and inode map size should be 1
int inodeCount = 1;
long expectedLastInodeId = INodeId.ROOT_INODE_ID;
assertEquals(fsn.dir.rootDir.getId(), INodeId.ROOT_INODE_ID);
assertEquals(expectedLastInodeId, lastId);
assertEquals(inodeCount, fsn.dir.getInodeMapSize());
// Create a directory
// Last inode ID and inode map size should increase by 1
FileSystem fs = cluster.getFileSystem();
Path path = new Path("/test1");
assertTrue(fs.mkdirs(path));
assertEquals(++expectedLastInodeId, fsn.dir.getLastInodeId());
assertEquals(++inodeCount, fsn.dir.getInodeMapSize());
// Create a file
// Last inode ID and inode map size should increase by 1
NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
DFSTestUtil.createFile(fs, new Path("/test1/file"), 1024, (short) 1, 0);
assertEquals(++expectedLastInodeId, fsn.dir.getLastInodeId());
assertEquals(++inodeCount, fsn.dir.getInodeMapSize());
// Ensure right inode ID is returned in file status
HdfsFileStatus fileStatus = nnrpc.getFileInfo("/test1/file");
assertEquals(expectedLastInodeId, fileStatus.getFileId());
// Rename a directory
// Last inode ID and inode map size should not change
Path renamedPath = new Path("/test2");
assertTrue(fs.rename(path, renamedPath));
assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
assertEquals(inodeCount, fsn.dir.getInodeMapSize());
// Delete test2/file and test2 and ensure inode map size decreases
assertTrue(fs.delete(renamedPath, true));
inodeCount -= 2;
assertEquals(inodeCount, fsn.dir.getInodeMapSize());
// Create /test1/file1 and /test1/file2, then concat them
String file1 = "/test1/file1";
String file2 = "/test1/file2";
DFSTestUtil.createFile(fs, new Path(file1), 512, (short) 1, 0);
DFSTestUtil.createFile(fs, new Path(file2), 512, (short) 1, 0);
// test1, file1 and file2 are created
inodeCount += 3;
expectedLastInodeId += 3;
assertEquals(inodeCount, fsn.dir.getInodeMapSize());
assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
// Concat the /test1/file1 /test1/file2 into /test1/file2
nnrpc.concat(file2, new String[] { file1 });
// file1 is concatenated into file2, so one inode is removed
inodeCount--;
assertEquals(inodeCount, fsn.dir.getInodeMapSize());
assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
assertTrue(fs.delete(new Path("/test1"), true));
// test1 and file2 are deleted
inodeCount -= 2;
assertEquals(inodeCount, fsn.dir.getInodeMapSize());
// Make sure editlog is loaded correctly
cluster.restartNameNode();
cluster.waitActive();
fsn = cluster.getNamesystem();
assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
assertEquals(inodeCount, fsn.dir.getInodeMapSize());
// Create two inodes test2 and test2/file2
DFSTestUtil.createFile(fs, new Path("/test2/file2"), 1024, (short) 1, 0);
expectedLastInodeId += 2;
inodeCount += 2;
assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
assertEquals(inodeCount, fsn.dir.getInodeMapSize());
// create /test3, and /test3/file.
// /test3/file is a file under construction
FSDataOutputStream outStream = fs.create(new Path("/test3/file"));
assertNotNull(outStream);
expectedLastInodeId += 2;
inodeCount += 2;
assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
assertEquals(inodeCount, fsn.dir.getInodeMapSize());
// Apply editlogs to fsimage, ensure inodeUnderConstruction is handled
fsn.enterSafeMode(false);
fsn.saveNamespace(0, 0);
fsn.leaveSafeMode(false);
outStream.close();
// The lastInodeId in fsimage should remain the same after reboot
cluster.restartNameNode();
cluster.waitActive();
fsn = cluster.getNamesystem();
assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
assertEquals(inodeCount, fsn.dir.getInodeMapSize());
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
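The inode-ID assertions above work because HdfsFileStatus exposes the NameNode-assigned ID through getFileId(). A small helper sketch, assuming an already-constructed DFSClient (the class and method names here are hypothetical, not part of Hadoop):

import java.io.IOException;

import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

public class InodeIdLookup {
  // Returns the NameNode-assigned inode ID for a path, or -1 if the path
  // does not exist. The ID survives renames, which is the property the
  // rename assertions in testInodeId rely on.
  static long inodeIdOf(DFSClient client, String path) throws IOException {
    HdfsFileStatus status = client.getFileInfo(path);
    return status == null ? -1 : status.getFileId();
  }
}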
Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.
The class WebHdfsFileSystem, method getHdfsFileStatus.
private HdfsFileStatus getHdfsFileStatus(Path f) throws IOException {
final HttpOpParam.Op op = GetOpParam.Op.GETFILESTATUS;
HdfsFileStatus status = new FsPathResponseRunner<HdfsFileStatus>(op, f) {
@Override
HdfsFileStatus decodeResponse(Map<?, ?> json) {
return JsonUtilClient.toFileStatus(json, true);
}
}.run();
if (status == null) {
throw new FileNotFoundException("File does not exist: " + f);
}
return status;
}
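Callers of WebHdfsFileSystem only ever see the public FileStatus view built from this helper's result. The sketch below shows, in simplified and hedged form, how an HdfsFileStatus can be converted into a FileStatus; the real WebHdfsFileSystem conversion differs in details such as symlink handling and qualifying the path against the filesystem URI.

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

public class HdfsFileStatusView {
  // Builds the public FileStatus view of an HDFS-internal HdfsFileStatus,
  // resolving the entry's local name against the directory it came from.
  // Simplified: symlinks and URI qualification are ignored here.
  static FileStatus toFileStatus(HdfsFileStatus status, Path parent) {
    return new FileStatus(status.getLen(), status.isDir(),
        status.getReplication(), status.getBlockSize(),
        status.getModificationTime(), status.getAccessTime(),
        status.getPermission(), status.getOwner(), status.getGroup(),
        status.getFullPath(parent));
  }
}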
Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.
The class RpcProgramNfs3, method readdir.
public READDIR3Response readdir(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
READDIR3Response response = new READDIR3Response(Nfs3Status.NFS3_OK);
if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
response.setStatus(Nfs3Status.NFS3ERR_ACCES);
return response;
}
DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
if (dfsClient == null) {
response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
return response;
}
READDIR3Request request;
try {
request = READDIR3Request.deserialize(xdr);
} catch (IOException e) {
LOG.error("Invalid READDIR request");
return new READDIR3Response(Nfs3Status.NFS3ERR_INVAL);
}
FileHandle handle = request.getHandle();
long cookie = request.getCookie();
if (cookie < 0) {
LOG.error("Invalid READDIR request, with negative cookie: " + cookie);
return new READDIR3Response(Nfs3Status.NFS3ERR_INVAL);
}
long count = request.getCount();
if (count <= 0) {
LOG.info("Nonpositive count in invalid READDIR request: " + count);
return new READDIR3Response(Nfs3Status.NFS3_OK);
}
if (LOG.isDebugEnabled()) {
LOG.debug("NFS READDIR fileId: " + handle.getFileId() + " cookie: " + cookie + " count: " + count + " client: " + remoteAddress);
}
HdfsFileStatus dirStatus;
DirectoryListing dlisting;
Nfs3FileAttributes postOpAttr;
long dotdotFileId = 0;
try {
String dirFileIdPath = Nfs3Utils.getFileIdPath(handle);
dirStatus = dfsClient.getFileInfo(dirFileIdPath);
if (dirStatus == null) {
LOG.info("Can't get path for fileId: " + handle.getFileId());
return new READDIR3Response(Nfs3Status.NFS3ERR_STALE);
}
if (!dirStatus.isDir()) {
LOG.error("Can't readdir for regular file, fileId: " + handle.getFileId());
return new READDIR3Response(Nfs3Status.NFS3ERR_NOTDIR);
}
long cookieVerf = request.getCookieVerf();
if ((cookieVerf != 0) && (cookieVerf != dirStatus.getModificationTime())) {
if (aixCompatMode) {
// The AIX NFS client misinterprets RFC-1813 and will repeatedly send
// the same cookieverf value even across VFS-level readdir calls,
// instead of getting a new cookieverf for every VFS-level readdir
// call, and reusing the cookieverf only in the event that multiple
// incremental NFS-level readdir calls must be made to fetch all of
// the directory entries. This means that whenever a readdir call is
// made by an AIX NFS client for a given directory, and that directory
// is subsequently modified, thus changing its mtime, no later readdir
// calls will succeed from AIX for that directory until the FS is
// unmounted/remounted. See HDFS-6549 for more info.
LOG.warn("AIX compatibility mode enabled, ignoring cookieverf " + "mismatches.");
} else {
LOG.error("CookieVerf mismatch. request cookieVerf: " + cookieVerf + " dir cookieVerf: " + dirStatus.getModificationTime());
return new READDIR3Response(Nfs3Status.NFS3ERR_BAD_COOKIE, Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug));
}
}
if (cookie == 0) {
// Get dotdot fileId
String dotdotFileIdPath = dirFileIdPath + "/..";
HdfsFileStatus dotdotStatus = dfsClient.getFileInfo(dotdotFileIdPath);
if (dotdotStatus == null) {
// This should not happen
throw new IOException("Can't get path for handle path: " + dotdotFileIdPath);
}
dotdotFileId = dotdotStatus.getFileId();
}
// Get the list from the resume point
byte[] startAfter;
if (cookie == 0) {
startAfter = HdfsFileStatus.EMPTY_NAME;
} else {
String inodeIdPath = Nfs3Utils.getFileIdPath(cookie);
startAfter = inodeIdPath.getBytes(Charset.forName("UTF-8"));
}
dlisting = listPaths(dfsClient, dirFileIdPath, startAfter);
postOpAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
if (postOpAttr == null) {
LOG.error("Can't get path for fileId: " + handle.getFileId());
return new READDIR3Response(Nfs3Status.NFS3ERR_STALE);
}
} catch (IOException e) {
LOG.warn("Exception ", e);
int status = mapErrorStatus(e);
return new READDIR3Response(status);
}
/**
* Set up the dirents in the response. fileId is used as the cookie, with one
* exception; without it, the Linux client can either get stuck on the "ls"
* command (on RHEL) or report "Too many levels of symbolic links" (Ubuntu).
*
* The problem is that only two items, "." and "..", are returned when the
* namespace is empty. Both of them are "/" with the same cookie (the root
* fileId), and the Linux client doesn't treat such a directory as a real
* directory. Even though the NFS protocol specifies that the cookie is
* opaque data, the Linux client doesn't accept an empty directory that
* returns the same cookie for both "." and "..".
*
* The workaround is to use 0 as the cookie for "." and always return "." as
* the first entry in the readdir/readdirplus response.
*/
HdfsFileStatus[] fstatus = dlisting.getPartialListing();
int n = (int) Math.min(fstatus.length, count - 2);
boolean eof = (n >= fstatus.length) && !dlisting.hasMore();
Entry3[] entries;
if (cookie == 0) {
entries = new Entry3[n + 2];
entries[0] = new READDIR3Response.Entry3(postOpAttr.getFileId(), ".", 0);
entries[1] = new READDIR3Response.Entry3(dotdotFileId, "..", dotdotFileId);
for (int i = 2; i < n + 2; i++) {
entries[i] = new READDIR3Response.Entry3(fstatus[i - 2].getFileId(), fstatus[i - 2].getLocalName(), fstatus[i - 2].getFileId());
}
} else {
// Resume from the last readdir. If the cookie is "..", the resulting
// list covers the whole directory content, since HDFS uses the name as
// the resume point.
entries = new Entry3[n];
for (int i = 0; i < n; i++) {
entries[i] = new READDIR3Response.Entry3(fstatus[i].getFileId(), fstatus[i].getLocalName(), fstatus[i].getFileId());
}
}
DirList3 dirList = new READDIR3Response.DirList3(entries, eof);
return new READDIR3Response(Nfs3Status.NFS3_OK, postOpAttr, dirStatus.getModificationTime(), dirList);
}
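The cookie handling above is a specialization of the usual HDFS listing pattern: start after HdfsFileStatus.EMPTY_NAME and resume from a cursor on each subsequent call. A condensed sketch of that general pattern, assuming an open DFSClient and using DirectoryListing.getLastName() as the cursor (the NFS code instead derives its cursor from the fileId path):

import java.io.IOException;

import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

public class ListDirectoryPaged {
  // Lists every entry of a directory in pages. The first call starts after
  // HdfsFileStatus.EMPTY_NAME; each later call resumes after the last name
  // returned, which is the same cursor idea the NFS readdir implements above.
  static void listAll(DFSClient client, String dir) throws IOException {
    byte[] startAfter = HdfsFileStatus.EMPTY_NAME;
    DirectoryListing listing;
    do {
      listing = client.listPaths(dir, startAfter);
      if (listing == null) {
        // Directory was removed between calls.
        return;
      }
      for (HdfsFileStatus status : listing.getPartialListing()) {
        System.out.println(status.getLocalName() + " fileId=" + status.getFileId());
      }
      startAfter = listing.getLastName();
    } while (listing.hasMore());
  }
}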