Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.
The class FSNamesystem, method startFile.
/**
 * Create a new file entry in the namespace.
 *
 * For description of parameters and exceptions thrown see
 * {@link ClientProtocol#create}, except it returns valid file status upon
 * success
 */
HdfsFileStatus startFile(String src, PermissionStatus permissions,
    String holder, String clientMachine, EnumSet<CreateFlag> flag,
    boolean createParent, short replication, long blockSize,
    CryptoProtocolVersion[] supportedVersions, boolean logRetryCache)
    throws IOException {
  HdfsFileStatus status;
  try {
    status = startFileInt(src, permissions, holder, clientMachine, flag,
        createParent, replication, blockSize, supportedVersions,
        logRetryCache);
  } catch (AccessControlException e) {
    logAuditEvent(false, "create", src);
    throw e;
  }
  logAuditEvent(true, "create", src, null, status);
  return status;
}
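On the client side, this code path is reached through the ordinary FileSystem create call, which the NameNode serves via FSNamesystem#startFile. The following is a minimal sketch, not taken from the Hadoop sources: the path /tmp/example.txt is hypothetical, and fs.defaultFS is assumed to point at a running HDFS cluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CreateFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumption: fs.defaultFS points at a running HDFS cluster.
    FileSystem fs = FileSystem.get(conf);
    Path file = new Path("/tmp/example.txt"); // hypothetical path
    // create(path, overwrite=false) is routed to the NameNode, where
    // FSNamesystem#startFile creates the namespace entry and logs the
    // "create" audit event with the resulting file status.
    try (FSDataOutputStream out = fs.create(file, false)) {
      out.writeBytes("hello");
    }
    FileStatus status = fs.getFileStatus(file);
    System.out.println("owner=" + status.getOwner()
        + " len=" + status.getLen());
  }
}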
Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.
The class FSNamesystem, method removeXAttr.
void removeXAttr(String src, XAttr xAttr, boolean logRetryCache)
    throws IOException {
  final String operationName = "removeXAttr";
  HdfsFileStatus auditStat = null;
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot remove XAttr entry on " + src);
    auditStat = FSDirXAttrOp.removeXAttr(dir, src, xAttr, logRetryCache);
  } catch (AccessControlException e) {
    logAuditEvent(false, operationName, src);
    throw e;
  } finally {
    writeUnlock(operationName);
  }
  getEditLog().logSync();
  logAuditEvent(true, operationName, src, null, auditStat);
}
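The client-side counterpart of this method is FileSystem#removeXAttr. A minimal sketch, assuming an HDFS default filesystem; the path and attribute name are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class XAttrSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumption: fs.defaultFS points at a running HDFS cluster.
    FileSystem fs = FileSystem.get(conf);
    Path file = new Path("/tmp/xattr-demo"); // hypothetical path
    fs.create(file, false).close();
    // Set a user-namespace extended attribute, then remove it; the remove
    // request is handled on the NameNode by FSNamesystem#removeXAttr.
    fs.setXAttr(file, "user.demo", "value".getBytes("UTF-8"));
    fs.removeXAttr(file, "user.demo");
  }
}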
Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.
The class TestFileStatus, method testGetFileInfo.
/** Test calling getFileInfo directly on the client */
@Test
public void testGetFileInfo() throws IOException {
  // Check that / exists
  Path path = new Path("/");
  assertTrue("/ should be a directory",
      fs.getFileStatus(path).isDirectory());
  // Make sure getFileInfo returns null for files which do not exist
  HdfsFileStatus fileInfo = dfsClient.getFileInfo("/noSuchFile");
  assertEquals("Non-existant file should result in null", null, fileInfo);
  Path path1 = new Path("/name1");
  Path path2 = new Path("/name1/name2");
  assertTrue(fs.mkdirs(path1));
  FSDataOutputStream out = fs.create(path2, false);
  out.close();
  fileInfo = dfsClient.getFileInfo(path1.toString());
  assertEquals(1, fileInfo.getChildrenNum());
  fileInfo = dfsClient.getFileInfo(path2.toString());
  assertEquals(0, fileInfo.getChildrenNum());
  // Test getFileInfo throws the right exception given a non-absolute path.
  try {
    dfsClient.getFileInfo("non-absolute");
    fail("getFileInfo for a non-absolute path did not throw IOException");
  } catch (RemoteException re) {
    assertTrue("Wrong exception for invalid file name: " + re,
        re.toString().contains("Absolute path required"));
  }
}
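For comparison, the public FileSystem API surfaces the same information differently: where DFSClient#getFileInfo returns null for a missing path, FileSystem#getFileStatus throws FileNotFoundException. A minimal sketch, assuming a running HDFS cluster; the path is hypothetical:

import java.io.FileNotFoundException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileInfoSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path missing = new Path("/noSuchFile");
    try {
      FileStatus status = fs.getFileStatus(missing);
      System.out.println("exists: " + status.getPath());
    } catch (FileNotFoundException e) {
      // DFSClient#getFileInfo would have returned null here instead.
      System.out.println(missing + " does not exist");
    }
  }
}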
Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.
The class TestFsck, method testECFsck.
@Test
public void testECFsck() throws Exception {
  FileSystem fs = null;
  final long precision = 1L;
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,
      precision);
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
  conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
      StripedFileTestUtil.getDefaultECPolicy().getName());
  int dataBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumDataUnits();
  int parityBlocks =
      StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits();
  int totalSize = dataBlocks + parityBlocks;
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(totalSize).build();
  fs = cluster.getFileSystem();
  // create a contiguous file
  Path replDirPath = new Path("/replicated");
  Path replFilePath = new Path(replDirPath, "replfile");
  final short factor = 3;
  DFSTestUtil.createFile(fs, replFilePath, 1024, factor, 0);
  DFSTestUtil.waitReplication(fs, replFilePath, factor);
  // create a large striped file
  Path ecDirPath = new Path("/striped");
  Path largeFilePath = new Path(ecDirPath, "largeFile");
  DFSTestUtil.createStripedFile(cluster, largeFilePath, ecDirPath, 1, 2, true);
  // create a small striped file
  Path smallFilePath = new Path(ecDirPath, "smallFile");
  DFSTestUtil.writeFile(fs, smallFilePath, "hello world!");
  long replTime = fs.getFileStatus(replFilePath).getAccessTime();
  long ecTime = fs.getFileStatus(largeFilePath).getAccessTime();
  Thread.sleep(precision);
  setupAuditLogs();
  String outStr = runFsck(conf, 0, true, "/");
  verifyAuditLogs();
  assertEquals(replTime, fs.getFileStatus(replFilePath).getAccessTime());
  assertEquals(ecTime, fs.getFileStatus(largeFilePath).getAccessTime());
  System.out.println(outStr);
  assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
  shutdownCluster();
  // restart the cluster; bring up the namenode but not the data nodes
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
      .format(false).build();
  outStr = runFsck(conf, 1, true, "/", "-files", "-blocks");
  // expect the result to be corrupt
  assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
  String[] outLines = outStr.split("\\r?\\n");
  for (String line : outLines) {
    if (line.contains(largeFilePath.toString())) {
      final HdfsFileStatus file = cluster.getNameNode().getRpcServer()
          .getFileInfo(largeFilePath.toString());
      assertTrue(line.contains("policy="
          + file.getErasureCodingPolicy().getName()));
    } else if (line.contains(replFilePath.toString())) {
      assertTrue(line.contains("replication="
          + cluster.getFileSystem().getFileStatus(replFilePath)
              .getReplication()));
    }
  }
  System.out.println(outStr);
}
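The "policy=" string checked above comes from the file's erasure coding policy. A directory's policy can be set and read back through DistributedFileSystem; the sketch below is not from the test, assumes fs.defaultFS is an hdfs:// URI so the cast succeeds, and uses the built-in RS-6-3-1024k policy name only as an example:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcPolicySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumption: fs.defaultFS is an hdfs:// URI, so this cast is safe.
    DistributedFileSystem dfs =
        (DistributedFileSystem) new Path("/").getFileSystem(conf);
    Path ecDir = new Path("/striped"); // hypothetical directory
    dfs.mkdirs(ecDir);
    // Assign an EC policy to the directory and read it back; fsck -files
    // prints the same name as "policy=<name>" for striped files beneath it.
    // The named policy must already be enabled on the cluster.
    dfs.setErasureCodingPolicy(ecDir, "RS-6-3-1024k");
    ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(ecDir);
    System.out.println("policy=" + policy.getName());
  }
}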
Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.
The class TestFsck, method testFsckFileNotFound.
/** Test fsck with FileNotFound. */
@Test
public void testFsckFileNotFound() throws Exception {
  // Number of replicas to actually start
  final short numReplicas = 1;
  NameNode namenode = mock(NameNode.class);
  NetworkTopology nettop = mock(NetworkTopology.class);
  Map<String, String[]> pmap = new HashMap<>();
  Writer result = new StringWriter();
  PrintWriter out = new PrintWriter(result, true);
  InetAddress remoteAddress = InetAddress.getLocalHost();
  FSNamesystem fsName = mock(FSNamesystem.class);
  FSDirectory fsd = mock(FSDirectory.class);
  BlockManager blockManager = mock(BlockManager.class);
  DatanodeManager dnManager = mock(DatanodeManager.class);
  INodesInPath iip = mock(INodesInPath.class);
  when(namenode.getNamesystem()).thenReturn(fsName);
  when(fsName.getBlockManager()).thenReturn(blockManager);
  when(fsName.getFSDirectory()).thenReturn(fsd);
  when(fsd.getFSNamesystem()).thenReturn(fsName);
  when(fsd.resolvePath(anyObject(), anyString(), any(DirOp.class)))
      .thenReturn(iip);
  when(blockManager.getDatanodeManager()).thenReturn(dnManager);
  NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
      numReplicas, remoteAddress);
  String pathString = "/tmp/testFile";
  long length = 123L;
  boolean isDir = false;
  int blockReplication = 1;
  long blockSize = 128 * 1024L;
  long modTime = 123123123L;
  long accessTime = 123123120L;
  FsPermission perms = FsPermission.getDefault();
  String owner = "foo";
  String group = "bar";
  byte[] symlink = null;
  byte[] path = DFSUtil.string2Bytes(pathString);
  long fileId = 312321L;
  int numChildren = 1;
  byte storagePolicy = 0;
  HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication,
      blockSize, modTime, accessTime, perms, owner, group, symlink, path,
      fileId, numChildren, null, storagePolicy, null);
  Result replRes = new ReplicationResult(conf);
  Result ecRes = new ErasureCodingResult(conf);
  try {
    fsck.check(pathString, file, replRes, ecRes);
  } catch (Exception e) {
    fail("Unexpected exception " + e.getMessage());
  }
  assertTrue(replRes.isHealthy());
}
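NamenodeFsck#check reads its per-file details straight off the HdfsFileStatus getters. A small sketch, not part of the test, showing the same fields being read back from any HdfsFileStatus instance (such as the file object constructed above):

import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

final class FileStatusPrinter {
  // Prints the core fields that fsck-style reporting relies on.
  static void print(HdfsFileStatus file) {
    System.out.println("path=" + file.getLocalName()
        + " len=" + file.getLen()
        + " replication=" + file.getReplication()
        + " blockSize=" + file.getBlockSize()
        + " owner=" + file.getOwner()
        + " group=" + file.getGroup()
        + " children=" + file.getChildrenNum());
  }
}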