Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.
From the class HttpFSFileSystem, method listStatusBatch:
@Override
public DirectoryEntries listStatusBatch(Path f, byte[] token) throws FileNotFoundException, IOException {
  Map<String, String> params = new HashMap<String, String>();
  params.put(OP_PARAM, Operation.LISTSTATUS_BATCH.toString());
  if (token != null) {
    params.put(START_AFTER_PARAM, new String(token, Charsets.UTF_8));
  }
  HttpURLConnection conn = getConnection(Operation.LISTSTATUS_BATCH.getMethod(), params, f, true);
  HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
  // Parse the FileStatus array
  JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
  JSONObject listing = (JSONObject) json.get(DIRECTORY_LISTING_JSON);
  FileStatus[] statuses = toFileStatuses((JSONObject) listing.get(PARTIAL_LISTING_JSON), f);
  // The new token is the name of the last FileStatus entry
  byte[] newToken = null;
  if (statuses.length > 0) {
    newToken = statuses[statuses.length - 1].getPath().getName().getBytes(Charsets.UTF_8);
  }
  // Turn the remainingEntries count into the hasMore flag
  final long remainingEntries = (Long) listing.get(REMAINING_ENTRIES_JSON);
  final boolean hasMore = remainingEntries > 0;
  return new DirectoryEntries(statuses, newToken, hasMore);
}
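For context, a minimal sketch of how a caller might page through a large directory with this batch API. It assumes an already-initialized HttpFSFileSystem instance and relies on the DirectoryEntries accessors (getEntries, hasMore, getToken) from org.apache.hadoop.fs.FileSystem; in everyday client code the same paging is usually obtained through FileSystem#listStatusIterator instead.

import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem;

public class ListingPager {
  /** Prints every entry under dir, one HTTP round trip per batch. */
  static void printAll(HttpFSFileSystem fs, Path dir) throws IOException {
    byte[] token = null; // null means "start from the first entry"
    while (true) {
      FileSystem.DirectoryEntries batch = fs.listStatusBatch(dir, token);
      for (FileStatus status : batch.getEntries()) {
        System.out.println(status.getPath() + " len=" + status.getLen());
      }
      if (!batch.hasMore()) {
        break; // the server reported zero remaining entries
      }
      token = batch.getToken(); // name of the last entry, as built above
    }
  }
}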
Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.
From the class HttpFSFileSystem, method createFileStatus:
private FileStatus createFileStatus(Path parent, JSONObject json) {
  String pathSuffix = (String) json.get(PATH_SUFFIX_JSON);
  Path path = (pathSuffix.equals("")) ? parent : new Path(parent, pathSuffix);
  FILE_TYPE type = FILE_TYPE.valueOf((String) json.get(TYPE_JSON));
  long len = (Long) json.get(LENGTH_JSON);
  String owner = (String) json.get(OWNER_JSON);
  String group = (String) json.get(GROUP_JSON);
  final FsPermission permission = toFsPermission(json);
  long aTime = (Long) json.get(ACCESS_TIME_JSON);
  long mTime = (Long) json.get(MODIFICATION_TIME_JSON);
  long blockSize = (Long) json.get(BLOCK_SIZE_JSON);
  short replication = ((Long) json.get(REPLICATION_JSON)).shortValue();
  FileStatus fileStatus = null;
  switch (type) {
    case FILE:
    case DIRECTORY:
      fileStatus = new FileStatus(len, (type == FILE_TYPE.DIRECTORY), replication,
          blockSize, mTime, aTime, permission, owner, group, path);
      break;
    case SYMLINK:
      // The symlink target is not parsed here and is left null
      Path symLink = null;
      fileStatus = new FileStatus(len, false, replication, blockSize, mTime,
          aTime, permission, owner, group, symLink, path);
      break;
  }
  return fileStatus;
}
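The toFileStatuses helper that listStatusBatch calls is not included on this page. A plausible reconstruction, not the actual Hadoop source, assuming the WebHDFS-style layout in which the partial listing wraps a FileStatuses object containing a FileStatus array (the JSON-key constants here follow the naming pattern of the fields above and are assumptions):

// Hypothetical reconstruction of the helper used by listStatusBatch above:
// unwrap the nested FileStatuses/FileStatus JSON and convert each entry
// with createFileStatus, relative to the directory that was listed.
private FileStatus[] toFileStatuses(JSONObject json, Path f) {
  json = (JSONObject) json.get(FILE_STATUSES_JSON);
  JSONArray jsonArray = (JSONArray) json.get(FILE_STATUS_JSON);
  FileStatus[] array = new FileStatus[jsonArray.size()];
  Path parent = makeQualified(f);
  for (int i = 0; i < jsonArray.size(); i++) {
    array[i] = createFileStatus(parent, (JSONObject) jsonArray.get(i));
  }
  return array;
}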
Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.
From the class TestFileStatus, method testGetFileStatusOnFile:
/** Test the FileStatus obtained by calling getFileStatus on a file. */
@Test
public void testGetFileStatusOnFile() throws Exception {
  checkFile(fs, file1, 1);
  // test getFileStatus on a file
  FileStatus status = fs.getFileStatus(file1);
  assertFalse(file1 + " should be a file", status.isDirectory());
  assertEquals(blockSize, status.getBlockSize());
  assertEquals(1, status.getReplication());
  assertEquals(fileSize, status.getLen());
  assertEquals(file1.makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString(),
      status.getPath().toString());
}
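The final assertion leans on Path#makeQualified, which resolves a scheme-less or relative path against a file system's URI and working directory. A standalone illustration with hypothetical values:

import java.net.URI;
import org.apache.hadoop.fs.Path;

public class QualifyDemo {
  public static void main(String[] args) {
    Path relative = new Path("dir/file1");
    URI fsUri = URI.create("hdfs://namenode:8020"); // hypothetical cluster URI
    Path workingDir = new Path("/user/alice");      // hypothetical working dir
    // Prints: hdfs://namenode:8020/user/alice/dir/file1
    System.out.println(relative.makeQualified(fsUri, workingDir));
  }
}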
Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.
From the class TestINodeFile, method testInodeIdBasedPaths:
/**
* Tests for addressing files using /.reserved/.inodes/<inodeID> in file system
* operations.
*/
@Test
public void testInodeIdBasedPaths() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    NamenodeProtocols nnRpc = cluster.getNameNodeRpc();
    // FileSystem#mkdirs "/testInodeIdBasedPaths"
    Path baseDir = getInodePath(INodeId.ROOT_INODE_ID, "testInodeIdBasedPaths");
    Path baseDirRegPath = new Path("/testInodeIdBasedPaths");
    fs.mkdirs(baseDir);
    assertTrue(fs.exists(baseDir));
    long baseDirFileId = nnRpc.getFileInfo(baseDir.toString()).getFileId();
    // FileSystem#create file and FileSystem#close
    Path testFileInodePath = getInodePath(baseDirFileId, "test1");
    Path testFileRegularPath = new Path(baseDir, "test1");
    final int testFileBlockSize = 1024;
    FileSystemTestHelper.createFile(fs, testFileInodePath, 1, testFileBlockSize);
    assertTrue(fs.exists(testFileInodePath));
    // FileSystem#setPermission
    FsPermission perm = new FsPermission((short) 0666);
    fs.setPermission(testFileInodePath, perm);
    // FileSystem#getFileStatus and FileSystem#getPermission
    FileStatus fileStatus = fs.getFileStatus(testFileInodePath);
    assertEquals(perm, fileStatus.getPermission());
    // FileSystem#setOwner
    fs.setOwner(testFileInodePath, fileStatus.getOwner(), fileStatus.getGroup());
    // FileSystem#setTimes
    fs.setTimes(testFileInodePath, 0, 0);
    fileStatus = fs.getFileStatus(testFileInodePath);
    assertEquals(0, fileStatus.getModificationTime());
    assertEquals(0, fileStatus.getAccessTime());
    // FileSystem#setReplication
    fs.setReplication(testFileInodePath, (short) 3);
    fileStatus = fs.getFileStatus(testFileInodePath);
    assertEquals(3, fileStatus.getReplication());
    fs.setReplication(testFileInodePath, (short) 1);
    // ClientProtocol#getPreferredBlockSize
    assertEquals(testFileBlockSize, nnRpc.getPreferredBlockSize(testFileInodePath.toString()));
    /*
     * HDFS-6749 added missing calls to FSDirectory.resolvePath in the
     * following methods. The calls below ensure that /.reserved/.inodes
     * paths work properly. No need to check return values as these
     * methods are tested elsewhere.
     */
    {
      fs.isFileClosed(testFileInodePath);
      fs.getAclStatus(testFileInodePath);
      fs.getXAttrs(testFileInodePath);
      fs.listXAttrs(testFileInodePath);
      fs.access(testFileInodePath, FsAction.READ_WRITE);
    }
    // Symbolic link related tests.
    // A reserved path is not allowed as a link target
    String invalidTarget = new Path(baseDir, "invalidTarget").toString();
    String link = new Path(baseDir, "link").toString();
    testInvalidSymlinkTarget(nnRpc, invalidTarget, link);
    // Test creating a link using a reserved inode path
    String validTarget = "/validtarget";
    testValidSymlinkTarget(nnRpc, validTarget, link);
    // FileSystem#append
    fs.append(testFileInodePath);
    // DistributedFileSystem#recoverLease
    fs.recoverLease(testFileInodePath);
    // Namenode#getBlockLocations
    LocatedBlocks l1 = nnRpc.getBlockLocations(testFileInodePath.toString(), 0, Long.MAX_VALUE);
    LocatedBlocks l2 = nnRpc.getBlockLocations(testFileRegularPath.toString(), 0, Long.MAX_VALUE);
    checkEquals(l1, l2);
    // FileSystem#rename - both variants
    Path renameDst = getInodePath(baseDirFileId, "test2");
    fileStatus = fs.getFileStatus(testFileInodePath);
    // Rename variant 1: rename and rename back
    fs.rename(testFileInodePath, renameDst);
    fs.rename(renameDst, testFileInodePath);
    assertEquals(fileStatus, fs.getFileStatus(testFileInodePath));
    // Rename variant 2: rename and rename back
    fs.rename(testFileInodePath, renameDst, Rename.OVERWRITE);
    fs.rename(renameDst, testFileInodePath, Rename.OVERWRITE);
    assertEquals(fileStatus, fs.getFileStatus(testFileInodePath));
    // FileSystem#getContentSummary
    assertEquals(fs.getContentSummary(testFileRegularPath).toString(),
        fs.getContentSummary(testFileInodePath).toString());
    // FileSystem#listFiles
    checkEquals(fs.listFiles(baseDirRegPath, false), fs.listFiles(baseDir, false));
    // FileSystem#delete
    fs.delete(testFileInodePath, true);
    assertFalse(fs.exists(testFileInodePath));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
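The getInodePath helper used throughout the test is defined elsewhere in TestINodeFile. A minimal sketch of what it does, assuming it simply joins the reserved-inodes prefix, the inode id, and the remaining path:

// Hypothetical reconstruction: build "/.reserved/.inodes/<inodeId>/<rest>".
private static Path getInodePath(long inodeId, String remainingPath) {
  return new Path(Path.SEPARATOR + ".reserved"
      + Path.SEPARATOR + ".inodes"
      + Path.SEPARATOR + inodeId
      + Path.SEPARATOR + remainingPath);
}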
Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.
From the class TaskAttemptImpl, method createLocalResource:
/**
* Create a {@link LocalResource} record with all the given parameters.
*/
private static LocalResource createLocalResource(FileSystem fc, Path file,
    LocalResourceType type, LocalResourceVisibility visibility) throws IOException {
  FileStatus fstat = fc.getFileStatus(file);
  URL resourceURL = URL.fromPath(fc.resolvePath(fstat.getPath()));
  long resourceSize = fstat.getLen();
  long resourceModificationTime = fstat.getModificationTime();
  return LocalResource.newInstance(resourceURL, type, visibility, resourceSize, resourceModificationTime);
}
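As a usage sketch, this is roughly how such a helper gets invoked when assembling the local resources for a container launch; the HDFS path, the resource key, and the visibility are illustrative, not taken from TaskAttemptImpl:

// Hypothetical call site in the same class: register the job jar so YARN
// localizes it into the container's working directory before launch.
static Map<String, LocalResource> jobJarResources(Configuration conf) throws IOException {
  FileSystem fs = FileSystem.get(conf);
  Path jobJar = new Path("hdfs:///apps/myjob/job.jar"); // illustrative path
  Map<String, LocalResource> localResources = new HashMap<>();
  localResources.put("job.jar", createLocalResource(fs, jobJar,
      LocalResourceType.FILE, LocalResourceVisibility.APPLICATION));
  return localResources;
}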