Use of org.apache.hadoop.fs.permission.PermissionStatus in project hadoop by apache.
The class CreateEditsLog, method addFiles.
static void addFiles(FSEditLog editLog, int numFiles, short replication,
    int blocksPerFile, long startingBlockId, long blockSize,
    FileNameGenerator nameGenerator) {
  PermissionStatus p = new PermissionStatus("joeDoe", "people",
      new FsPermission((short) 0777));
  INodeId inodeId = new INodeId();
  INodeDirectory dirInode = new INodeDirectory(inodeId.nextValue(), null, p, 0L);
  editLog.logMkDir(BASE_PATH, dirInode);
  BlockInfo[] blocks = new BlockInfo[blocksPerFile];
  for (int iB = 0; iB < blocksPerFile; ++iB) {
    blocks[iB] = new BlockInfoContiguous(
        new Block(0, blockSize, BLOCK_GENERATION_STAMP), replication);
  }
  long currentBlockId = startingBlockId;
  long bidAtSync = startingBlockId;
  for (int iF = 0; iF < numFiles; iF++) {
    for (int iB = 0; iB < blocksPerFile; ++iB) {
      blocks[iB].setBlockId(currentBlockId++);
    }
    final INodeFile inode = new INodeFile(inodeId.nextValue(), null, p,
        0L, 0L, blocks, replication, blockSize);
    inode.toUnderConstruction("", "");
    // Append a suffix recording the range of block IDs to the file name.
    String path = "_" + iF + "_B" + blocks[0].getBlockId() +
        "_to_B" + blocks[blocksPerFile - 1].getBlockId() + "_";
    String filePath = nameGenerator.getNextFileName("");
    filePath = filePath + path;
    // Log the new sub-directory in edits.
    if ((iF % nameGenerator.getFilesPerDirectory()) == 0) {
      String currentDir = nameGenerator.getCurrentDir();
      dirInode = new INodeDirectory(inodeId.nextValue(), null, p, 0L);
      editLog.logMkDir(currentDir, dirInode);
    }
    INodeFile fileUc = new INodeFile(inodeId.nextValue(), null, p,
        0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize);
    fileUc.toUnderConstruction("", "");
    editLog.logOpenFile(filePath, fileUc, false, false);
    editLog.logCloseFile(filePath, inode);
    if (currentBlockId - bidAtSync >= 2000) {
      // Sync every 2K blocks.
      editLog.logSync();
      bidAtSync = currentBlockId;
    }
  }
  System.out.println("Created edits log in directory " + edits_dir);
  System.out.println(" containing " + numFiles + " File-Creates, each file with " +
      blocksPerFile + " blocks");
  System.out.println(" blocks range: " + startingBlockId + " to " +
      (currentBlockId - 1));
}
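A hedged sketch of how a test might drive addFiles; the FileNameGenerator constructor arguments and all numeric values are illustrative, not taken from CreateEditsLog itself:

// Illustrative only: assumes an open FSEditLog and the BASE_PATH used above;
// FileNameGenerator(baseDir, filesPerDir) is assumed from the HDFS test utilities.
FileNameGenerator nameGen = new FileNameGenerator(BASE_PATH, 100);
// 50 files, replication 3, 2 blocks per file, block IDs from 1000, 64 MB blocks.
addFiles(editLog, 50, (short) 3, 2, 1000L, 64L * 1024 * 1024, nameGen);
editLog.logSync();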
Use of org.apache.hadoop.fs.permission.PermissionStatus in project hadoop by apache.
The class FSImageTestUtil, method createAbortedLogWithMkdirs.
/**
* Create an aborted in-progress log in the given directory, containing
* only a specified number of "mkdirs" operations.
*/
public static void createAbortedLogWithMkdirs(File editsLogDir, int numDirs,
    long firstTxId, long newInodeId) throws IOException {
  FSEditLog editLog = FSImageTestUtil.createStandaloneEditLog(editsLogDir);
  editLog.setNextTxId(firstTxId);
  editLog.openForWrite(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  PermissionStatus perms = PermissionStatus.createImmutable("fakeuser", "fakegroup",
      FsPermission.createImmutable((short) 0755));
  for (int i = 1; i <= numDirs; i++) {
    String dirName = "dir" + i;
    INodeDirectory dir = new INodeDirectory(newInodeId + i - 1,
        DFSUtil.string2Bytes(dirName), perms, 0L);
    editLog.logMkDir("/" + dirName, dir);
  }
  editLog.logSync();
  editLog.abortCurrentLogSegment();
}
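A hedged usage sketch for this helper; the directory path and counts are placeholders, while INodeId.ROOT_INODE_ID is the real reserved root inode ID:

// Illustrative only: write ten mkdir ops into an aborted in-progress segment,
// starting at transaction ID 1.
File editsDir = new File("/tmp/aborted-edits");
FSImageTestUtil.createAbortedLogWithMkdirs(editsDir, 10, 1L,
    INodeId.ROOT_INODE_ID + 1);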
Use of org.apache.hadoop.fs.permission.PermissionStatus in project hadoop by apache.
The class TestBlockPlacementPolicyRackFaultTolerant, method setup.
@Before
public void setup() throws IOException {
  StaticMapping.resetMap();
  Configuration conf = new HdfsConfiguration();
  final ArrayList<String> rackList = new ArrayList<String>();
  final ArrayList<String> hostList = new ArrayList<String>();
  for (int i = 0; i < 10; i++) {
    for (int j = 0; j < 2; j++) {
      rackList.add("/rack" + i);
      hostList.add("/host" + i + j);
    }
  }
  conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
      BlockPlacementPolicyRackFaultTolerant.class, BlockPlacementPolicy.class);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE / 2);
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(hostList.size())
      .racks(rackList.toArray(new String[rackList.size()]))
      .hosts(hostList.toArray(new String[hostList.size()]))
      .build();
  cluster.waitActive();
  nameNodeRpc = cluster.getNameNodeRpc();
  namesystem = cluster.getNamesystem();
  perm = new PermissionStatus("TestBlockPlacementPolicyEC", null,
      FsPermission.getDefault());
}
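Such a setup is typically paired with a teardown that releases the mini cluster; a minimal sketch, assuming the cluster field set above:

@After
public void teardown() {
  if (cluster != null) {
    cluster.shutdown();
    cluster = null;
  }
}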
Use of org.apache.hadoop.fs.permission.PermissionStatus in project hadoop by apache.
The class FSEditLog, method logOpenFile.
/**
* Add open lease record to edit log.
* Records the block locations of the last block.
*/
public void logOpenFile(String path, INodeFile newNode, boolean overwrite,
    boolean toLogRpcIds) {
  Preconditions.checkArgument(newNode.isUnderConstruction());
  PermissionStatus permissions = newNode.getPermissionStatus();
  AddOp op = AddOp.getInstance(cache.get())
      .setInodeId(newNode.getId())
      .setPath(path)
      .setReplication(newNode.getFileReplication())
      .setModificationTime(newNode.getModificationTime())
      .setAccessTime(newNode.getAccessTime())
      .setBlockSize(newNode.getPreferredBlockSize())
      .setBlocks(newNode.getBlocks())
      .setPermissionStatus(permissions)
      .setClientName(newNode.getFileUnderConstructionFeature().getClientName())
      .setClientMachine(newNode.getFileUnderConstructionFeature().getClientMachine())
      .setOverwrite(overwrite)
      .setStoragePolicyId(newNode.getLocalStoragePolicyID());
  AclFeature f = newNode.getAclFeature();
  if (f != null) {
    op.setAclEntries(AclStorage.readINodeLogicalAcl(newNode));
  }
  XAttrFeature x = newNode.getXAttrFeature();
  if (x != null) {
    op.setXAttrs(x.getXAttrs());
  }
  logRpcIds(op, toLogRpcIds);
  logEdit(op);
}
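A hedged sketch of a caller, mirroring the test usage in CreateEditsLog above; the path, client name, and client machine are illustrative placeholders:

// Illustrative only: log an open-file record for a new, block-less file.
INodeFile fileUc = new INodeFile(inodeId.nextValue(), null, p,
    0L, 0L, BlockInfo.EMPTY_ARRAY, (short) 3, blockSize);
fileUc.toUnderConstruction("DFSClient_test", "127.0.0.1");
editLog.logOpenFile("/test/file1", fileUc, false, false);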
Use of org.apache.hadoop.fs.permission.PermissionStatus in project hadoop by apache.
The class PBImageDelimitedTextWriter, method getEntry.
@Override
public String getEntry(String parent, INode inode) {
  StringBuffer buffer = new StringBuffer();
  String inodeName = inode.getName().toStringUtf8();
  Path path = new Path(parent.isEmpty() ? "/" : parent,
      inodeName.isEmpty() ? "/" : inodeName);
  buffer.append(path.toString());
  PermissionStatus p = null;
  boolean isDir = false;
  boolean hasAcl = false;
  switch (inode.getType()) {
  case FILE:
    INodeFile file = inode.getFile();
    p = getPermission(file.getPermission());
    hasAcl = file.hasAcl() && file.getAcl().getEntriesCount() > 0;
    append(buffer, file.getReplication());
    append(buffer, formatDate(file.getModificationTime()));
    append(buffer, formatDate(file.getAccessTime()));
    append(buffer, file.getPreferredBlockSize());
    append(buffer, file.getBlocksCount());
    append(buffer, FSImageLoader.getFileSize(file));
    append(buffer, 0);  // NS_QUOTA
    append(buffer, 0);  // DS_QUOTA
    break;
  case DIRECTORY:
    INodeDirectory dir = inode.getDirectory();
    p = getPermission(dir.getPermission());
    hasAcl = dir.hasAcl() && dir.getAcl().getEntriesCount() > 0;
    append(buffer, 0);  // Replication
    append(buffer, formatDate(dir.getModificationTime()));
    append(buffer, formatDate(0));  // Access time
    append(buffer, 0);  // Block size
    append(buffer, 0);  // Num blocks
    append(buffer, 0);  // Num bytes
    append(buffer, dir.getNsQuota());
    append(buffer, dir.getDsQuota());
    isDir = true;
    break;
  case SYMLINK:
    INodeSymlink s = inode.getSymlink();
    p = getPermission(s.getPermission());
    append(buffer, 0);  // Replication
    append(buffer, formatDate(s.getModificationTime()));
    append(buffer, formatDate(s.getAccessTime()));
    append(buffer, 0);  // Block size
    append(buffer, 0);  // Num blocks
    append(buffer, 0);  // Num bytes
    append(buffer, 0);  // NS_QUOTA
    append(buffer, 0);  // DS_QUOTA
    break;
  default:
    break;
  }
  assert p != null;
  String dirString = isDir ? "d" : "-";
  String aclString = hasAcl ? "+" : "";
  append(buffer, dirString + p.getPermission().toString() + aclString);
  append(buffer, p.getUserName());
  append(buffer, p.getGroupName());
  return buffer.toString();
}
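To make the last three columns concrete, a small hedged example of how the permission column is assembled; the owner, group, and mode are illustrative:

PermissionStatus p = new PermissionStatus("hive", "hadoop",
    new FsPermission((short) 0755));
boolean isDir = true, hasAcl = true;
// Yields "drwxr-xr-x+": type flag, then the rwx string, then the ACL marker.
String permColumn = (isDir ? "d" : "-")
    + p.getPermission().toString() + (hasAcl ? "+" : "");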