Example use of org.apache.hadoop.fs.permission.FsPermission in the Apache Hadoop project.
From the class TestEncryptionZoneManager, method setup():
@Before
public void setup() {
  // Build a small mocked directory tree for the EncryptionZoneManager tests:
  // root (inode id 0) plus two directories "first" (id 1) and "second" (id 2),
  // all owned by test:test with mode 0755.
  this.mockedDir = mock(FSDirectory.class);
  this.mockedINodesInPath = mock(INodesInPath.class);
  // Fix: the previous literal (short) 755 was DECIMAL 755 (= octal 1363),
  // which is not a meaningful permission mask. The intended rwxr-xr-x mode
  // is octal, so use the octal literal 0755.
  this.defaultPermission = new PermissionStatus("test", "test", new FsPermission((short) 0755));
  this.rootINode = new INodeDirectory(0L, "".getBytes(), defaultPermission, System.currentTimeMillis());
  this.firstINode = new INodeDirectory(1L, "first".getBytes(), defaultPermission, System.currentTimeMillis());
  this.secondINode = new INodeDirectory(2L, "second".getBytes(), defaultPermission, System.currentTimeMillis());
  // The manager asserts lock ownership; pretend the caller holds both locks.
  when(this.mockedDir.hasReadLock()).thenReturn(true);
  when(this.mockedDir.hasWriteLock()).thenReturn(true);
  // Resolve the three inode ids back to the directories built above.
  when(this.mockedDir.getInode(0L)).thenReturn(rootINode);
  when(this.mockedDir.getInode(1L)).thenReturn(firstINode);
  when(this.mockedDir.getInode(2L)).thenReturn(secondINode);
}
Example use of org.apache.hadoop.fs.permission.FsPermission in the Apache Hadoop project.
From the class TestFSEditLogLoader, method testAddNewStripedBlock():
@Test
public void testAddNewStripedBlock() throws IOException {
// Verifies that an OP_ADD_BLOCK edit-log entry for a striped (erasure-coded)
// block is replayed correctly: after a NameNode restart the reloaded inode
// must report the same block id, length, generation stamp and data/parity
// unit counts that were logged.
// start a cluster
Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, testECPolicy.getName());
MiniDFSCluster cluster = null;
try {
// 9 datanodes — enough to host the data + parity units of the test EC policy.
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9).build();
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
FSNamesystem fns = cluster.getNamesystem();
String testDir = "/ec";
String testFile = "testfile_001";
String testFilePath = testDir + "/" + testFile;
String clientName = "testUser1";
String clientMachine = "testMachine1";
long blkId = 1;
long blkNumBytes = 1024;
long timestamp = 1426222918;
short blockNum = (short) testECPolicy.getNumDataUnits();
short parityNum = (short) testECPolicy.getNumParityUnits();
//set the storage policy of the directory
fs.mkdir(new Path(testDir), new FsPermission("755"));
fs.getClient().getNamenode().setErasureCodingPolicy(testDir, testECPolicy.getName());
// Create a file with striped block
Path p = new Path(testFilePath);
DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
// Checkpoint now so that the striped block added below exists ONLY in the
// edit log, forcing the restart to exercise edit-log replay.
fns.enterSafeMode(false);
fns.saveNamespace(0, 0);
fns.leaveSafeMode(false);
// Add a striped block to the file
BlockInfoStriped stripedBlk = new BlockInfoStriped(new Block(blkId, blkNumBytes, timestamp), testECPolicy);
INodeFile file = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
// The inode must be under construction before a block can be appended.
file.toUnderConstruction(clientName, clientMachine);
file.addBlock(stripedBlk);
fns.getEditLog().logAddBlock(testFilePath, file);
TestINodeFile.toCompleteFile(file);
//If the block by loaded is the same as above it means that
//we have successfully applied the edit log to the fsimage.
cluster.restartNameNodes();
cluster.waitActive();
fns = cluster.getNamesystem();
INodeFile inodeLoaded = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
assertTrue(inodeLoaded.isStriped());
BlockInfo[] blks = inodeLoaded.getBlocks();
assertEquals(1, blks.length);
assertEquals(blkId, blks[0].getBlockId());
assertEquals(blkNumBytes, blks[0].getNumBytes());
assertEquals(timestamp, blks[0].getGenerationStamp());
assertEquals(blockNum, ((BlockInfoStriped) blks[0]).getDataBlockNum());
assertEquals(parityNum, ((BlockInfoStriped) blks[0]).getParityBlockNum());
cluster.shutdown();
// Null out so the finally block does not shut the cluster down twice.
cluster = null;
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
Example use of org.apache.hadoop.fs.permission.FsPermission in the Apache Hadoop project.
From the class TestFSImage, method testHasNonEcBlockUsingStripedIDForLoadUCFile():
@Test
public void testHasNonEcBlockUsingStripedIDForLoadUCFile() throws IOException {
  // A contiguous (non-EC) block carrying a negative id (the striped-id range)
  // is appended to an under-construction file; after the fsimage is saved and
  // reloaded, the BlockManager must still report hasNonEcBlockUsingStripedID.
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9).build();
    cluster.waitActive();
    DistributedFileSystem dfs = cluster.getFileSystem();
    FSNamesystem namesystem = cluster.getNamesystem();
    String dirName = "/test_block_manager";
    String fileName = "testfile_loaducfile";
    String filePath = dirName + "/" + fileName;
    String lease = "testUser_loaducfile";
    String leaseMachine = "testMachine_loaducfile";
    // Negative block id — falls inside the id range reserved for striped blocks.
    long blockId = -1;
    long blockLen = 1024;
    long genStamp = 1426222918;
    dfs.mkdir(new Path(dirName), new FsPermission("755"));
    Path file = new Path(filePath);
    DFSTestUtil.createFile(dfs, file, 0, (short) 1, 1);
    BlockInfoContiguous contiguousBlock =
        new BlockInfoContiguous(new Block(blockId, blockLen, genStamp), (short) 3);
    INodeFile inode = (INodeFile) namesystem.getFSDirectory().getINode(filePath);
    // Leave the file under construction — this test covers the UC-file load path.
    inode.toUnderConstruction(lease, leaseMachine);
    inode.addBlock(contiguousBlock);
    // Persist the namespace and reload it from the fsimage.
    namesystem.enterSafeMode(false);
    namesystem.saveNamespace(0, 0);
    cluster.restartNameNodes();
    cluster.waitActive();
    namesystem = cluster.getNamesystem();
    assertTrue(namesystem.getBlockManager().hasNonEcBlockUsingStripedID());
    cluster.shutdown();
    // Prevent the finally clause from shutting down a second time.
    cluster = null;
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example use of org.apache.hadoop.fs.permission.FsPermission in the Apache Hadoop project.
From the class TestFSImage, method testHasNonEcBlockUsingStripedIDForLoadFile():
@Test
public void testHasNonEcBlockUsingStripedIDForLoadFile() throws IOException {
  // A completed (non-UC) contiguous block with a negative, striped-range id
  // must keep hasNonEcBlockUsingStripedID true across an fsimage reload, and
  // deleting that file must clear the flag after the next reload.
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9).build();
    cluster.waitActive();
    DistributedFileSystem dfs = cluster.getFileSystem();
    FSNamesystem namesystem = cluster.getNamesystem();
    String dirName = "/test_block_manager";
    String fileName = "testfile_loadfile";
    String filePath = dirName + "/" + fileName;
    String lease = "testUser_loadfile";
    String leaseMachine = "testMachine_loadfile";
    // Negative block id — inside the id range reserved for striped blocks.
    long blockId = -1;
    long blockLen = 1024;
    long genStamp = 1426222918;
    dfs.mkdir(new Path(dirName), new FsPermission("755"));
    Path file = new Path(filePath);
    DFSTestUtil.createFile(dfs, file, 0, (short) 1, 1);
    BlockInfoContiguous contiguousBlock =
        new BlockInfoContiguous(new Block(blockId, blockLen, genStamp), (short) 3);
    INodeFile inode = (INodeFile) namesystem.getFSDirectory().getINode(filePath);
    inode.toUnderConstruction(lease, leaseMachine);
    inode.addBlock(contiguousBlock);
    // Unlike the UC-file case, finalize the file before saving the image.
    TestINodeFile.toCompleteFile(inode);
    namesystem.enterSafeMode(false);
    namesystem.saveNamespace(0, 0);
    cluster.restartNameNodes();
    cluster.waitActive();
    namesystem = cluster.getNamesystem();
    assertTrue(namesystem.getBlockManager().hasNonEcBlockUsingStripedID());
    // Once the offending file is gone, a save + reload must reset the flag.
    dfs = cluster.getFileSystem();
    dfs.delete(file, false);
    namesystem.enterSafeMode(false);
    namesystem.saveNamespace(0, 0);
    cluster.restartNameNodes();
    cluster.waitActive();
    namesystem = cluster.getNamesystem();
    assertFalse(namesystem.getBlockManager().hasNonEcBlockUsingStripedID());
    cluster.shutdown();
    // Prevent the finally clause from shutting down a second time.
    cluster = null;
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example use of org.apache.hadoop.fs.permission.FsPermission in the Apache Hadoop project.
From the class TestFSImage, method testHasNonEcBlockUsingStripedIDForLoadSnapshot():
@Test
public void testHasNonEcBlockUsingStripedIDForLoadSnapshot() throws IOException {
// Verifies that a non-EC block with a striped-range (negative) id that only
// survives inside a snapshot (the live file is truncated to 0) still makes
// the BlockManager report hasNonEcBlockUsingStripedID after an fsimage reload.
// start a cluster
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9).build();
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
FSNamesystem fns = cluster.getNamesystem();
String testDir = "/test_block_manager";
String testFile = "testfile_loadSnapshot";
String testFilePath = testDir + "/" + testFile;
String clientName = "testUser_loadSnapshot";
String clientMachine = "testMachine_loadSnapshot";
// Negative block id — inside the id range reserved for striped blocks.
long blkId = -1;
long blkNumBytes = 1024;
long timestamp = 1426222918;
Path d = new Path(testDir);
fs.mkdir(d, new FsPermission("755"));
// The directory must be snapshottable before createSnapshot below.
fs.allowSnapshot(d);
Path p = new Path(testFilePath);
DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
BlockInfoContiguous cBlk = new BlockInfoContiguous(new Block(blkId, blkNumBytes, timestamp), (short) 3);
INodeFile file = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
file.toUnderConstruction(clientName, clientMachine);
file.addBlock(cBlk);
TestINodeFile.toCompleteFile(file);
// Capture the block in a snapshot, then truncate the live file so that the
// snapshot diff is the only remaining reference to the block.
fs.createSnapshot(d, "testHasNonEcBlockUsingStripeID");
fs.truncate(p, 0);
// Persist the namespace and reload it from the fsimage.
fns.enterSafeMode(false);
fns.saveNamespace(0, 0);
cluster.restartNameNodes();
cluster.waitActive();
fns = cluster.getNamesystem();
assertTrue(fns.getBlockManager().hasNonEcBlockUsingStripedID());
cluster.shutdown();
// Null out so the finally block does not shut the cluster down twice.
cluster = null;
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
End of aggregated usage examples.