use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
the class TestEditLog method testReadActivelyUpdatedLog.
/**
 * Tests that reading an in-progress edit log stops at the last synced
 * transaction and never reads past it.
 *
 * @throws Exception
 */
@Test
public void testReadActivelyUpdatedLog() throws Exception {
  final TestAppender appender = new TestAppender();
  LogManager.getRootLogger().addAppender(appender);
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  // Set a single handler thread, so all transactions hit the same thread-local ops.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_KEY, 1);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    FSImage fsimage = cluster.getNamesystem().getFSImage();
    StorageDirectory sd = fsimage.getStorage().getStorageDir(0);
    final DistributedFileSystem fileSys = cluster.getFileSystem();
    DFSInotifyEventInputStream events = fileSys.getInotifyEventStream();
    fileSys.mkdirs(new Path("/test"));
    fileSys.mkdirs(new Path("/test/dir1"));
    fileSys.delete(new Path("/test/dir1"), true);
    fsimage.getEditLog().logSync();
    fileSys.mkdirs(new Path("/test/dir2"));
    final File inProgressEdit = NNStorage.getInProgressEditsFile(sd, 1);
    assertTrue(inProgressEdit.exists());
    EditLogFileInputStream elis = new EditLogFileInputStream(inProgressEdit);
    FSEditLogOp op;
    long pos = 0;
    // Scan to the end of the last valid op; pos ends up just past it.
    while (true) {
      op = elis.readOp();
      if (op != null && op.opCode != FSEditLogOpCodes.OP_INVALID) {
        pos = elis.getPosition();
      } else {
        break;
      }
    }
    elis.close();
    assertTrue(pos > 0);
    // Corrupt the log immediately past the last synced op.
    RandomAccessFile rwf = new RandomAccessFile(inProgressEdit, "rw");
    rwf.seek(pos);
    assertEquals((byte) -1, rwf.readByte());
    rwf.seek(pos + 1);
    rwf.writeByte(2);
    rwf.close();
    events.poll();
    String pattern = "Caught exception after reading (.*) ops";
    Pattern r = Pattern.compile(pattern);
    final List<LoggingEvent> log = appender.getLog();
    for (LoggingEvent event : log) {
      Matcher m = r.matcher(event.getRenderedMessage());
      if (m.find()) {
        fail("Should not try to read past latest synced edit log op");
      }
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    LogManager.getRootLogger().removeAppender(appender);
  }
}
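For context, the inotify stream the test polls is the same API an ordinary client can use to tail namespace events. Below is a minimal sketch, not taken from the Hadoop sources: the class name InotifyTail is hypothetical, and it assumes fs.defaultFS points at a running HDFS NameNode.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.inotify.Event;
import org.apache.hadoop.hdfs.inotify.EventBatch;

public class InotifyTail {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes the default filesystem is HDFS, so the cast is safe.
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    DFSInotifyEventInputStream events = dfs.getInotifyEventStream();
    while (true) {
      // poll(timeout, unit) returns null if no batch arrives in time.
      EventBatch batch = events.poll(1, TimeUnit.SECONDS);
      if (batch == null) {
        continue;
      }
      for (Event event : batch.getEvents()) {
        System.out.println(event.getEventType() + " @ txid " + batch.getTxid());
      }
    }
  }
}

Note that poll(timeout, unit) returns null on timeout, while take() blocks until a batch is available; the no-argument poll() used in the test returns immediately.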
use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
the class TestFSEditLogLoader method testAddNewStripedBlock.
@Test
public void testAddNewStripedBlock() throws IOException {
  // start a cluster
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, testECPolicy.getName());
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    FSNamesystem fns = cluster.getNamesystem();
    String testDir = "/ec";
    String testFile = "testfile_001";
    String testFilePath = testDir + "/" + testFile;
    String clientName = "testUser1";
    String clientMachine = "testMachine1";
    long blkId = 1;
    long blkNumBytes = 1024;
    long timestamp = 1426222918;
    short blockNum = (short) testECPolicy.getNumDataUnits();
    short parityNum = (short) testECPolicy.getNumParityUnits();
    // set the erasure coding policy of the directory
    fs.mkdir(new Path(testDir), new FsPermission("755"));
    fs.getClient().getNamenode().setErasureCodingPolicy(testDir, testECPolicy.getName());
    // Create a file with a striped block
    Path p = new Path(testFilePath);
    DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
    fns.enterSafeMode(false);
    fns.saveNamespace(0, 0);
    fns.leaveSafeMode(false);
    // Add a striped block to the file
    BlockInfoStriped stripedBlk = new BlockInfoStriped(new Block(blkId, blkNumBytes, timestamp), testECPolicy);
    INodeFile file = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
    file.toUnderConstruction(clientName, clientMachine);
    file.addBlock(stripedBlk);
    fns.getEditLog().logAddBlock(testFilePath, file);
    TestINodeFile.toCompleteFile(file);
    // If the block loaded after restart is the same as the one above, the
    // edit log was applied to the fsimage successfully.
    cluster.restartNameNodes();
    cluster.waitActive();
    fns = cluster.getNamesystem();
    INodeFile inodeLoaded = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
    assertTrue(inodeLoaded.isStriped());
    BlockInfo[] blks = inodeLoaded.getBlocks();
    assertEquals(1, blks.length);
    assertEquals(blkId, blks[0].getBlockId());
    assertEquals(blkNumBytes, blks[0].getNumBytes());
    assertEquals(timestamp, blks[0].getGenerationStamp());
    assertEquals(blockNum, ((BlockInfoStriped) blks[0]).getDataBlockNum());
    assertEquals(parityNum, ((BlockInfoStriped) blks[0]).getParityBlockNum());
    cluster.shutdown();
    cluster = null;
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
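The test sets the EC policy through the raw NameNode RPC because it needs low-level access; application code would normally go through DistributedFileSystem, which in Hadoop 3 exposes setErasureCodingPolicy directly. A minimal sketch under those assumptions: EnableEcPolicy is a hypothetical class name, and RS-6-3-1024k is the built-in default policy, which must be enabled on the cluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class EnableEcPolicy {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    Path dir = new Path("/ec");
    dfs.mkdirs(dir);
    // Files created under /ec after this call are striped per the policy.
    dfs.setErasureCodingPolicy(dir, "RS-6-3-1024k");
    System.out.println("EC policy on " + dir + ": " + dfs.getErasureCodingPolicy(dir));
  }
}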
use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
the class TestFSImage method testDigest.
/**
 * Ensure that the digest written by the saver equals the digest of the
 * file.
 */
@Test
public void testDigest() throws IOException {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    DistributedFileSystem fs = cluster.getFileSystem();
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    fs.saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    File currentDir = FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0).get(0);
    File fsimage = FSImageTestUtil.findNewestImageFile(currentDir.getAbsolutePath());
    assertEquals(MD5FileUtils.readStoredMd5ForFile(fsimage), MD5FileUtils.computeMd5ForFile(fsimage));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
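The SAFEMODE_ENTER / saveNamespace / SAFEMODE_LEAVE sequence above is the standard client-side way to force an fsimage checkpoint, the programmatic equivalent of hdfs dfsadmin -safemode enter, -saveNamespace, and -safemode leave. A minimal sketch (ForceCheckpoint is a hypothetical class name; the caller needs HDFS superuser privileges):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;

public class ForceCheckpoint {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    // saveNamespace is only permitted while the NameNode is in safe mode.
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    try {
      dfs.saveNamespace();
    } finally {
      dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    }
  }
}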
use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
the class TestFSImage method testHasNonEcBlockUsingStripedIDForLoadUCFile.
@Test
public void testHasNonEcBlockUsingStripedIDForLoadUCFile() throws IOException {
  // start a cluster
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    FSNamesystem fns = cluster.getNamesystem();
    String testDir = "/test_block_manager";
    String testFile = "testfile_loaducfile";
    String testFilePath = testDir + "/" + testFile;
    String clientName = "testUser_loaducfile";
    String clientMachine = "testMachine_loaducfile";
    // A negative ID lies in the striped-block ID range, so this contiguous
    // block mimics a non-EC block that uses a striped block ID.
    long blkId = -1;
    long blkNumBytes = 1024;
    long timestamp = 1426222918;
    fs.mkdir(new Path(testDir), new FsPermission("755"));
    Path p = new Path(testFilePath);
    DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
    BlockInfoContiguous cBlk = new BlockInfoContiguous(new Block(blkId, blkNumBytes, timestamp), (short) 3);
    INodeFile file = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
    file.toUnderConstruction(clientName, clientMachine);
    file.addBlock(cBlk);
    fns.enterSafeMode(false);
    fns.saveNamespace(0, 0);
    cluster.restartNameNodes();
    cluster.waitActive();
    fns = cluster.getNamesystem();
    assertTrue(fns.getBlockManager().hasNonEcBlockUsingStripedID());
    cluster.shutdown();
    cluster = null;
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
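The choice of blkId = -1 is what drives this test: HDFS reserves block IDs with the sign bit set, i.e. negative IDs, for striped blocks, so a contiguous block carrying a negative ID looks like a legacy block occupying the striped ID range. A minimal sketch of the range check, assuming the server-side BlockIdManager class is on the classpath (StripedIdCheck is a hypothetical class name):

import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager;

public class StripedIdCheck {
  public static void main(String[] args) {
    // Striped block IDs have the sign bit set, i.e. they are negative.
    System.out.println(BlockIdManager.isStripedBlockID(-1L));         // true
    System.out.println(BlockIdManager.isStripedBlockID(1073741825L)); // false
  }
}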
use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
the class TestFSImage method testPersistHelper.
private void testPersistHelper(Configuration conf) throws IOException {
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    FSNamesystem fsn = cluster.getNamesystem();
    DistributedFileSystem fs = cluster.getFileSystem();
    final Path dir = new Path("/abc/def");
    final Path file1 = new Path(dir, "f1");
    final Path file2 = new Path(dir, "f2");
    // create an empty file f1
    fs.create(file1).close();
    // create an under-construction file f2
    FSDataOutputStream out = fs.create(file2);
    out.writeBytes("hello");
    ((DFSOutputStream) out.getWrappedStream()).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    // checkpoint
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    fs.saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    cluster.restartNameNode();
    cluster.waitActive();
    // re-fetch handles; the old ones point at the pre-restart NameNode
    fsn = cluster.getNamesystem();
    fs = cluster.getFileSystem();
    assertTrue(fs.isDirectory(dir));
    assertTrue(fs.exists(file1));
    assertTrue(fs.exists(file2));
    // check internals of file2
    INodeFile file2Node = fsn.dir.getINode4Write(file2.toString()).asFile();
    assertEquals("hello".length(), file2Node.computeFileSize());
    assertTrue(file2Node.isUnderConstruction());
    BlockInfo[] blks = file2Node.getBlocks();
    assertEquals(1, blks.length);
    assertEquals(BlockUCState.UNDER_CONSTRUCTION, blks[0].getBlockUCState());
    // check lease manager
    Lease lease = fsn.leaseManager.getLease(file2Node);
    Assert.assertNotNull(lease);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
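The UPDATE_LENGTH flag used on the wrapped DFSOutputStream above is also reachable without unwrapping: streams created by a DistributedFileSystem are HdfsDataOutputStream instances, which expose hsync(EnumSet<SyncFlag>) directly. A minimal sketch (HsyncUpdateLength and the /tmp path are hypothetical; the cast assumes the stream came from a DistributedFileSystem):

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

public class HsyncUpdateLength {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    try (HdfsDataOutputStream out =
        (HdfsDataOutputStream) fs.create(new Path("/tmp/hsync-demo"))) {
      out.writeBytes("hello");
      // Persist the data to DataNodes and make the new length visible on the NameNode.
      out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    }
  }
}

Without UPDATE_LENGTH, hsync persists the data but readers may still see the old file length until the block is completed.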