Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
The class TestLeaseManager, method testLeaseRestorationOnRestart.
/**
 * Make sure the lease is restored even if only the inode has the record.
 */
@Test
public void testLeaseRestorationOnRestart() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
        .numDataNodes(1).build();
    DistributedFileSystem dfs = cluster.getFileSystem();
    // Create an empty file.
    String path = "/testLeaseRestorationOnRestart";
    FSDataOutputStream out = dfs.create(new Path(path));
    // Remove the lease from the lease manager, but leave it in the inode.
    FSDirectory dir = cluster.getNamesystem().getFSDirectory();
    INodeFile file = dir.getINode(path).asFile();
    cluster.getNamesystem().leaseManager.removeLease(
        file.getFileUnderConstructionFeature().getClientName(), file);
    // Save an fsimage.
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    cluster.getNameNodeRpc().saveNamespace(0, 0);
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    // Restart the namenode.
    cluster.restartNameNode(true);
    // Check whether the lease manager has the lease.
    dir = cluster.getNamesystem().getFSDirectory();
    file = dir.getINode(path).asFile();
    assertTrue("Lease should exist.",
        cluster.getNamesystem().leaseManager.getLease(file) != null);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
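The safe-mode bracketing around saveNamespace is what forces the checkpoint: after the restart, the NameNode loads that fsimage, in which only the inode under construction (not the lease manager's own table) still records the open file, so the test proves the lease is rebuilt from inodes. A test needing this in several places could factor the pattern into a helper; a minimal sketch reusing only calls from the test above (the helper name is illustrative, not from the source):

static void forceCheckpoint(MiniDFSCluster cluster, DistributedFileSystem dfs)
    throws IOException {
  dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);  // freeze the namespace
  cluster.getNameNodeRpc().saveNamespace(0, 0);    // write a new fsimage
  dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);  // resume normal operation
}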
Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
The class TestINodeFile, method testInodeId.
/**
 * This test verifies inode ID counter and inode map functionality.
 */
@Test
public void testInodeId() throws IOException {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
      DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    FSNamesystem fsn = cluster.getNamesystem();
    long lastId = fsn.dir.getLastInodeId();
    // Ensure root has the correct inode ID.
    // Last inode ID should be the root inode ID and the inode map size
    // should be 1.
    int inodeCount = 1;
    long expectedLastInodeId = INodeId.ROOT_INODE_ID;
    assertEquals(fsn.dir.rootDir.getId(), INodeId.ROOT_INODE_ID);
    assertEquals(expectedLastInodeId, lastId);
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());
    // Create a directory.
    // Last inode ID and inode map size should increase by 1.
    FileSystem fs = cluster.getFileSystem();
    Path path = new Path("/test1");
    assertTrue(fs.mkdirs(path));
    assertEquals(++expectedLastInodeId, fsn.dir.getLastInodeId());
    assertEquals(++inodeCount, fsn.dir.getInodeMapSize());
    // Create a file.
    // Last inode ID and inode map size should increase by 1.
    NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
    DFSTestUtil.createFile(fs, new Path("/test1/file"), 1024, (short) 1, 0);
    assertEquals(++expectedLastInodeId, fsn.dir.getLastInodeId());
    assertEquals(++inodeCount, fsn.dir.getInodeMapSize());
    // Ensure the right inode ID is returned in the file status.
    HdfsFileStatus fileStatus = nnrpc.getFileInfo("/test1/file");
    assertEquals(expectedLastInodeId, fileStatus.getFileId());
    // Rename a directory.
    // Last inode ID and inode map size should not change.
    Path renamedPath = new Path("/test2");
    assertTrue(fs.rename(path, renamedPath));
    assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());
    // Delete test2/file and test2 and ensure the inode map size decreases.
    assertTrue(fs.delete(renamedPath, true));
    inodeCount -= 2;
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());
    // Create /test1/file1 and /test1/file2, to be concatenated below.
    String file1 = "/test1/file1";
    String file2 = "/test1/file2";
    DFSTestUtil.createFile(fs, new Path(file1), 512, (short) 1, 0);
    DFSTestUtil.createFile(fs, new Path(file2), 512, (short) 1, 0);
    // test1, file1 and file2 are created.
    inodeCount += 3;
    expectedLastInodeId += 3;
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());
    assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
    // Concat /test1/file1 into /test1/file2.
    nnrpc.concat(file2, new String[] { file1 });
    // file1 is merged into file2, so file1's inode is removed; no new
    // inode IDs are allocated.
    inodeCount--;
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());
    assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
    assertTrue(fs.delete(new Path("/test1"), true));
    // test1 and file2 are deleted.
    inodeCount -= 2;
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());
    // Make sure the editlog is loaded correctly.
    cluster.restartNameNode();
    cluster.waitActive();
    fsn = cluster.getNamesystem();
    assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());
    // Create two inodes: test2 and test2/file2.
    DFSTestUtil.createFile(fs, new Path("/test2/file2"), 1024, (short) 1, 0);
    expectedLastInodeId += 2;
    inodeCount += 2;
    assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());
    // Create /test3 and /test3/file.
    // /test3/file is a file under construction.
    FSDataOutputStream outStream = fs.create(new Path("/test3/file"));
    assertTrue(outStream != null);
    expectedLastInodeId += 2;
    inodeCount += 2;
    assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());
    // Apply editlogs to the fsimage and ensure inodeUnderConstruction is
    // handled.
    fsn.enterSafeMode(false);
    fsn.saveNamespace(0, 0);
    fsn.leaveSafeMode(false);
    outStream.close();
    // The lastInodeId in the fsimage should remain the same after restart.
    cluster.restartNameNode();
    cluster.waitActive();
    fsn = cluster.getNamesystem();
    assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
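The paired assertions on getLastInodeId() and getInodeMapSize() recur a dozen times above. A small helper would make the bookkeeping explicit; a sketch using only calls already seen in the test (the helper name is hypothetical, not from the source):

// Hypothetical helper: assert both inode counters in one call.
private static void assertInodeState(FSNamesystem fsn,
    long expectedLastInodeId, int expectedInodeCount) {
  assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
  assertEquals(expectedInodeCount, fsn.dir.getInodeMapSize());
}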
Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
The class TestVLong, method testVLongRandom.
@Test
public void testVLongRandom() throws IOException {
  int count = 1024 * 1024;
  long[] data = new long[count];
  Random rng = new Random();
  for (int i = 0; i < data.length; ++i) {
    // Pick a random bit width in [1, 64], assemble a random 64-bit value
    // from two 32-bit halves, then mask it down so every size class of the
    // variable-length encoding gets exercised. (Note: when shift == 64,
    // 1L << 64 wraps to 1 in Java, so the mask is 0 and the value
    // degenerates to 0.)
    int shift = rng.nextInt(Long.SIZE) + 1;
    long mask = (1L << shift) - 1;
    long a = ((long) rng.nextInt()) << 32;
    long b = ((long) rng.nextInt()) & 0xffffffffL;
    data[i] = (a + b) & mask;
  }
  FSDataOutputStream out = fs.create(path);
  for (int i = 0; i < data.length; ++i) {
    Utils.writeVLong(out, data[i]);
  }
  out.close();
  // Read everything back and verify the round trip.
  FSDataInputStream in = fs.open(path);
  for (int i = 0; i < data.length; ++i) {
    Assert.assertEquals(Utils.readVLong(in), data[i]);
  }
  in.close();
  fs.delete(path, false);
}
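The write/read pair works against any DataOutput/DataInput, so the round trip can also be checked without a FileSystem at all. A minimal sketch using Hadoop's in-memory buffers from org.apache.hadoop.io (not used in the original test):

// Self-contained VLong round trip through in-memory buffers.
DataOutputBuffer dob = new DataOutputBuffer();
Utils.writeVLong(dob, 123456789L);
DataInputBuffer dib = new DataInputBuffer();
dib.reset(dob.getData(), dob.getLength());
Assert.assertEquals(123456789L, Utils.readVLong(dib));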
Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
The class TestVLong, method testVLongByte.
@Test
public void testVLongByte() throws IOException {
  FSDataOutputStream out = fs.create(path);
  for (int i = Byte.MIN_VALUE; i <= Byte.MAX_VALUE; ++i) {
    Utils.writeVLong(out, i);
  }
  out.close();
  Assert.assertEquals("Incorrect encoded size", (1 << Byte.SIZE) + 96,
      fs.getFileStatus(path).getLen());
  FSDataInputStream in = fs.open(path);
  for (int i = Byte.MIN_VALUE; i <= Byte.MAX_VALUE; ++i) {
    long n = Utils.readVLong(in);
    Assert.assertEquals(n, i);
  }
  in.close();
  fs.delete(path, false);
}
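The expected size (1 << Byte.SIZE) + 96 = 352 follows from the encoding's size classes: of the 256 byte values, the 160 in the single-byte range fit in one byte each and the remaining 96 take two, so 160 + 2 * 96 = 352. A worked check of that arithmetic (the [-32, 127] boundary reflects TFile's VLong format as implied by the assertion above):

// Size accounting behind the "Incorrect encoded size" assertion.
int oneByteValues = 127 - (-32) + 1;              // 160 values, 1 byte each
int twoByteValues = 256 - oneByteValues;          //  96 values, 2 bytes each
int expected = oneByteValues + 2 * twoByteValues; // 160 + 192 = 352
Assert.assertEquals((1 << Byte.SIZE) + 96, expected);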
Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
The class TestVLong, method writeAndVerify.
// Write every short value shifted left by the given amount, verify the
// round trip, and return the total encoded length of the file.
private long writeAndVerify(int shift) throws IOException {
  FSDataOutputStream out = fs.create(path);
  for (int i = Short.MIN_VALUE; i <= Short.MAX_VALUE; ++i) {
    Utils.writeVLong(out, ((long) i) << shift);
  }
  out.close();
  FSDataInputStream in = fs.open(path);
  for (int i = Short.MIN_VALUE; i <= Short.MAX_VALUE; ++i) {
    long n = Utils.readVLong(in);
    Assert.assertEquals(n, ((long) i) << shift);
  }
  in.close();
  long ret = fs.getFileStatus(path).getLen();
  fs.delete(path, false);
  return ret;
}
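Callers in the test class use the returned length to pin down the encoded size at each magnitude. A hedged usage sketch (the shift sweep and the weak assertion here are illustrative; the real test methods assert exact expected sizes per shift, which are not reproduced):

// Hypothetical caller: sweep the shift so values span 16 to 64 bits.
for (int shift = 0; shift <= Long.SIZE - Short.SIZE; ++shift) {
  long encodedLength = writeAndVerify(shift);
  Assert.assertTrue(encodedLength > 0);
}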