Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
The class TestFsck, method testStoragePoliciesCK.
/**
 * Test storage policy display.
 */
@Test
public void testStoragePoliciesCK() throws Exception {
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(3)
      .storageTypes(new StorageType[] {StorageType.DISK, StorageType.ARCHIVE})
      .build();
  cluster.waitActive();
  final DistributedFileSystem dfs = cluster.getFileSystem();
  writeFile(dfs, "/testhot", "file", "HOT");
  writeFile(dfs, "/testwarm", "file", "WARM");
  writeFile(dfs, "/testcold", "file", "COLD");
  String outStr = runFsck(conf, 0, true, "/", "-storagepolicies");
  assertTrue(outStr.contains("DISK:3(HOT)"));
  assertTrue(outStr.contains("DISK:1,ARCHIVE:2(WARM)"));
  assertTrue(outStr.contains("ARCHIVE:3(COLD)"));
  assertTrue(outStr.contains("All blocks satisfy specified storage policy."));
  dfs.setStoragePolicy(new Path("/testhot"), "COLD");
  dfs.setStoragePolicy(new Path("/testwarm"), "COLD");
  outStr = runFsck(conf, 0, true, "/", "-storagepolicies");
  assertTrue(outStr.contains("DISK:3(HOT)"));
  assertTrue(outStr.contains("DISK:1,ARCHIVE:2(WARM)"));
  assertTrue(outStr.contains("ARCHIVE:3(COLD)"));
  assertFalse(outStr.contains("All blocks satisfy specified storage policy."));
}
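The writeFile helper is not shown on this page. A minimal sketch of what it plausibly does, assuming it creates the parent directory, applies the named storage policy to it, and then writes a small file; the signature and file size here are assumptions, not the actual TestFsck helper:

// Hypothetical sketch of the writeFile helper used above: create the
// directory, set the requested storage policy on it, then write a small
// replicated file into it (assumes the surrounding test class's imports).
private void writeFile(DistributedFileSystem dfs, String dirName,
    String fileName, String storagePolicy) throws IOException {
  Path dirPath = new Path(dirName);
  dfs.mkdirs(dirPath);
  // setStoragePolicy is a real DistributedFileSystem API; "HOT", "WARM",
  // and "COLD" are built-in HDFS policy names.
  dfs.setStoragePolicy(dirPath, storagePolicy);
  DFSTestUtil.createFile(dfs, new Path(dirPath, fileName), 1024L, (short) 3, 0L);
}

With three datanodes each carrying DISK and ARCHIVE storage, fsck -storagepolicies can then report how many replicas of each block landed on each storage type, which is what the DISK:3(HOT) style assertions check.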
Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
The class TestFsck, method testUpgradeDomain.
private void testUpgradeDomain(boolean defineUpgradeDomain,
    boolean displayUpgradeDomain) throws Exception {
  final short replFactor = 1;
  final short numDN = 1;
  final long blockSize = 512;
  final long fileSize = 1024;
  final String upgradeDomain = "ud1";
  final String[] racks = {"/rack1"};
  final String[] hosts = {"127.0.0.1"};
  HostsFileWriter hostsFileWriter = new HostsFileWriter();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, replFactor);
  if (defineUpgradeDomain) {
    conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY,
        CombinedHostFileManager.class, HostConfigManager.class);
    hostsFileWriter.initialize(conf, "temp/fsckupgradedomain");
  }
  DistributedFileSystem dfs;
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(numDN).hosts(hosts).racks(racks).build();
  cluster.waitClusterUp();
  dfs = cluster.getFileSystem();
  // Configure the upgrade domain on the datanode.
  if (defineUpgradeDomain) {
    DatanodeAdminProperties dnProp = new DatanodeAdminProperties();
    DatanodeID datanodeID = cluster.getDataNodes().get(0).getDatanodeId();
    dnProp.setHostName(datanodeID.getHostName());
    dnProp.setPort(datanodeID.getXferPort());
    dnProp.setUpgradeDomain(upgradeDomain);
    hostsFileWriter.initIncludeHosts(new DatanodeAdminProperties[] {dnProp});
    cluster.getFileSystem().refreshNodes();
  }
  // Create a test file.
  final String testFile = "/testfile";
  final Path path = new Path(testFile);
  DFSTestUtil.createFile(dfs, path, fileSize, replFactor, 1000L);
  DFSTestUtil.waitReplication(dfs, path, replFactor);
  try {
    String fsckOut = runFsck(conf, 0, true, testFile, "-files", "-blocks",
        displayUpgradeDomain ? "-upgradedomains" : "-locations");
    assertTrue(fsckOut.contains(NamenodeFsck.HEALTHY_STATUS));
    String udValue = defineUpgradeDomain ? upgradeDomain : NamenodeFsck.UNDEFINED;
    assertEquals(displayUpgradeDomain, fsckOut.contains("(ud=" + udValue + ")"));
  } finally {
    if (defineUpgradeDomain) {
      hostsFileWriter.cleanup();
    }
  }
}
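The runFsck helper shared by all of these TestFsck snippets is also not shown. A plausible minimal sketch, assuming it drives the DFSck tool through ToolRunner, captures its output, and optionally checks the exit code; the exact TestFsck implementation may differ:

// Hedged sketch of runFsck: run hdfs fsck with the given arguments, capture
// everything the tool prints, and assert the exit code when asked to.
static String runFsck(Configuration conf, int expectedErrCode,
    boolean checkErrorCode, String... path) throws Exception {
  ByteArrayOutputStream bStream = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(bStream, true);
  int errCode = ToolRunner.run(new DFSck(conf, out), path);
  if (checkErrorCode) {
    assertEquals(expectedErrCode, errCode);
  }
  return bStream.toString();
}

Returning the captured text lets each test grep for markers such as NamenodeFsck.HEALTHY_STATUS or the (ud=...) upgrade-domain suffix.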
Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
The class TestFsck, method testFsckWithMaintenanceReplicas.
/**
 * Test that blocks on maintenance hosts are not shown as missing.
 */
@Test(timeout = 90000)
public void testFsckWithMaintenanceReplicas() throws Exception {
  final short replFactor = 2;
  short numDn = 2;
  final long blockSize = 512;
  String[] hosts = {"host1", "host2"};
  String[] racks = {"/rack1", "/rack2"};
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, replFactor);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, replFactor);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY, replFactor);
  DistributedFileSystem dfs;
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(numDn).hosts(hosts).racks(racks).build();
  assertNotNull("Failed Cluster Creation", cluster);
  cluster.waitClusterUp();
  dfs = cluster.getFileSystem();
  assertNotNull("Failed to get FileSystem", dfs);
  DFSTestUtil util = new DFSTestUtil.Builder()
      .setName(getClass().getSimpleName()).setNumFiles(1).build();
  // Create a test file.
  final String testFile = "/testfile";
  final Path path = new Path(testFile);
  util.createFile(dfs, path, 1024, replFactor, 1000L);
  util.waitReplication(dfs, path, replFactor);
  StringBuilder sb = new StringBuilder();
  for (LocatedBlock lb : util.getAllBlocks(dfs, path)) {
    sb.append(lb.getBlock().getLocalBlock().getBlockName()).append(" ");
  }
  String[] bIds = sb.toString().split(" ");
  // Make sure the datanode that has the replica is healthy before maintenance.
  String outStr = runFsck(conf, 0, true, testFile);
  System.out.println(outStr);
  assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
  FSNamesystem fsn = cluster.getNameNode().getNamesystem();
  BlockManager bm = fsn.getBlockManager();
  DatanodeManager dnm = bm.getDatanodeManager();
  DatanodeDescriptor dn = dnm.getDatanode(cluster.getDataNodes().get(0).getDatanodeId());
  bm.getDatanodeManager().getDecomManager().startMaintenance(dn, Long.MAX_VALUE);
  final String dnName = dn.getXferAddr();
  // Wait for the node to enter the ENTERING_MAINTENANCE state.
  GenericTestUtils.waitFor(new Supplier<Boolean>() {

    @Override
    public Boolean get() {
      DatanodeInfo datanodeInfo = null;
      try {
        for (DatanodeInfo info : dfs.getDataNodeStats()) {
          if (dnName.equals(info.getXferAddr())) {
            datanodeInfo = info;
          }
        }
        if (datanodeInfo != null && datanodeInfo.isEnteringMaintenance()) {
          // Verify that fsck returns Healthy status.
          String fsckOut = runFsck(conf, 0, true, testFile, "-maintenance");
          assertTrue(fsckOut.contains(NamenodeFsck.HEALTHY_STATUS));
          return true;
        }
      } catch (Exception e) {
        LOG.warn("Unexpected exception: " + e);
        return false;
      }
      return false;
    }
  }, 500, 30000);
  // Start a third datanode and wait for the node to reach the IN_MAINTENANCE state.
  cluster.startDataNodes(conf, 1, true, null,
      new String[] {"/rack3"}, new String[] {"host3"}, null, false);
  GenericTestUtils.waitFor(new Supplier<Boolean>() {

    @Override
    public Boolean get() {
      DatanodeInfo datanodeInfo = null;
      try {
        for (DatanodeInfo info : dfs.getDataNodeStats()) {
          if (dnName.equals(info.getXferAddr())) {
            datanodeInfo = info;
          }
        }
        if (datanodeInfo != null && datanodeInfo.isInMaintenance()) {
          return true;
        }
      } catch (Exception e) {
        LOG.warn("Unexpected exception: " + e);
        return false;
      }
      return false;
    }
  }, 500, 30000);
  // Verify that fsck returns Healthy status.
  String fsckOut = runFsck(conf, 0, true, testFile, "-maintenance");
  assertTrue(fsckOut.contains(NamenodeFsck.HEALTHY_STATUS));
  // Verify that fsck returns Healthy status even without the -maintenance option.
  fsckOut = runFsck(conf, 0, true, testFile);
  assertTrue(fsckOut.contains(NamenodeFsck.HEALTHY_STATUS));
}
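The two polling loops above repeat the same anonymous Supplier pattern. On Java 8+ it could be factored into a small helper; the sketch below is an illustration under that assumption, and the helper name and Predicate-based signature are hypothetical, not part of TestFsck:

// Hypothetical helper: poll the datanode report until the node with the given
// transfer address satisfies the supplied state check, or waitFor times out.
private static void waitForDatanodeState(final DistributedFileSystem dfs,
    final String dnXferAddr,
    final java.util.function.Predicate<DatanodeInfo> state) throws Exception {
  GenericTestUtils.waitFor(() -> {
    try {
      for (DatanodeInfo info : dfs.getDataNodeStats()) {
        if (dnXferAddr.equals(info.getXferAddr()) && state.test(info)) {
          return true;
        }
      }
    } catch (Exception e) {
      // Keep polling on transient RPC failures.
    }
    return false;
  }, 500, 30000);
}

The second wait in the test would then collapse to waitForDatanodeState(dfs, dnName, DatanodeInfo::isInMaintenance).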
Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
The class TestINodeFile, method testInodeIdBasedPaths.
/**
 * Tests for addressing files using /.reserved/.inodes/<inodeID> in file system
 * operations.
 */
@Test
public void testInodeIdBasedPaths() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    NamenodeProtocols nnRpc = cluster.getNameNodeRpc();
    // FileSystem#mkdirs "/testInodeIdBasedPaths"
    Path baseDir = getInodePath(INodeId.ROOT_INODE_ID, "testInodeIdBasedPaths");
    Path baseDirRegPath = new Path("/testInodeIdBasedPaths");
    fs.mkdirs(baseDir);
    fs.exists(baseDir);
    long baseDirFileId = nnRpc.getFileInfo(baseDir.toString()).getFileId();
    // FileSystem#create file and FileSystem#close
    Path testFileInodePath = getInodePath(baseDirFileId, "test1");
    Path testFileRegularPath = new Path(baseDir, "test1");
    final int testFileBlockSize = 1024;
    FileSystemTestHelper.createFile(fs, testFileInodePath, 1, testFileBlockSize);
    assertTrue(fs.exists(testFileInodePath));
    // FileSystem#setPermission
    FsPermission perm = new FsPermission((short) 0666);
    fs.setPermission(testFileInodePath, perm);
    // FileSystem#getFileStatus and FileSystem#getPermission
    FileStatus fileStatus = fs.getFileStatus(testFileInodePath);
    assertEquals(perm, fileStatus.getPermission());
    // FileSystem#setOwner
    fs.setOwner(testFileInodePath, fileStatus.getOwner(), fileStatus.getGroup());
    // FileSystem#setTimes
    fs.setTimes(testFileInodePath, 0, 0);
    fileStatus = fs.getFileStatus(testFileInodePath);
    assertEquals(0, fileStatus.getModificationTime());
    assertEquals(0, fileStatus.getAccessTime());
    // FileSystem#setReplication
    fs.setReplication(testFileInodePath, (short) 3);
    fileStatus = fs.getFileStatus(testFileInodePath);
    assertEquals(3, fileStatus.getReplication());
    fs.setReplication(testFileInodePath, (short) 1);
    // ClientProtocol#getPreferredBlockSize
    assertEquals(testFileBlockSize, nnRpc.getPreferredBlockSize(testFileInodePath.toString()));
    /*
     * HDFS-6749 added missing calls to FSDirectory.resolvePath in the
     * following four methods. The calls below ensure that
     * /.reserved/.inodes paths work properly. No need to check return
     * values as these methods are tested elsewhere.
     */
    {
      fs.isFileClosed(testFileInodePath);
      fs.getAclStatus(testFileInodePath);
      fs.getXAttrs(testFileInodePath);
      fs.listXAttrs(testFileInodePath);
      fs.access(testFileInodePath, FsAction.READ_WRITE);
    }
    // Symbolic link related tests.
    // A reserved path is not allowed as a target.
    String invalidTarget = new Path(baseDir, "invalidTarget").toString();
    String link = new Path(baseDir, "link").toString();
    testInvalidSymlinkTarget(nnRpc, invalidTarget, link);
    // Test creating a link using a reserved inode path.
    String validTarget = "/validtarget";
    testValidSymlinkTarget(nnRpc, validTarget, link);
    // FileSystem#append
    fs.append(testFileInodePath);
    // DistributedFileSystem#recoverLease
    fs.recoverLease(testFileInodePath);
    // Namenode#getBlockLocations
    LocatedBlocks l1 = nnRpc.getBlockLocations(testFileInodePath.toString(), 0, Long.MAX_VALUE);
    LocatedBlocks l2 = nnRpc.getBlockLocations(testFileRegularPath.toString(), 0, Long.MAX_VALUE);
    checkEquals(l1, l2);
    // FileSystem#rename - both variants.
    Path renameDst = getInodePath(baseDirFileId, "test2");
    fileStatus = fs.getFileStatus(testFileInodePath);
    // Rename variant 1: rename and rename back.
    fs.rename(testFileInodePath, renameDst);
    fs.rename(renameDst, testFileInodePath);
    assertEquals(fileStatus, fs.getFileStatus(testFileInodePath));
    // Rename variant 2: rename and rename back.
    fs.rename(testFileInodePath, renameDst, Rename.OVERWRITE);
    fs.rename(renameDst, testFileInodePath, Rename.OVERWRITE);
    assertEquals(fileStatus, fs.getFileStatus(testFileInodePath));
    // FileSystem#getContentSummary
    assertEquals(fs.getContentSummary(testFileRegularPath).toString(),
        fs.getContentSummary(testFileInodePath).toString());
    // FileSystem#listFiles
    checkEquals(fs.listFiles(baseDirRegPath, false), fs.listFiles(baseDir, false));
    // FileSystem#delete
    fs.delete(testFileInodePath, true);
    assertFalse(fs.exists(testFileInodePath));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
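The getInodePath helper builds a path of the form /.reserved/.inodes/<inodeId>/<child>, which is what lets every FileSystem call above address the file by inode ID instead of by name. A minimal sketch under that assumption; the real TestINodeFile helper may construct the string differently:

// Hypothetical sketch: build an inode-ID-based path such as
// /.reserved/.inodes/16386/test1 from an inode ID and a child name.
private static Path getInodePath(long inodeId, String remainingPath) {
  return new Path("/.reserved/.inodes/" + inodeId + "/" + remainingPath);
}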
Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
The class TestLeaseManager, method testLeaseRestorationOnRestart.
/**
 * Make sure the lease is restored even if only the inode has the record.
 */
@Test
public void testLeaseRestorationOnRestart() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).numDataNodes(1).build();
    DistributedFileSystem dfs = cluster.getFileSystem();
    // Create an empty file.
    String path = "/testLeaseRestorationOnRestart";
    FSDataOutputStream out = dfs.create(new Path(path));
    // Remove the lease from the lease manager, but leave it in the inode.
    FSDirectory dir = cluster.getNamesystem().getFSDirectory();
    INodeFile file = dir.getINode(path).asFile();
    cluster.getNamesystem().leaseManager.removeLease(
        file.getFileUnderConstructionFeature().getClientName(), file);
    // Save an fsimage.
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    cluster.getNameNodeRpc().saveNamespace(0, 0);
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    // Restart the namenode.
    cluster.restartNameNode(true);
    // Check whether the lease manager still has the lease.
    dir = cluster.getNamesystem().getFSDirectory();
    file = dir.getINode(path).asFile();
    assertTrue("Lease should exist.",
        cluster.getNamesystem().leaseManager.getLease(file) != null);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
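As a usage note, the inverse condition can be asserted just before the restart. The extra check below is an illustration, not part of the original test; countLease and isUnderConstruction are existing LeaseManager and INodeFile methods, but their use here is an assumption:

// After removeLease(...) the lease manager should hold no leases, while the
// inode still records the file as under construction.
assertEquals(0, cluster.getNamesystem().leaseManager.countLease());
assertTrue(file.isUnderConstruction());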