Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
From the class TestFSRMStateStore, method verifyFilesUnreadablebyHDFS:
private void verifyFilesUnreadablebyHDFS(MiniDFSCluster cluster, Path root) throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  Queue<Path> paths = new LinkedList<>();
  paths.add(root);
  while (!paths.isEmpty()) {
    Path p = paths.poll();
    FileStatus stat = fs.getFileStatus(p);
    if (!stat.isDirectory()) {
      try {
        LOG.warn("\n\n ##Testing path [" + p + "]\n\n");
        fs.open(p);
        Assert.fail("Super user should not be able to read ["
            + UserGroupInformation.getCurrentUser() + "] [" + p.getName() + "]");
      } catch (AccessControlException e) {
        Assert.assertTrue(e.getMessage().contains(
            "superuser is not allowed to perform this operation"));
      } catch (Exception e) {
        Assert.fail("Should get an AccessControlException here");
      }
    }
    if (stat.isDirectory()) {
      FileStatus[] ls = fs.listStatus(p);
      for (FileStatus f : ls) {
        paths.add(f.getPath());
      }
    }
  }
}
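The helper above is essentially a breadth-first walk of the HDFS namespace that tries to open every regular file it reaches. A minimal sketch of that traversal pattern in isolation is shown below; the class and method names (FsWalk, listAllFiles) are illustrative and not part of the Hadoop test.

import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FsWalk {
  /**
   * Breadth-first listing of every regular file under root, mirroring the
   * queue-based traversal used by verifyFilesUnreadablebyHDFS.
   */
  static List<Path> listAllFiles(FileSystem fs, Path root) throws Exception {
    List<Path> files = new ArrayList<>();
    Queue<Path> pending = new LinkedList<>();
    pending.add(root);
    while (!pending.isEmpty()) {
      Path p = pending.poll();
      FileStatus stat = fs.getFileStatus(p);
      if (stat.isDirectory()) {
        for (FileStatus child : fs.listStatus(p)) {
          pending.add(child.getPath());   // descend into subdirectories
        }
      } else {
        files.add(p);                     // leaf: a regular file
      }
    }
    return files;
  }
}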
Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
From the class DistCpSync, method preSyncCheck:
/**
 * Check that three conditions are met before syncing:
 * 1. There is only one source directory.
 * 2. Both the source and the target file system are DistributedFileSystem.
 * 3. The target file system has not changed since the "from" snapshot was taken.
 * Throws an exception if either of the first two conditions is not met, and
 * returns false (falling back to the default distcp) if the third is not met.
 */
private boolean preSyncCheck() throws IOException {
  List<Path> sourcePaths = inputOptions.getSourcePaths();
  if (sourcePaths.size() != 1) {
    // we only support one source dir which must be a snapshottable directory
    throw new IllegalArgumentException(sourcePaths.size() + " source paths are provided");
  }
  final Path sourceDir = sourcePaths.get(0);
  final Path targetDir = inputOptions.getTargetPath();
  final FileSystem srcFs = sourceDir.getFileSystem(conf);
  final FileSystem tgtFs = targetDir.getFileSystem(conf);
  final FileSystem snapshotDiffFs = isRdiff() ? tgtFs : srcFs;
  final Path snapshotDiffDir = isRdiff() ? targetDir : sourceDir;
  // both the source and the target file system must be DistributedFileSystem
  if (!(srcFs instanceof DistributedFileSystem) || !(tgtFs instanceof DistributedFileSystem)) {
    throw new IllegalArgumentException("The FileSystems needs to"
        + " be DistributedFileSystem for using snapshot-diff-based distcp");
  }
  final DistributedFileSystem targetFs = (DistributedFileSystem) tgtFs;
  // make sure targetFS has no change between from and the current states
  if (!checkNoChange(targetFs, targetDir)) {
    // set the source path using the snapshot path
    inputOptions.setSourcePaths(
        Arrays.asList(getSnapshotPath(sourceDir, inputOptions.getToSnapshot())));
    return false;
  }
  final String from = getSnapshotName(inputOptions.getFromSnapshot());
  final String to = getSnapshotName(inputOptions.getToSnapshot());
  try {
    final FileStatus fromSnapshotStat =
        snapshotDiffFs.getFileStatus(getSnapshotPath(snapshotDiffDir, from));
    final FileStatus toSnapshotStat =
        snapshotDiffFs.getFileStatus(getSnapshotPath(snapshotDiffDir, to));
    if (isRdiff()) {
      // If fromSnapshot isn't current dir then do a time check
      if (!from.equals("")
          && fromSnapshotStat.getModificationTime() < toSnapshotStat.getModificationTime()) {
        throw new HadoopIllegalArgumentException(
            "Snapshot " + from + " should be newer than " + to);
      }
    } else {
      // If toSnapshot isn't current dir then do a time check
      if (!to.equals("")
          && fromSnapshotStat.getModificationTime() > toSnapshotStat.getModificationTime()) {
        throw new HadoopIllegalArgumentException(
            "Snapshot " + to + " should be newer than " + from);
      }
    }
  } catch (FileNotFoundException nfe) {
    throw new InvalidInputException("Input snapshot is not found", nfe);
  }
  return true;
}
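preSyncCheck assumes the "from" and "to" snapshots already exist on a snapshottable directory; getSnapshotPath and checkNoChange are private helpers not shown in this snippet. A rough sketch of preparing those snapshots through the public DistributedFileSystem API might look like the following, where the /src path and the snapshot names s1/s2 are made up for illustration and the default file system is assumed to be HDFS.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class SnapshotSetup {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path source = new Path("/src");   // hypothetical source directory
    // The cast only succeeds when fs.defaultFS points at an HDFS cluster.
    DistributedFileSystem dfs =
        (DistributedFileSystem) source.getFileSystem(conf);
    dfs.allowSnapshot(source);         // make the directory snapshottable
    dfs.createSnapshot(source, "s1");  // the "from" snapshot
    // ... modify files under /src ...
    dfs.createSnapshot(source, "s2");  // the "to" snapshot
    // distcp -update -diff s1 s2 /src /dst can now use the snapshot diff;
    // each snapshot is reachable under the hidden /src/.snapshot/<name> path.
  }
}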
Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
From the class TestDFSIO, method checkStoragePolicy:
private boolean checkStoragePolicy(String storagePolicy, FileSystem fs) throws IOException {
  boolean isValid = false;
  Collection<BlockStoragePolicy> storagePolicies =
      ((DistributedFileSystem) fs).getAllStoragePolicies();
  try {
    for (BlockStoragePolicy policy : storagePolicies) {
      if (policy.getName().equals(storagePolicy)) {
        isValid = true;
        break;
      }
    }
  } catch (Exception e) {
    throw new IOException("Get block storage policies error: ", e);
  }
  if (!isValid) {
    System.out.println("Invalid block storage policy: " + storagePolicy);
    System.out.println("Current supported storage policy list: ");
    for (BlockStoragePolicy policy : storagePolicies) {
      System.out.println(policy.getName());
    }
    return false;
  }
  config.set(STORAGE_POLICY_NAME_KEY, storagePolicy);
  LOG.info("storagePolicy = " + storagePolicy);
  return true;
}
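checkStoragePolicy only validates that the requested policy name is known to the NameNode; applying a policy to a path is a separate call. A small standalone sketch of listing the available policies and then applying one could look like this, assuming an HDFS default file system; the /archive path is made up, while COLD is one of HDFS's built-in policy names.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;

public class StoragePolicyDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      throw new IllegalStateException("Block storage policies require HDFS");
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    // List every policy the NameNode knows about, as checkStoragePolicy does.
    for (BlockStoragePolicy policy : dfs.getAllStoragePolicies()) {
      System.out.println(policy.getName());
    }
    // Apply a policy to a directory (illustrative path).
    dfs.setStoragePolicy(new Path("/archive"), "COLD");
  }
}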
Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
From the class TestBlockRecovery, method testRaceBetweenReplicaRecoveryAndFinalizeBlock:
/**
* Test to verify the race between finalizeBlock and Lease recovery
*
* @throws Exception
*/
@Test(timeout = 20000)
public void testRaceBetweenReplicaRecoveryAndFinalizeBlock() throws Exception {
  // Stop the Mocked DN started in startup()
  tearDown();
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY, "1000");
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  try {
    cluster.waitClusterUp();
    DistributedFileSystem fs = cluster.getFileSystem();
    Path path = new Path("/test");
    FSDataOutputStream out = fs.create(path);
    out.writeBytes("data");
    out.hsync();
    List<LocatedBlock> blocks = DFSTestUtil.getAllBlocks(fs.open(path));
    final LocatedBlock block = blocks.get(0);
    final DataNode dataNode = cluster.getDataNodes().get(0);
    final AtomicBoolean recoveryInitResult = new AtomicBoolean(true);
    Thread recoveryThread = new Thread() {
      @Override
      public void run() {
        try {
          DatanodeInfo[] locations = block.getLocations();
          final RecoveringBlock recoveringBlock = new RecoveringBlock(block.getBlock(),
              locations, block.getBlock().getGenerationStamp() + 1);
          try (AutoCloseableLock lock = dataNode.data.acquireDatasetLock()) {
            Thread.sleep(2000);
            dataNode.initReplicaRecovery(recoveringBlock);
          }
        } catch (Exception e) {
          recoveryInitResult.set(false);
        }
      }
    };
    recoveryThread.start();
    try {
      out.close();
    } catch (IOException e) {
      Assert.assertTrue("Writing should fail", e.getMessage().contains("are bad. Aborting..."));
    } finally {
      recoveryThread.join();
    }
    Assert.assertTrue("Recovery should be initiated successfully", recoveryInitResult.get());
    dataNode.updateReplicaUnderRecovery(block.getBlock(),
        block.getBlock().getGenerationStamp() + 1, block.getBlock().getBlockId(),
        block.getBlockSize());
  } finally {
    if (null != cluster) {
      cluster.shutdown();
      cluster = null;
    }
  }
}
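The test depends on hsync() leaving the file's last block unfinalized so that the recovery thread and close() race on it. A stripped-down sketch of just that write-path setup, without the mocked recovery thread, might look like the following; the file path is arbitrary.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class UnderConstructionWrite {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1).build();
    try {
      cluster.waitClusterUp();
      DistributedFileSystem fs = cluster.getFileSystem();
      Path path = new Path("/under-construction");
      FSDataOutputStream out = fs.create(path);
      out.writeBytes("data");
      // hsync() pushes the data to the DataNode but leaves the last block
      // unfinalized, which is the state the race test needs before close().
      out.hsync();
      // ... a recovery would be triggered here in the real test ...
      out.close();   // close() finalizes the block
    } finally {
      cluster.shutdown();
    }
  }
}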
Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
From the class TestBlockReplacement, method testBlockMoveAcrossStorageInSameNode:
@Test
public void testBlockMoveAcrossStorageInSameNode() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  // create only one datanode in the cluster to verify movement within
  // datanode.
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .storageTypes(new StorageType[] { StorageType.DISK, StorageType.ARCHIVE }).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final Path file = new Path("/testBlockMoveAcrossStorageInSameNode/file");
    DFSTestUtil.createFile(dfs, file, 1024, (short) 1, 1024);
    LocatedBlocks locatedBlocks = dfs.getClient().getLocatedBlocks(file.toString(), 0);
    // get the current located block
    LocatedBlock locatedBlock = locatedBlocks.get(0);
    ExtendedBlock block = locatedBlock.getBlock();
    DatanodeInfo[] locations = locatedBlock.getLocations();
    assertEquals(1, locations.length);
    StorageType[] storageTypes = locatedBlock.getStorageTypes();
    // current block should be written to DISK
    assertTrue(storageTypes[0] == StorageType.DISK);
    DatanodeInfo source = locations[0];
    // move block to ARCHIVE by using same DataNodeInfo for source, proxy and
    // destination so that movement happens within datanode
    assertTrue(replaceBlock(block, source, source, source, StorageType.ARCHIVE, Status.SUCCESS));
    // wait till namenode notified
    Thread.sleep(3000);
    locatedBlocks = dfs.getClient().getLocatedBlocks(file.toString(), 0);
    // get the current located block
    locatedBlock = locatedBlocks.get(0);
    assertEquals("Storage should be only one", 1, locatedBlock.getLocations().length);
    assertTrue("Block should be moved to ARCHIVE",
        locatedBlock.getStorageTypes()[0] == StorageType.ARCHIVE);
  } finally {
    cluster.shutdown();
  }
}
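replaceBlock is a test-local helper that sends the block-replacement request directly to the DataNode, so it is not reproduced here. The surrounding pattern of asking the NameNode which storage type a block currently sits on can be sketched on its own; the sketch below uses only public test utilities, and the file path is made up.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class BlockStorageTypeCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // One DataNode with both a DISK and an ARCHIVE volume, as in the test.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .storageTypes(new StorageType[] {StorageType.DISK, StorageType.ARCHIVE})
        .build();
    try {
      cluster.waitActive();
      DistributedFileSystem dfs = cluster.getFileSystem();
      Path file = new Path("/storage-type-check");
      DFSTestUtil.createFile(dfs, file, 1024, (short) 1, 0L);
      // Ask the NameNode where (and on what storage type) the block lives.
      LocatedBlocks blocks = dfs.getClient().getLocatedBlocks(file.toString(), 0);
      LocatedBlock first = blocks.get(0);
      for (StorageType type : first.getStorageTypes()) {
        System.out.println("Replica stored on: " + type);
      }
    } finally {
      cluster.shutdown();
    }
  }
}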