use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
the class DFSAdmin method refreshServiceAcl.
/**
* Refresh the authorization policy on the {@link NameNode}.
* @return exit code 0 on success, non-zero on failure
* @throws IOException
*/
public int refreshServiceAcl() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();
  // For security authorization, the service principal for this call
  // should be the NameNode's principal.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));
  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtilClient.isLogicalUri(conf, dfsUri);
  if (isHaEnabled) {
    // Run refreshServiceAcl for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshAuthorizationPolicyProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshAuthorizationPolicyProtocol.class);
    for (ProxyAndInfo<RefreshAuthorizationPolicyProtocol> proxy : proxies) {
      proxy.getProxy().refreshServiceAcl();
      System.out.println("Refresh service acl successful for " + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshAuthorizationPolicyProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshAuthorizationPolicyProtocol.class).getProxy();
    // Refresh the authorization policy in effect
    refreshProtocol.refreshServiceAcl();
    System.out.println("Refresh service acl successful");
  }
  return 0;
}
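This method backs the dfsadmin -refreshServiceAcl command. Below is a minimal sketch of driving the same refresh programmatically through ToolRunner; the class name RefreshAclExample is illustrative only, while DFSAdmin and ToolRunner are the real Hadoop classes (DFSAdmin implements Tool, so generic options such as -fs are parsed for you).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.util.ToolRunner;

public class RefreshAclExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Equivalent to running: hdfs dfsadmin -refreshServiceAcl
    int exitCode = ToolRunner.run(conf, new DFSAdmin(conf),
        new String[] { "-refreshServiceAcl" });
    System.exit(exitCode);
  }
}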
use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
the class TestBlockRecovery method testRaceBetweenReplicaRecoveryAndFinalizeBlock.
/**
* Test to verify the race between finalizeBlock and lease recovery.
*
* @throws Exception
*/
@Test(timeout = 20000)
public void testRaceBetweenReplicaRecoveryAndFinalizeBlock() throws Exception {
  // Stop the Mocked DN started in startup()
  tearDown();
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY, "1000");
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  try {
    cluster.waitClusterUp();
    DistributedFileSystem fs = cluster.getFileSystem();
    Path path = new Path("/test");
    FSDataOutputStream out = fs.create(path);
    out.writeBytes("data");
    out.hsync();
    List<LocatedBlock> blocks = DFSTestUtil.getAllBlocks(fs.open(path));
    final LocatedBlock block = blocks.get(0);
    final DataNode dataNode = cluster.getDataNodes().get(0);
    final AtomicBoolean recoveryInitResult = new AtomicBoolean(true);
    Thread recoveryThread = new Thread() {
      @Override
      public void run() {
        try {
          DatanodeInfo[] locations = block.getLocations();
          final RecoveringBlock recoveringBlock = new RecoveringBlock(block.getBlock(),
              locations, block.getBlock().getGenerationStamp() + 1);
          try (AutoCloseableLock lock = dataNode.data.acquireDatasetLock()) {
            Thread.sleep(2000);
            dataNode.initReplicaRecovery(recoveringBlock);
          }
        } catch (Exception e) {
          recoveryInitResult.set(false);
        }
      }
    };
    recoveryThread.start();
    try {
      out.close();
    } catch (IOException e) {
      Assert.assertTrue("Writing should fail",
          e.getMessage().contains("are bad. Aborting..."));
    } finally {
      recoveryThread.join();
    }
    Assert.assertTrue("Recovery should be initiated successfully",
        recoveryInitResult.get());
    dataNode.updateReplicaUnderRecovery(block.getBlock(),
        block.getBlock().getGenerationStamp() + 1, block.getBlock().getBlockId(),
        block.getBlockSize());
  } finally {
    if (null != cluster) {
      cluster.shutdown();
      cluster = null;
    }
  }
}
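In the test above, the failing out.close() is what triggers lease recovery implicitly. For comparison, a client can also request lease recovery explicitly; the following is a minimal sketch against a running cluster and file like the ones in the test (the polling loop and the 1-second interval are illustrative, not taken from the test):

DistributedFileSystem fs = cluster.getFileSystem();
Path path = new Path("/test");
// recoverLease() returns true once the last block of the file has been finalized
boolean recovered = fs.recoverLease(path);
while (!recovered) {
  Thread.sleep(1000);
  recovered = fs.recoverLease(path);
}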
use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
the class TestBlockReplacement method testBlockMoveAcrossStorageInSameNode.
@Test
public void testBlockMoveAcrossStorageInSameNode() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  // create only one datanode in the cluster to verify movement within
  // the datanode.
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .storageTypes(new StorageType[] { StorageType.DISK, StorageType.ARCHIVE }).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final Path file = new Path("/testBlockMoveAcrossStorageInSameNode/file");
    DFSTestUtil.createFile(dfs, file, 1024, (short) 1, 1024);
    LocatedBlocks locatedBlocks = dfs.getClient().getLocatedBlocks(file.toString(), 0);
    // get the current location of the block
    LocatedBlock locatedBlock = locatedBlocks.get(0);
    ExtendedBlock block = locatedBlock.getBlock();
    DatanodeInfo[] locations = locatedBlock.getLocations();
    assertEquals(1, locations.length);
    StorageType[] storageTypes = locatedBlock.getStorageTypes();
    // current block should be written to DISK
    assertTrue(storageTypes[0] == StorageType.DISK);
    DatanodeInfo source = locations[0];
    // move block to ARCHIVE by using the same DatanodeInfo for source, proxy and
    // destination so that the movement happens within the datanode
    assertTrue(replaceBlock(block, source, source, source, StorageType.ARCHIVE,
        Status.SUCCESS));
    // wait till the namenode is notified of the new storage type
    Thread.sleep(3000);
    locatedBlocks = dfs.getClient().getLocatedBlocks(file.toString(), 0);
    // re-fetch the block location
    locatedBlock = locatedBlocks.get(0);
    assertEquals("Storage should be only one", 1, locatedBlock.getLocations().length);
    assertTrue("Block should be moved to ARCHIVE",
        locatedBlock.getStorageTypes()[0] == StorageType.ARCHIVE);
  } finally {
    cluster.shutdown();
  }
}
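The fixed Thread.sleep(3000) above works but is timing-sensitive. A hedged alternative sketch, assuming org.apache.hadoop.test.GenericTestUtils is on the test classpath, polls the NameNode until the reported storage type flips; dfs and file reuse the names from the test above, and the 500 ms interval and 30 s timeout are illustrative:

// poll until the block's storage type is reported as ARCHIVE, or time out
GenericTestUtils.waitFor(() -> {
  try {
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(file.toString(), 0).get(0);
    return lb.getStorageTypes()[0] == StorageType.ARCHIVE;
  } catch (IOException e) {
    return false;
  }
}, 500, 30000);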
use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
the class TestBlockReplacement method testBlockReplacementWithPinnedBlocks.
/**
* Test to verify that copying a pinned block to a different destination
* datanode fails with an IOException carrying error code Status.ERROR_BLOCK_PINNED.
*
*/
@Test(timeout = 90000)
public void testBlockReplacementWithPinnedBlocks() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  // create a small cluster of three datanodes, each with DISK and ARCHIVE
  // storage types.
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
      .storageTypes(new StorageType[] { StorageType.DISK, StorageType.ARCHIVE }).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    String fileName = "/testBlockReplacementWithPinnedBlocks/file";
    final Path file = new Path(fileName);
    DFSTestUtil.createFile(dfs, file, 1024, (short) 1, 1024);
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(fileName, 0).get(0);
    DatanodeInfo[] oldNodes = lb.getLocations();
    assertEquals("Wrong block locations", 1, oldNodes.length);
    DatanodeInfo source = oldNodes[0];
    ExtendedBlock b = lb.getBlock();
    DatanodeInfo[] datanodes = dfs.getDataNodeStats();
    DatanodeInfo destin = null;
    for (DatanodeInfo datanodeInfo : datanodes) {
      // choose a destination node different from the source
      if (!oldNodes[0].equals(datanodeInfo)) {
        destin = datanodeInfo;
        break;
      }
    }
    assertNotNull("Failed to choose destination datanode!", destin);
    assertFalse("Source and destination datanode should be different",
        source.equals(destin));
    // Mock FsDatasetSpi#getPinning to show that the block is pinned.
    for (int i = 0; i < cluster.getDataNodes().size(); i++) {
      DataNode dn = cluster.getDataNodes().get(i);
      LOG.info("Simulate block pinning in datanode " + dn);
      DataNodeTestUtils.mockDatanodeBlkPinning(dn, true);
    }
    // Block movement to a different datanode should fail as the block is
    // pinned.
    assertTrue("Status code mismatches!",
        replaceBlock(b, source, source, destin, StorageType.ARCHIVE,
            Status.ERROR_BLOCK_PINNED));
  } finally {
    cluster.shutdown();
  }
}
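Here the pinning is mocked via DataNodeTestUtils. As a hedged sketch of how a block becomes pinned outside of tests: pinning must be enabled on the datanodes and the block written to a favored node. The sketch below assumes a Configuration conf prepared before the cluster and client are created, and reuses dfs and file from the test above; the datanode address is a placeholder.

// enable block pinning on the datanodes (must be set before they start)
conf.setBoolean(DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED, true);
// writing to a favored node pins the replica on that datanode
InetSocketAddress[] favoredNodes = { new InetSocketAddress("127.0.0.1", 9866) };
try (FSDataOutputStream out = dfs.create(file, FsPermission.getFileDefault(),
    true, 4096, (short) 1, 1024L, null, favoredNodes)) {
  out.writeBytes("pinned data");
}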
use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
the class TestPendingReconstruction method testBlockReceived.
/**
* Test if DatanodeProtocol#blockReceivedAndDeleted can correctly update the
* pending reconstruction. Also make sure the blockReceivedAndDeleted call is
* idempotent to the pending reconstruction.
*/
@Test
public void testBlockReceived() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_COUNT).build();
    cluster.waitActive();
    DistributedFileSystem hdfs = cluster.getFileSystem();
    FSNamesystem fsn = cluster.getNamesystem();
    BlockManager blkManager = fsn.getBlockManager();
    final String file = "/tmp.txt";
    final Path filePath = new Path(file);
    short replFactor = 1;
    DFSTestUtil.createFile(hdfs, filePath, 1024L, replFactor, 0);
    // temporarily stop the heartbeats
    ArrayList<DataNode> datanodes = cluster.getDataNodes();
    for (int i = 0; i < DATANODE_COUNT; i++) {
      DataNodeTestUtils.setHeartbeatsDisabledForTests(datanodes.get(i), true);
    }
    hdfs.setReplication(filePath, (short) DATANODE_COUNT);
    BlockManagerTestUtil.computeAllPendingWork(blkManager);
    assertEquals(1, blkManager.pendingReconstruction.size());
    INodeFile fileNode = fsn.getFSDirectory().getINode4Write(file).asFile();
    BlockInfo[] blocks = fileNode.getBlocks();
    assertEquals(DATANODE_COUNT - 1,
        blkManager.pendingReconstruction.getNumReplicas(blocks[0]));
    LocatedBlock locatedBlock = hdfs.getClient().getLocatedBlocks(file, 0).get(0);
    DatanodeInfo existingDn = (locatedBlock.getLocations())[0];
    int reportDnNum = 0;
    String poolId = cluster.getNamesystem().getBlockPoolId();
    // report the block as received to the NN from two datanodes that do not
    // already hold the existing replica
    for (int i = 0; i < DATANODE_COUNT && reportDnNum < 2; i++) {
      if (!datanodes.get(i).getDatanodeId().equals(existingDn)) {
        DatanodeRegistration dnR = datanodes.get(i).getDNRegistrationForBP(poolId);
        StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
            new DatanodeStorage("Fake-storage-ID-Ignored"),
            new ReceivedDeletedBlockInfo[] { new ReceivedDeletedBlockInfo(blocks[0],
                BlockStatus.RECEIVED_BLOCK, "") }) };
        cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR, poolId, report);
        reportDnNum++;
      }
    }
    // IBRs are async, make sure the NN processes all of them.
    cluster.getNamesystem().getBlockManager().flushBlockOps();
    assertEquals(DATANODE_COUNT - 3,
        blkManager.pendingReconstruction.getNumReplicas(blocks[0]));
    // let the same datanodes report again; the pending count must not change
    // because blockReceivedAndDeleted is idempotent. Reset the counter so the
    // second round of reports actually runs.
    reportDnNum = 0;
    for (int i = 0; i < DATANODE_COUNT && reportDnNum < 2; i++) {
      if (!datanodes.get(i).getDatanodeId().equals(existingDn)) {
        DatanodeRegistration dnR = datanodes.get(i).getDNRegistrationForBP(poolId);
        StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
            new DatanodeStorage("Fake-storage-ID-Ignored"),
            new ReceivedDeletedBlockInfo[] { new ReceivedDeletedBlockInfo(blocks[0],
                BlockStatus.RECEIVED_BLOCK, "") }) };
        cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR, poolId, report);
        reportDnNum++;
      }
    }
    cluster.getNamesystem().getBlockManager().flushBlockOps();
    assertEquals(DATANODE_COUNT - 3,
        blkManager.pendingReconstruction.getNumReplicas(blocks[0]));
    // re-enable heartbeats so the datanodes report their real replica state
    for (int i = 0; i < DATANODE_COUNT; i++) {
      DataNodeTestUtils.setHeartbeatsDisabledForTests(datanodes.get(i), false);
      DataNodeTestUtils.triggerHeartbeat(datanodes.get(i));
    }
    Thread.sleep(5000);
    assertEquals(0, blkManager.pendingReconstruction.size());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}