use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
the class TestAddBlock method testAddBlockUC.
/**
 * Test adding new blocks but without closing the corresponding file.
 */
@Test
public void testAddBlockUC() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  final Path file1 = new Path("/file1");
  DFSTestUtil.createFile(fs, file1, BLOCKSIZE - 1, REPLICATION, 0L);
  FSDataOutputStream out = null;
  try {
    // append to the file without closing the stream
    out = fs.append(file1);
    String appendContent = "appending-content";
    out.writeBytes(appendContent);
    ((DFSOutputStream) out.getWrappedStream())
        .hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    // restart NN
    cluster.restartNameNode(true);
    FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
    INodeFile fileNode = fsdir.getINode4Write(file1.toString()).asFile();
    BlockInfo[] fileBlocks = fileNode.getBlocks();
    assertEquals(2, fileBlocks.length);
    // the first appended byte fills the first block up to BLOCKSIZE,
    // so the second block holds the remaining appendContent.length() - 1 bytes
    assertEquals(BLOCKSIZE, fileBlocks[0].getNumBytes());
    assertEquals(BlockUCState.COMPLETE, fileBlocks[0].getBlockUCState());
    assertEquals(appendContent.length() - 1, fileBlocks[1].getNumBytes());
    assertEquals(BlockUCState.UNDER_CONSTRUCTION,
        fileBlocks[1].getBlockUCState());
  } finally {
    if (out != null) {
      out.close();
    }
  }
}
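The snippet relies on fixtures declared elsewhere in TestAddBlock: a MiniDFSCluster field plus the BLOCKSIZE and REPLICATION constants. A minimal setup sketch, assuming JUnit 4 lifecycle methods; the constant values here are hypothetical and may differ from the real class:

  // assumed values; the real TestAddBlock may use different numbers
  private static final short REPLICATION = 3;
  private static final int BLOCKSIZE = 1024;
  private static Configuration conf;
  private static MiniDFSCluster cluster;

  @Before
  public void setup() throws IOException {
    conf = new Configuration();
    // make the block size small so a short file already spans a block boundary
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(REPLICATION).build();
    cluster.waitActive();
  }

  @After
  public void tearDown() {
    if (cluster != null) {
      cluster.shutdown();
      cluster = null;
    }
  }

Creating the file with BLOCKSIZE - 1 bytes is what makes the later assertions work: the append crosses the block boundary, leaving one COMPLETE block and one UNDER_CONSTRUCTION block.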
use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
the class TestMover method testTwoReplicaSameStorageTypeShouldNotSelect.
@Test(timeout = 300000)
public void testTwoReplicaSameStorageTypeShouldNotSelect() throws Exception {
  // HDFS-8147
  final Configuration conf = new HdfsConfiguration();
  initConf(conf);
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(3)
      .storageTypes(new StorageType[][] {
          { StorageType.DISK, StorageType.ARCHIVE },
          { StorageType.DISK, StorageType.DISK },
          { StorageType.DISK, StorageType.ARCHIVE } })
      .build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String file = "/testForTwoReplicaSameStorageTypeShouldNotSelect";
    // write to DISK
    final FSDataOutputStream out = dfs.create(new Path(file), (short) 2);
    out.writeChars("testForTwoReplicaSameStorageTypeShouldNotSelect");
    out.close();
    // verify that both replicas are on DISK before the movement
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
    StorageType[] storageTypes = lb.getStorageTypes();
    for (StorageType storageType : storageTypes) {
      Assert.assertEquals(StorageType.DISK, storageType);
    }
    // move to ARCHIVE
    dfs.setStoragePolicy(new Path(file), "COLD");
    int rc = ToolRunner.run(conf, new Mover.Cli(),
        new String[] { "-p", file });
    Assert.assertEquals("Movement to ARCHIVE should be successful", 0, rc);
    // wait till the namenode is notified about the new block locations
    waitForLocatedBlockWithArchiveStorageType(dfs, file, 2);
  } finally {
    cluster.shutdown();
  }
}
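waitForLocatedBlockWithArchiveStorageType is a TestMover helper that this page does not show. A plausible implementation polls the first located block until the expected number of replicas reports ARCHIVE storage; the GenericTestUtils.waitFor usage, the Guava Supplier, and the polling/timeout values below are assumptions:

  private void waitForLocatedBlockWithArchiveStorageType(
      final DistributedFileSystem dfs, final String file,
      final int expectedArchiveCount) throws Exception {
    // assumes org.apache.hadoop.test.GenericTestUtils and
    // com.google.common.base.Supplier are on the classpath
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        final LocatedBlock lb;
        try {
          lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
        } catch (IOException e) {
          return false;
        }
        // count replicas that have already reached ARCHIVE storage
        int archiveCount = 0;
        for (StorageType storageType : lb.getStorageTypes()) {
          if (StorageType.ARCHIVE == storageType) {
            archiveCount++;
          }
        }
        return archiveCount == expectedArchiveCount;
      }
    }, 100, 60000);  // poll every 100 ms, up to 60 s (assumed)
  }

Polling is needed because the Mover exiting successfully only means the copies were scheduled; the NameNode learns the new storage types asynchronously via block reports.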
use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
the class TestMover method testMoverWithPinnedBlocks.
/**
 * Test to verify that the mover can't move pinned blocks.
 */
@Test(timeout = 90000)
public void testMoverWithPinnedBlocks() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  initConf(conf);
  // Set a large retry-max-attempts value so that the test case times out if
  // block pinning errors are not handled properly during block movement.
  conf.setInt(DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY, 10000);
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(3).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String file = "/testMoverWithPinnedBlocks/file";
    Path dir = new Path("/testMoverWithPinnedBlocks");
    dfs.mkdirs(dir);
    // write to DISK
    dfs.setStoragePolicy(dir, "HOT");
    final FSDataOutputStream out = dfs.create(new Path(file));
    byte[] fileData = StripedFileTestUtil.generateBytes(DEFAULT_BLOCK_SIZE * 3);
    out.write(fileData);
    out.close();
    // verify that all replicas are on DISK before the movement
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
    StorageType[] storageTypes = lb.getStorageTypes();
    for (StorageType storageType : storageTypes) {
      Assert.assertEquals(StorageType.DISK, storageType);
    }
    // Add one SSD-based datanode to the cluster.
    StorageType[][] newtypes = new StorageType[][] { { StorageType.SSD } };
    startAdditionalDNs(conf, 1, newtypes, cluster);
    // Mock FsDatasetSpi#getPinning so every block reports as pinned.
    for (int i = 0; i < cluster.getDataNodes().size(); i++) {
      DataNode dn = cluster.getDataNodes().get(i);
      LOG.info("Simulate block pinning in datanode {}", dn);
      DataNodeTestUtils.mockDatanodeBlkPinning(dn, true);
    }
    // try to move the file's blocks under the ONE_SSD policy
    dfs.setStoragePolicy(dir, "ONE_SSD");
    int rc = ToolRunner.run(conf, new Mover.Cli(),
        new String[] { "-p", dir.toString() });
    int exitcode = ExitStatus.NO_MOVE_BLOCK.getExitCode();
    Assert.assertEquals("Movement should fail", exitcode, rc);
  } finally {
    cluster.shutdown();
  }
}
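startAdditionalDNs is another helper that is not reproduced here. Presumably it wraps MiniDFSCluster#startDataNodes with the new storage types and then triggers heartbeats so the NameNode registers the new node quickly; the exact overload and arguments below are an assumption about this Hadoop version:

  private void startAdditionalDNs(final Configuration conf,
      int numNewNodes, StorageType[][] newTypes,
      final MiniDFSCluster cluster) throws IOException {
    // the precise startDataNodes overload is an assumption; MiniDFSCluster
    // offers several variants that accept per-datanode storage types
    cluster.startDataNodes(conf, numNewNodes, newTypes, true, null, null, null);
    cluster.triggerHeartbeats();
  }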
use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
the class TestMover method testScheduleSameBlock.
@Test
public void testScheduleSameBlock() throws IOException {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(4).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String file = "/testScheduleSameBlock/file";
    {
      final FSDataOutputStream out = dfs.create(new Path(file));
      out.writeChars("testScheduleSameBlock");
      out.close();
    }
    final Mover mover = newMover(conf);
    mover.init();
    final Mover.Processor processor = mover.new Processor();
    final LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
    final List<MLocation> locations = MLocation.toLocations(lb);
    final MLocation ml = locations.get(0);
    final DBlock db = mover.newDBlock(lb, locations, null);
    final List<StorageType> storageTypes = new ArrayList<StorageType>(
        Arrays.asList(StorageType.DEFAULT, StorageType.DEFAULT));
    // the first scheduling attempt should succeed ...
    Assert.assertTrue(processor.scheduleMoveReplica(db, ml, storageTypes));
    // ... while scheduling the same block again must be rejected
    Assert.assertFalse(processor.scheduleMoveReplica(db, ml, storageTypes));
  } finally {
    cluster.shutdown();
  }
}
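newMover is a small factory used by several tests in this class and is not shown on this page. A rough sketch of what it presumably does, building a Mover from the single NameNode's connector; the NameNodeConnector plumbing and the Mover constructor shape are assumptions about this Hadoop version:

  static Mover newMover(Configuration conf) throws IOException {
    // the test cluster is expected to expose exactly one namenode
    final Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
    Assert.assertEquals(1, namenodes.size());
    Map<URI, List<Path>> nnMap = new HashMap<>();
    for (URI nn : namenodes) {
      nnMap.put(nn, null);  // null path list means "all paths"
    }
    final List<NameNodeConnector> nncs =
        NameNodeConnector.newNameNodeConnectors(nnMap,
            Mover.class.getSimpleName(), Mover.MOVER_ID_PATH, conf,
            NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS);
    // constructor arguments (retry counter, excluded pinned blocks)
    // are an assumption
    return new Mover(nncs.get(0), conf, new AtomicInteger(0),
        new HashMap<Long, Set<DatanodeInfo>>());
  }

The double scheduleMoveReplica call is the point of the test: the Mover's Processor tracks blocks it has already scheduled within an iteration, so a second request for the same DBlock must return false.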
use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
the class TestReconstructStripedBlocks method testCountLiveReplicas.
/**
 * Make sure the NN can detect the scenario where there are enough
 * internal blocks (>= 9 by default) but a data/parity block is still
 * missing.
 */
@Test
public void testCountLiveReplicas() throws Exception {
  final HdfsConfiguration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false);
  conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
      StripedFileTestUtil.getDefaultECPolicy().getName());
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(groupSize + 2).build();
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  try {
    fs.mkdirs(dirPath);
    fs.setErasureCodingPolicy(dirPath,
        StripedFileTestUtil.getDefaultECPolicy().getName());
    DFSTestUtil.createFile(fs, filePath,
        cellSize * dataBlocks * 2, (short) 1, 0L);
    // stop a dn
    LocatedBlocks blks = fs.getClient().getLocatedBlocks(filePath.toString(), 0);
    LocatedStripedBlock block = (LocatedStripedBlock) blks.getLastLocatedBlock();
    DatanodeInfo dnToStop = block.getLocations()[0];
    MiniDFSCluster.DataNodeProperties dnProp =
        cluster.stopDataNode(dnToStop.getXferAddr());
    cluster.setDataNodeDead(dnToStop);
    // wait for reconstruction to happen
    DFSTestUtil.waitForReplication(fs, filePath, groupSize, 15 * 1000);
    // bring the dn back: 10 internal blocks now
    cluster.restartDataNode(dnProp);
    cluster.waitActive();
    // stop another dn: 9 internal blocks remain, but they only cover
    // 8 distinct real blocks
    dnToStop = block.getLocations()[1];
    cluster.stopDataNode(dnToStop.getXferAddr());
    cluster.setDataNodeDead(dnToStop);
    // currently the namenode is able to track the missing block;
    // restart the NN so it has to rebuild that state from block reports
    cluster.restartNameNode(true);
    for (DataNode dn : cluster.getDataNodes()) {
      DataNodeTestUtils.triggerBlockReport(dn);
    }
    FSNamesystem fsn = cluster.getNamesystem();
    BlockManager bm = fsn.getBlockManager();
    // wait 3 running cycles of the redundancy monitor
    Thread.sleep(3000);
    for (DataNode dn : cluster.getDataNodes()) {
      DataNodeTestUtils.triggerHeartbeat(dn);
    }
    // check whether the NN can detect the missing internal block and
    // finish the reconstruction
    StripedFileTestUtil.waitForReconstructionFinished(filePath, fs, groupSize);
    boolean reconstructed = false;
    for (int i = 0; i < 5; i++) {
      NumberReplicas num = null;
      fsn.readLock();
      try {
        BlockInfo blockInfo = cluster.getNamesystem().getFSDirectory()
            .getINode4Write(filePath.toString()).asFile().getLastBlock();
        num = bm.countNodes(blockInfo);
      } finally {
        fsn.readUnlock();
      }
      if (num.liveReplicas() >= groupSize) {
        reconstructed = true;
        break;
      } else {
        Thread.sleep(1000);
      }
    }
    Assert.assertTrue(reconstructed);
    // every internal block index 0..groupSize-1 should now be present
    blks = fs.getClient().getLocatedBlocks(filePath.toString(), 0);
    block = (LocatedStripedBlock) blks.getLastLocatedBlock();
    BitSet bitSet = new BitSet(groupSize);
    for (byte index : block.getBlockIndices()) {
      bitSet.set(index);
    }
    for (int i = 0; i < groupSize; i++) {
      Assert.assertTrue(bitSet.get(i));
    }
  } finally {
    cluster.shutdown();
  }
}
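This test leans on class-level fixtures (cluster, dirPath, filePath, groupSize, cellSize, dataBlocks) that the page does not show. They are presumably derived from the default erasure coding policy, roughly like the sketch below; the field names match the test body, but the exact paths are assumptions:

  private final ErasureCodingPolicy ecPolicy =
      StripedFileTestUtil.getDefaultECPolicy();
  private final int cellSize = ecPolicy.getCellSize();
  private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
  private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
  // e.g. 6 data + 3 parity = 9 internal blocks per group for RS(6,3)
  private final short groupSize = (short) (dataBlocks + parityBlocks);
  private final Path dirPath = new Path("/ec-dir");        // assumed path
  private final Path filePath = new Path(dirPath, "file"); // assumed path
  private MiniDFSCluster cluster;

With the default RS(6,3) policy this explains the numbers in the comments: groupSize is 9, so the cluster starts with 11 datanodes, and losing one node plus reconstructing then losing another yields 9 internal blocks that cover only 8 distinct indices.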