Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.

Class TestPBHelper, method testConvertLocatedBlock:
@Test
public void testConvertLocatedBlock() {
  LocatedBlock lb = createLocatedBlock();
  LocatedBlockProto lbProto = PBHelperClient.convertLocatedBlock(lb);
  LocatedBlock lb2 = PBHelperClient.convertLocatedBlockProto(lbProto);
  compare(lb, lb2);
}
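
The round trip above relies on a compare helper defined elsewhere in TestPBHelper. A minimal sketch of what such an equality check plausibly covers, using only standard LocatedBlock accessors (the real helper may compare more fields):

private void compare(LocatedBlock expected, LocatedBlock actual) {
  Assert.assertEquals(expected.getBlock(), actual.getBlock());
  Assert.assertEquals(expected.getStartOffset(), actual.getStartOffset());
  Assert.assertEquals(expected.getBlockToken(), actual.getBlockToken());
  Assert.assertEquals(expected.getLocations().length,
      actual.getLocations().length);
  for (int i = 0; i < expected.getLocations().length; i++) {
    // compare replica locations by their transfer address
    Assert.assertEquals(expected.getLocations()[i].getXferAddr(),
        actual.getLocations()[i].getXferAddr());
  }
}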

Class TestPBHelper, method createLocatedBlockNoStorageMedia:
private LocatedBlock createLocatedBlockNoStorageMedia() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
          AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
          AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3",
          AdminStates.NORMAL) };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53), dnInfos);
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(),
      new Text("kind"), new Text("service")));
  lb.setStartOffset(5);
  return lb;
}
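
This builder is presumably exercised by the same round-trip pattern as testConvertLocatedBlock. A minimal sketch of such a test, reusing the convert and compare methods shown above (the test name here is an assumption, not a quote from TestPBHelper):

@Test
public void testConvertLocatedBlockNoStorageMedia() {
  // same round trip as above, but the block carries no storage IDs or types
  LocatedBlock lb = createLocatedBlockNoStorageMedia();
  LocatedBlockProto lbProto = PBHelperClient.convertLocatedBlock(lb);
  LocatedBlock lb2 = PBHelperClient.convertLocatedBlockProto(lbProto);
  compare(lb, lb2);
}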

Class TestPBHelper, method createLocatedBlock:
private LocatedBlock createLocatedBlock() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
          AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
          AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3",
          AdminStates.NORMAL),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h4",
          AdminStates.NORMAL) };
  String[] storageIDs = { "s1", "s2", "s3", "s4" };
  StorageType[] media = { StorageType.DISK, StorageType.SSD,
      StorageType.DISK, StorageType.RAM_DISK };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, storageIDs,
      media, 5, false, new DatanodeInfo[] {});
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(),
      new Text("kind"), new Text("service")));
  return lb;
}
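
Because this variant populates storage IDs, storage types, the start offset and the corrupt flag, a round trip can also be spot-checked field by field. An illustrative fragment, not part of the original test class, using only the conversion methods already shown:

LocatedBlock lb = createLocatedBlock();
LocatedBlock lb2 = PBHelperClient.convertLocatedBlockProto(
    PBHelperClient.convertLocatedBlock(lb));
// field-level checks that the proto conversion preserved the extra metadata
Assert.assertEquals(lb.getBlock(), lb2.getBlock());
Assert.assertEquals(lb.getStartOffset(), lb2.getStartOffset());
Assert.assertArrayEquals(lb.getStorageIDs(), lb2.getStorageIDs());
Assert.assertArrayEquals(lb.getStorageTypes(), lb2.getStorageTypes());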

Class TestMover, method testWithinSameNode:
private void testWithinSameNode(Configuration conf) throws Exception {
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(3)
      .storageTypes(new StorageType[] { StorageType.DISK, StorageType.ARCHIVE })
      .build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String file = "/testScheduleWithinSameNode/file";
    Path dir = new Path("/testScheduleWithinSameNode");
    dfs.mkdirs(dir);
    // write the file to DISK first
    dfs.setStoragePolicy(dir, "HOT");
    final FSDataOutputStream out = dfs.create(new Path(file));
    out.writeChars("testScheduleWithinSameNode");
    out.close();
    // verify that all replicas are on DISK before the movement
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
    StorageType[] storageTypes = lb.getStorageTypes();
    for (StorageType storageType : storageTypes) {
      Assert.assertTrue(StorageType.DISK == storageType);
    }
    // move the replicas to ARCHIVE
    dfs.setStoragePolicy(dir, "COLD");
    int rc = ToolRunner.run(conf, new Mover.Cli(),
        new String[] { "-p", dir.toString() });
    Assert.assertEquals("Movement to ARCHIVE should be successful", 0, rc);
    // wait until the namenode is notified of the new block locations
    waitForLocatedBlockWithArchiveStorageType(dfs, file, 3);
  } finally {
    cluster.shutdown();
  }
}
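
The test ends by polling the namenode through waitForLocatedBlockWithArchiveStorageType, whose body is not shown here. A minimal sketch of such a helper, assuming a plain polling loop (the actual TestMover implementation may differ):

private void waitForLocatedBlockWithArchiveStorageType(
    final DistributedFileSystem dfs, final String file,
    final int expectedArchiveCount) throws Exception {
  long deadline = Time.monotonicNow() + 60000;
  while (true) {
    // re-read the block locations currently reported by the namenode
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
    int archiveCount = 0;
    for (StorageType type : lb.getStorageTypes()) {
      if (type == StorageType.ARCHIVE) {
        archiveCount++;
      }
    }
    if (archiveCount == expectedArchiveCount) {
      return;
    }
    if (Time.monotonicNow() > deadline) {
      Assert.fail("Expected " + expectedArchiveCount
          + " ARCHIVE replicas, found " + archiveCount);
    }
    Thread.sleep(500);
  }
}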

Class TestMover, method testMoverFailedRetryWithPinnedBlocks:
/**
 * Test to verify that the mover handles pinned blocks as well as failed
 * blocks, and continues retrying only the failed blocks.
 */
@Test(timeout = 90000)
public void testMoverFailedRetryWithPinnedBlocks() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  initConf(conf);
  conf.set(DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY, "2");
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(2)
      .storageTypes(new StorageType[][] {
          { StorageType.DISK, StorageType.ARCHIVE },
          { StorageType.DISK, StorageType.ARCHIVE } })
      .build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String parenDir = "/parent";
    dfs.mkdirs(new Path(parenDir));
    final String file1 = "/parent/testMoverFailedRetryWithPinnedBlocks1";
    // write the file to DISK
    final FSDataOutputStream out = dfs.create(new Path(file1), (short) 2);
    byte[] fileData = StripedFileTestUtil.generateBytes(DEFAULT_BLOCK_SIZE * 2);
    out.write(fileData);
    out.close();
    // add pinned blocks
    createFileWithFavoredDatanodes(conf, cluster, dfs);
    // delete a block file so the block move fails with FileNotFoundException
    LocatedBlocks locatedBlocks = dfs.getClient().getLocatedBlocks(file1, 0);
    Assert.assertEquals("Wrong block count", 2,
        locatedBlocks.locatedBlockCount());
    LocatedBlock lb = locatedBlocks.get(0);
    cluster.corruptBlockOnDataNodesByDeletingBlockFile(lb.getBlock());
    // move to ARCHIVE
    dfs.setStoragePolicy(new Path(parenDir), "COLD");
    int rc = ToolRunner.run(conf, new Mover.Cli(),
        new String[] { "-p", parenDir.toString() });
    Assert.assertEquals("Movement should fail after some retry",
        ExitStatus.NO_MOVE_PROGRESS.getExitCode(), rc);
  } finally {
    cluster.shutdown();
  }
}
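
The pinned replicas come from createFileWithFavoredDatanodes, whose body is not shown here. A minimal sketch of what such a helper could look like, assuming dfs.datanode.block-pinning.enabled was set to true in the cluster configuration before the datanodes started; the path and parameters below are illustrative, not the original Hadoop code:

private void createFileWithFavoredDatanodes(Configuration conf,
    MiniDFSCluster cluster, DistributedFileSystem dfs) throws IOException {
  // ask the namenode to place the replica on a specific datanode; with block
  // pinning enabled, that datanode pins the replica and the Mover skips it
  InetSocketAddress[] favoredNodes = new InetSocketAddress[] {
      cluster.getDataNodes().get(0).getXferAddress() };
  Path pinnedFile = new Path("/parent/pinnedFile");
  try (FSDataOutputStream out = dfs.create(pinnedFile,
      FsPermission.getDefault(), true, 4096, (short) 1, DEFAULT_BLOCK_SIZE,
      null, favoredNodes)) {
    out.write(StripedFileTestUtil.generateBytes(DEFAULT_BLOCK_SIZE));
  }
}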