Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
From the class TestFileAppend, method testBreakHardlinksIfNeeded.
@Test
public void testBreakHardlinksIfNeeded() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  InetSocketAddress addr = new InetSocketAddress("localhost",
      cluster.getNameNodePort());
  DFSClient client = new DFSClient(addr, conf);
  try {
    // create a new file, write to it and close it.
    Path file1 = new Path("/filestatus.dat");
    FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
    writeFile(stm);
    stm.close();

    // Get a handle to the datanode
    DataNode[] dn = cluster.listDataNodes();
    assertTrue("There should be only one datanode but found " + dn.length,
        dn.length == 1);

    LocatedBlocks locations = client.getNamenode().getBlockLocations(
        file1.toString(), 0, Long.MAX_VALUE);
    List<LocatedBlock> blocks = locations.getLocatedBlocks();
    final FsDatasetSpi<?> fsd = dn[0].getFSDataset();

    // Create hard links for a few of the blocks
    for (int i = 0; i < blocks.size(); i = i + 2) {
      ExtendedBlock b = blocks.get(i).getBlock();
      final File f = FsDatasetTestUtil.getBlockFile(
          fsd, b.getBlockPoolId(), b.getLocalBlock());
      File link = new File(f.toString() + ".link");
      System.out.println("Creating hardlink for File " + f + " to " + link);
      HardLink.createHardLink(f, link);
    }

    // Detach all blocks. This should remove hardlinks (if any)
    for (int i = 0; i < blocks.size(); i++) {
      ExtendedBlock b = blocks.get(i).getBlock();
      System.out.println("breakHardlinksIfNeeded detaching block " + b);
      assertTrue("breakHardlinksIfNeeded(" + b + ") should have returned true",
          FsDatasetTestUtil.breakHardlinksIfNeeded(fsd, b));
    }

    // Since the blocks were already detached earlier, these calls should
    // return false
    for (int i = 0; i < blocks.size(); i++) {
      ExtendedBlock b = blocks.get(i).getBlock();
      System.out.println("breakHardlinksIfNeeded re-attempting to "
          + "detach block " + b);
      assertTrue("breakHardlinksIfNeeded(" + b + ") should have returned false",
          FsDatasetTestUtil.breakHardlinksIfNeeded(fsd, b));
    }
  } finally {
    client.close();
    fs.close();
    cluster.shutdown();
  }
}
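The same LocatedBlocks lookup works outside the MiniDFSCluster harness against any running HDFS instance. Below is a minimal sketch, not taken from the Hadoop test suite, assuming fs.defaultFS points at an HDFS NameNode and using a hypothetical file path; it fetches the block list through DFSClient and walks the individual LocatedBlock entries, which is the core pattern the test above builds on.

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class ListBlocksSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS points at an HDFS NameNode.
    FileSystem fs = FileSystem.get(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      throw new IllegalStateException("Expected an HDFS file system");
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    String file = "/tmp/example.dat"; // hypothetical path
    // Ask the NameNode for the block list covering the whole file.
    LocatedBlocks locations =
        dfs.getClient().getLocatedBlocks(file, 0, Long.MAX_VALUE);
    List<LocatedBlock> blocks = locations.getLocatedBlocks();
    for (LocatedBlock lb : blocks) {
      ExtendedBlock b = lb.getBlock();
      System.out.println(b + " offset=" + lb.getStartOffset()
          + " replicas=" + lb.getLocations().length);
    }
  }
}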
Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
From the class TestFileCreation, method testLeaseExpireHardLimit.
/**
 * Create a file, write something, hflush but not close.
 * Then change lease period and wait for lease recovery.
 * Finally, read the block directly from each Datanode and verify the content.
 */
@Test
public void testLeaseExpireHardLimit() throws Exception {
  System.out.println("testLeaseExpireHardLimit start");
  final long leasePeriod = 1000;
  final int DATANODE_NUM = 3;
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
  // create cluster
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
  DistributedFileSystem dfs = null;
  try {
    cluster.waitActive();
    dfs = cluster.getFileSystem();

    // create a new file.
    final String f = DIR + "foo";
    final Path fpath = new Path(f);
    HdfsDataOutputStream out = create(dfs, fpath, DATANODE_NUM);
    out.write("something".getBytes());
    out.hflush();
    int actualRepl = out.getCurrentBlockReplication();
    assertTrue(f + " should be replicated to " + DATANODE_NUM + " datanodes.",
        actualRepl == DATANODE_NUM);

    // set the soft and hard limit to be 1 second so that the
    // namenode triggers lease recovery
    cluster.setLeasePeriod(leasePeriod, leasePeriod);
    // wait for the lease to expire
    try {
      Thread.sleep(5 * leasePeriod);
    } catch (InterruptedException e) {
    }

    LocatedBlocks locations =
        dfs.dfs.getNamenode().getBlockLocations(f, 0, Long.MAX_VALUE);
    assertEquals(1, locations.locatedBlockCount());
    LocatedBlock locatedblock = locations.getLocatedBlocks().get(0);
    int successcount = 0;
    for (DatanodeInfo datanodeinfo : locatedblock.getLocations()) {
      DataNode datanode = cluster.getDataNode(datanodeinfo.getIpcPort());
      ExtendedBlock blk = locatedblock.getBlock();
      try (BufferedReader in = new BufferedReader(new InputStreamReader(
          datanode.getFSDataset().getBlockInputStream(blk, 0)))) {
        assertEquals("something", in.readLine());
        successcount++;
      }
    }
    System.out.println("successcount=" + successcount);
    assertTrue(successcount > 0);
  } finally {
    IOUtils.closeStream(dfs);
    cluster.shutdown();
  }
  System.out.println("testLeaseExpireHardLimit successful");
}
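For reference, the DatanodeInfo array returned by LocatedBlock.getLocations(), which the test uses to reach each datanode, also carries enough metadata to describe every replica without touching the dataset directly. A small sketch follows; the class name is a placeholder, and it assumes a LocatedBlocks instance obtained as in the test above.

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class ReplicaInfoSketch {
  /** Print where each block of an already-fetched block list is stored. */
  static void printReplicaLocations(LocatedBlocks locations) {
    for (LocatedBlock lb : locations.getLocatedBlocks()) {
      System.out.println("Block " + lb.getBlock().getBlockName()
          + " (" + lb.getBlockSize() + " bytes):");
      for (DatanodeInfo dn : lb.getLocations()) {
        // getXferAddr() is the host:port of the datanode's data transfer endpoint.
        System.out.println("  replica on " + dn.getHostName()
            + " (" + dn.getXferAddr() + ")");
      }
    }
  }
}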
Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
From the class TestInjectionForSimulatedStorage, method waitForBlockReplication.
// Waits for all of the blocks to reach the expected replication factor.
private void waitForBlockReplication(String filename, ClientProtocol namenode,
    int expected, long maxWaitSec) throws IOException {
  long start = Time.monotonicNow();
  // wait for all the blocks to be replicated
  LOG.info("Checking for block replication for " + filename);
  LocatedBlocks blocks = namenode.getBlockLocations(filename, 0, Long.MAX_VALUE);
  assertEquals(numBlocks, blocks.locatedBlockCount());
  for (int i = 0; i < numBlocks; ++i) {
    LOG.info("Checking for block: " + (i + 1));
    while (true) {
      // Loop to check for block i (usually when block 0 is done, all will be done).
      blocks = namenode.getBlockLocations(filename, 0, Long.MAX_VALUE);
      assertEquals(numBlocks, blocks.locatedBlockCount());
      LocatedBlock block = blocks.get(i);
      int actual = block.getLocations().length;
      if (actual == expected) {
        LOG.info("Got enough replicas for " + (i + 1) + "th block "
            + block.getBlock() + ", got " + actual + ".");
        break;
      }
      LOG.info("Not enough replicas for " + (i + 1) + "th block "
          + block.getBlock() + " yet. Expecting " + expected
          + ", got " + actual + ".");
      if (maxWaitSec > 0 && (Time.monotonicNow() - start) > (maxWaitSec * 1000)) {
        throw new IOException("Timed out while waiting for all blocks to "
            + "be replicated for " + filename);
      }
      try {
        Thread.sleep(500);
      } catch (InterruptedException ignored) {
      }
    }
  }
}
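The polling loop above reduces to a single predicate over one LocatedBlocks snapshot. The following sketch (a hypothetical helper, using the same ClientProtocol handle as the method above) expresses that check on its own, so a caller can wrap it in whatever retry and timeout policy it prefers.

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class ReplicationCheckSketch {
  /**
   * Returns true if every block of the file currently has at least the
   * expected number of reported replica locations.
   */
  static boolean isFullyReplicated(ClientProtocol namenode, String filename,
      int expected) throws IOException {
    LocatedBlocks blocks =
        namenode.getBlockLocations(filename, 0, Long.MAX_VALUE);
    for (LocatedBlock block : blocks.getLocatedBlocks()) {
      if (block.getLocations().length < expected) {
        return false;
      }
    }
    return true;
  }
}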
Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
From the class TestBlockToken, method testBlockTokenInLastLocatedBlock.
/**
 * This test writes a file and gets the block locations without closing the
 * file, and tests the block token in the last block. The block token is
 * verified by ensuring it is of the correct kind.
 *
 * @throws IOException
 * @throws InterruptedException
 */
private void testBlockTokenInLastLocatedBlock(boolean enableProtobuf)
    throws IOException, InterruptedException {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512);
  conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_PROTOBUF_ENABLE,
      enableProtobuf);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  try {
    FileSystem fs = cluster.getFileSystem();
    String fileName = "/testBlockTokenInLastLocatedBlock";
    Path filePath = new Path(fileName);
    FSDataOutputStream out = fs.create(filePath, (short) 1);
    out.write(new byte[1000]);
    // ensure that the first block is written out (see FSOutputSummer#flush)
    out.flush();
    LocatedBlocks locatedBlocks =
        cluster.getNameNodeRpc().getBlockLocations(fileName, 0, 1000);
    while (locatedBlocks.getLastLocatedBlock() == null) {
      Thread.sleep(100);
      locatedBlocks =
          cluster.getNameNodeRpc().getBlockLocations(fileName, 0, 1000);
    }
    Token<BlockTokenIdentifier> token =
        locatedBlocks.getLastLocatedBlock().getBlockToken();
    Assert.assertEquals(BlockTokenIdentifier.KIND_NAME, token.getKind());
    out.close();
  } finally {
    cluster.shutdown();
  }
}
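A related pattern, sketched below under similar assumptions (a LocatedBlocks instance fetched from a cluster with block access tokens enabled; the class name is hypothetical): every LocatedBlock, not only the last one, exposes its block token, and the token's kind and service can be inspected without contacting a datanode.

import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.security.token.Token;

public class BlockTokenSketch {
  /** Print the kind and service of the block token attached to each block. */
  static void printBlockTokens(LocatedBlocks locatedBlocks) {
    for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
      Token<BlockTokenIdentifier> token = lb.getBlockToken();
      System.out.println(lb.getBlock() + " token kind=" + token.getKind()
          + " service=" + token.getService());
    }
  }
}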
Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
From the class TestMover, method testMoverFailedRetryWithPinnedBlocks.
/**
 * Test to verify that the mover handles pinned blocks as well as failed
 * blocks; the mover should continue retrying only the failed blocks.
 */
@Test(timeout = 90000)
public void testMoverFailedRetryWithPinnedBlocks() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  initConf(conf);
  conf.set(DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY, "2");
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(2)
      .storageTypes(new StorageType[][] {
          { StorageType.DISK, StorageType.ARCHIVE },
          { StorageType.DISK, StorageType.ARCHIVE } })
      .build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String parenDir = "/parent";
    dfs.mkdirs(new Path(parenDir));
    final String file1 = "/parent/testMoverFailedRetryWithPinnedBlocks1";

    // write to DISK
    final FSDataOutputStream out = dfs.create(new Path(file1), (short) 2);
    byte[] fileData = StripedFileTestUtil.generateBytes(DEFAULT_BLOCK_SIZE * 2);
    out.write(fileData);
    out.close();

    // Adding pinned blocks.
    createFileWithFavoredDatanodes(conf, cluster, dfs);

    // Delete a block file so that the block move fails with FileNotFoundException.
    LocatedBlocks locatedBlocks = dfs.getClient().getLocatedBlocks(file1, 0);
    Assert.assertEquals("Wrong block count", 2, locatedBlocks.locatedBlockCount());
    LocatedBlock lb = locatedBlocks.get(0);
    cluster.corruptBlockOnDataNodesByDeletingBlockFile(lb.getBlock());

    // move to ARCHIVE
    dfs.setStoragePolicy(new Path(parenDir), "COLD");
    int rc = ToolRunner.run(conf, new Mover.Cli(),
        new String[] { "-p", parenDir.toString() });
    Assert.assertEquals("Movement should fail after some retry",
        ExitStatus.NO_MOVE_PROGRESS.getExitCode(), rc);
  } finally {
    cluster.shutdown();
  }
}
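After a mover run such as the one above, the storage type of every replica can be checked straight from the block locations. The sketch below is a hypothetical helper, not part of TestMover; it assumes a DistributedFileSystem handle like the dfs variable in the test and verifies that all reported replicas of a file sit on ARCHIVE storage.

import java.io.IOException;

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class StorageTypeCheckSketch {
  /** Returns true if every reported replica of the file is on ARCHIVE storage. */
  static boolean allReplicasOnArchive(DistributedFileSystem dfs, String file)
      throws IOException {
    LocatedBlocks locatedBlocks = dfs.getClient().getLocatedBlocks(file, 0);
    for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
      for (StorageType type : lb.getStorageTypes()) {
        if (type != StorageType.ARCHIVE) {
          return false;
        }
      }
    }
    return true;
  }
}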