Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
The class TestReplication, method changeBlockLen.
private void changeBlockLen(MiniDFSCluster cluster, int lenDelta)
    throws IOException, InterruptedException, TimeoutException {
  final Path fileName = new Path("/file1");
  final short REPLICATION_FACTOR = (short) 1;
  final FileSystem fs = cluster.getFileSystem();
  final int fileLen = fs.getConf().getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
  DFSTestUtil.createFile(fs, fileName, fileLen, REPLICATION_FACTOR, 0);
  DFSTestUtil.waitReplication(fs, fileName, REPLICATION_FACTOR);
  ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
  // Change the length of a replica
  for (int i = 0; i < cluster.getDataNodes().size(); i++) {
    if (DFSTestUtil.changeReplicaLength(cluster, block, i, lenDelta)) {
      break;
    }
  }
  // increase the file's replication factor
  fs.setReplication(fileName, (short) (REPLICATION_FACTOR + 1));
  // block replication triggers corrupt block detection
  DFSClient dfsClient = new DFSClient(
      new InetSocketAddress("localhost", cluster.getNameNodePort()), fs.getConf());
  LocatedBlocks blocks =
      dfsClient.getNamenode().getBlockLocations(fileName.toString(), 0, fileLen);
  if (lenDelta < 0) {
    // replica truncated
    while (!blocks.get(0).isCorrupt()
        || REPLICATION_FACTOR != blocks.get(0).getLocations().length) {
      Thread.sleep(100);
      blocks = dfsClient.getNamenode().getBlockLocations(fileName.toString(), 0, fileLen);
    }
  } else {
    // no corruption detected; block replicated
    while (REPLICATION_FACTOR + 1 != blocks.get(0).getLocations().length) {
      Thread.sleep(100);
      blocks = dfsClient.getNamenode().getBlockLocations(fileName.toString(), 0, fileLen);
    }
  }
  fs.delete(fileName, true);
}
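The loops above poll the NameNode's getBlockLocations() response until the block's reported state matches what the test expects. Below is a minimal, self-contained sketch of that polling pattern; the class name LocatedBlocksPolling, the helper waitForReplicaCount, and the timeout handling are illustrative assumptions, not part of the Hadoop test.

import java.io.IOException;
import java.util.concurrent.TimeoutException;

import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

// Hypothetical helper: wait until the first block of a file is reported as
// healthy with the expected number of replica locations.
public class LocatedBlocksPolling {
  static void waitForReplicaCount(ClientProtocol namenode, String src, long fileLen,
      int expectedReplicas, long timeoutMs)
      throws IOException, InterruptedException, TimeoutException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (true) {
      // Ask the NameNode for the current block locations of the file range.
      LocatedBlocks blocks = namenode.getBlockLocations(src, 0, fileLen);
      LocatedBlock first = blocks.get(0);
      if (!first.isCorrupt() && first.getLocations().length == expectedReplicas) {
        return;
      }
      if (System.currentTimeMillis() > deadline) {
        throw new TimeoutException("Block never reached " + expectedReplicas + " replicas");
      }
      Thread.sleep(100);
    }
  }
}

Unlike the test, which loops without bound, the sketch adds a deadline so a misbehaving cluster fails the check instead of hanging.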
Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
The class TestPread, method pReadFile.
private void pReadFile(FileSystem fileSys, Path name) throws IOException {
  FSDataInputStream stm = fileSys.open(name);
  byte[] expected = new byte[fileSize];
  if (simulatedStorage) {
    assert fileSys instanceof DistributedFileSystem;
    DistributedFileSystem dfs = (DistributedFileSystem) fileSys;
    LocatedBlocks lbs = dfs.getClient().getLocatedBlocks(name.toString(), 0, fileSize);
    DFSTestUtil.fillExpectedBuf(lbs, expected);
  } else {
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // do a sanity check. Read first 4K bytes
  byte[] actual = new byte[4096];
  stm.readFully(actual);
  checkAndEraseData(actual, 0, expected, "Read Sanity Test");
  // now do a pread for the first 8K bytes
  actual = new byte[8192];
  doPread(stm, 0L, actual, 0, 8192);
  checkAndEraseData(actual, 0, expected, "Pread Test 1");
  // Now check to see if the normal read returns 4K-8K byte range
  actual = new byte[4096];
  stm.readFully(actual);
  checkAndEraseData(actual, 4096, expected, "Pread Test 2");
  // Now see if we can cross a single block boundary successfully
  // read 4K bytes from blockSize - 2K offset
  stm.readFully(blockSize - 2048, actual, 0, 4096);
  checkAndEraseData(actual, (blockSize - 2048), expected, "Pread Test 3");
  // now see if we can cross two block boundaries successfully
  // read blockSize + 4K bytes from blockSize - 2K offset
  actual = new byte[blockSize + 4096];
  stm.readFully(blockSize - 2048, actual);
  checkAndEraseData(actual, (blockSize - 2048), expected, "Pread Test 4");
  // now see if we can cross two block boundaries that are not cached
  // read blockSize + 4K bytes from 10*blockSize - 2K offset
  actual = new byte[blockSize + 4096];
  stm.readFully(10 * blockSize - 2048, actual);
  checkAndEraseData(actual, (10 * blockSize - 2048), expected, "Pread Test 5");
  // now check that even after all these preads, we can still read
  // bytes 8K-12K
  actual = new byte[4096];
  stm.readFully(actual);
  checkAndEraseData(actual, 8192, expected, "Pread Test 6");
  // done
  stm.close();
  // check block location caching
  stm = fileSys.open(name);
  stm.readFully(1, actual, 0, 4096);
  stm.readFully(4 * blockSize, actual, 0, 4096);
  stm.readFully(7 * blockSize, actual, 0, 4096);
  actual = new byte[3 * 4096];
  stm.readFully(0 * blockSize, actual, 0, 3 * 4096);
  checkAndEraseData(actual, 0, expected, "Pread Test 7");
  actual = new byte[8 * 4096];
  stm.readFully(3 * blockSize, actual, 0, 8 * 4096);
  checkAndEraseData(actual, 3 * blockSize, expected, "Pread Test 8");
  // read the tail
  stm.readFully(11 * blockSize + blockSize / 2, actual, 0, blockSize / 2);
  IOException res = null;
  try {
    // read beyond the end of the file
    stm.readFully(11 * blockSize + blockSize / 2, actual, 0, blockSize);
  } catch (IOException e) {
    // should throw an exception
    res = e;
  }
  assertTrue("Error reading beyond file boundary.", res != null);
  stm.close();
}
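When simulatedStorage is enabled, the expected bytes come from the file's LocatedBlocks rather than a random seed, so the preads implicitly depend on per-block offsets and sizes. The following is a hedged sketch of how LocatedBlocks exposes that layout; BlockLayoutPrinter and printBlockLayout are illustrative names, not Hadoop APIs.

import java.util.List;

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

// Hypothetical helper: print the byte range and replica locations of each
// block in a LocatedBlocks result, e.g. to see which block a pread will hit.
public class BlockLayoutPrinter {
  static void printBlockLayout(LocatedBlocks lbs) {
    System.out.println("file length = " + lbs.getFileLength()
        + ", under construction = " + lbs.isUnderConstruction());
    List<LocatedBlock> blocks = lbs.getLocatedBlocks();
    for (LocatedBlock lb : blocks) {
      long start = lb.getStartOffset();       // offset of this block within the file
      long end = start + lb.getBlockSize();   // exclusive end offset
      StringBuilder hosts = new StringBuilder();
      for (DatanodeInfo dn : lb.getLocations()) {
        hosts.append(dn.getHostName()).append(' ');
      }
      System.out.println("[" + start + ", " + end + ") on: " + hosts);
    }
  }
}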
Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
The class TestBlockManager, method testBlockManagerMachinesArray.
@Test
public void testBlockManagerMachinesArray() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
  cluster.waitActive();
  BlockManager blockManager = cluster.getNamesystem().getBlockManager();
  FileSystem fs = cluster.getFileSystem();
  final Path filePath = new Path("/tmp.txt");
  final long fileLen = 1L;
  DFSTestUtil.createFile(fs, filePath, fileLen, (short) 3, 1L);
  ArrayList<DataNode> datanodes = cluster.getDataNodes();
  assertEquals(datanodes.size(), 4);
  FSNamesystem ns = cluster.getNamesystem();
  // get the block
  final String bpid = cluster.getNamesystem().getBlockPoolId();
  File storageDir = cluster.getInstanceStorageDir(0, 0);
  File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
  assertTrue("Data directory does not exist", dataDir.exists());
  BlockInfo blockInfo = blockManager.blocksMap.getBlocks().iterator().next();
  ExtendedBlock blk = new ExtendedBlock(bpid, blockInfo.getBlockId(),
      blockInfo.getNumBytes(), blockInfo.getGenerationStamp());
  DatanodeDescriptor failedStorageDataNode = blockManager.getStoredBlock(blockInfo).getDatanode(0);
  DatanodeDescriptor corruptStorageDataNode = blockManager.getStoredBlock(blockInfo).getDatanode(1);
  ArrayList<StorageReport> reports = new ArrayList<StorageReport>();
  for (int i = 0; i < failedStorageDataNode.getStorageInfos().length; i++) {
    DatanodeStorageInfo storageInfo = failedStorageDataNode.getStorageInfos()[i];
    DatanodeStorage dns = new DatanodeStorage(
        failedStorageDataNode.getStorageInfos()[i].getStorageID(),
        DatanodeStorage.State.FAILED,
        failedStorageDataNode.getStorageInfos()[i].getStorageType());
    while (storageInfo.getBlockIterator().hasNext()) {
      BlockInfo blockInfo1 = storageInfo.getBlockIterator().next();
      if (blockInfo1.equals(blockInfo)) {
        StorageReport report = new StorageReport(dns, true, storageInfo.getCapacity(),
            storageInfo.getDfsUsed(), storageInfo.getRemaining(),
            storageInfo.getBlockPoolUsed(), 0L);
        reports.add(report);
        break;
      }
    }
  }
  failedStorageDataNode.updateHeartbeat(reports.toArray(StorageReport.EMPTY_ARRAY), 0L, 0L, 0, 0, null);
  ns.writeLock();
  DatanodeStorageInfo corruptStorageInfo = null;
  for (int i = 0; i < corruptStorageDataNode.getStorageInfos().length; i++) {
    corruptStorageInfo = corruptStorageDataNode.getStorageInfos()[i];
    while (corruptStorageInfo.getBlockIterator().hasNext()) {
      BlockInfo blockInfo1 = corruptStorageInfo.getBlockIterator().next();
      if (blockInfo1.equals(blockInfo)) {
        break;
      }
    }
  }
  blockManager.findAndMarkBlockAsCorrupt(blk, corruptStorageDataNode,
      corruptStorageInfo.getStorageID(), CorruptReplicasMap.Reason.ANY.toString());
  ns.writeUnlock();
  BlockInfo[] blockInfos = new BlockInfo[] { blockInfo };
  ns.readLock();
  LocatedBlocks locatedBlocks = blockManager.createLocatedBlocks(blockInfos, 3L, false, 0L, 3L,
      false, false, null, null);
  assertTrue("Located Blocks should exclude corrupt replicas and failed storages",
      locatedBlocks.getLocatedBlocks().size() == 1);
  ns.readUnlock();
}
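The assertion only counts the located blocks. If one also wanted to confirm which datanodes survive in the result, the LocatedBlocks can be walked directly; the sketch below is an illustration under assumptions (LocationExclusionCheck and servedBy are made-up names, not Hadoop APIs) of inspecting the remaining locations.

import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

// Hypothetical check: collect the datanode UUIDs serving any block of a
// LocatedBlocks result and test whether a given node is among them.
public class LocationExclusionCheck {
  static boolean servedBy(LocatedBlocks lbs, String datanodeUuid) {
    Set<String> uuids = new HashSet<>();
    for (LocatedBlock lb : lbs.getLocatedBlocks()) {
      for (DatanodeInfo dn : lb.getLocations()) {
        uuids.add(dn.getDatanodeUuid());
      }
    }
    return uuids.contains(datanodeUuid);
  }
}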
Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
The class TestBalancer, method doTestBalancerWithStripedFile.
private void doTestBalancerWithStripedFile(Configuration conf) throws Exception {
  int numOfDatanodes = dataBlocks + parityBlocks + 3;
  int numOfRacks = dataBlocks;
  long capacity = 20 * defaultBlockSize;
  long[] capacities = new long[numOfDatanodes];
  for (int i = 0; i < capacities.length; i++) {
    capacities[i] = capacity;
  }
  String[] racks = new String[numOfDatanodes];
  for (int i = 0; i < numOfDatanodes; i++) {
    racks[i] = "/rack" + (i % numOfRacks);
  }
  conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
      StripedFileTestUtil.getDefaultECPolicy().getName());
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numOfDatanodes).racks(racks)
      .simulatedCapacities(capacities).build();
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();
    client.setErasureCodingPolicy("/", StripedFileTestUtil.getDefaultECPolicy().getName());
    long totalCapacity = sum(capacities);
    // fill up the cluster with 30% data. It'll be 45% full plus parity.
    long fileLen = totalCapacity * 3 / 10;
    long totalUsedSpace = fileLen * (dataBlocks + parityBlocks) / dataBlocks;
    FileSystem fs = cluster.getFileSystem(0);
    DFSTestUtil.createFile(fs, filePath, fileLen, (short) 3, r.nextLong());
    // verify locations of striped blocks
    LocatedBlocks locatedBlocks = client.getBlockLocations(fileName, 0, fileLen);
    StripedFileTestUtil.verifyLocatedStripedBlocks(locatedBlocks, groupSize);
    // add datanodes in new rack
    String newRack = "/rack" + (++numOfRacks);
    cluster.startDataNodes(conf, 2, true, null, new String[] { newRack, newRack }, null,
        new long[] { capacity, capacity });
    totalCapacity += capacity * 2;
    cluster.triggerHeartbeats();
    // run balancer and validate results
    BalancerParameters p = BalancerParameters.DEFAULT;
    runBalancer(conf, totalUsedSpace, totalCapacity, p, 0);
    // verify locations of striped blocks
    locatedBlocks = client.getBlockLocations(fileName, 0, fileLen);
    StripedFileTestUtil.verifyLocatedStripedBlocks(locatedBlocks, groupSize);
  } finally {
    cluster.shutdown();
  }
}
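Here StripedFileTestUtil.verifyLocatedStripedBlocks does the placement check after the balancer run. When debugging such a run it can also help to dump the striped layout by hand; the sketch below is a hypothetical helper (StripedLayoutPrinter is not part of StripedFileTestUtil) that relies only on LocatedStripedBlock's getBlockIndices() and getLocations().

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;

// Hypothetical helper: for each striped block group, print which internal
// block index sits on which datanode, to eyeball the post-balancer layout.
public class StripedLayoutPrinter {
  static void printStripedLayout(LocatedBlocks lbs) {
    for (LocatedBlock lb : lbs.getLocatedBlocks()) {
      if (!(lb instanceof LocatedStripedBlock)) {
        continue;  // skip contiguous (non-EC) blocks
      }
      LocatedStripedBlock sb = (LocatedStripedBlock) lb;
      byte[] indices = sb.getBlockIndices();   // internal block index per location
      DatanodeInfo[] locs = sb.getLocations();
      for (int i = 0; i < locs.length; i++) {
        System.out.println("block group " + sb.getBlock().getBlockId()
            + " index " + indices[i] + " -> " + locs[i].getHostName());
      }
    }
  }
}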
Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
The class TestReconstructStripedBlocksWithRackAwareness, method testChooseExcessReplicasToDelete.
@Test
public void testChooseExcessReplicasToDelete() throws Exception {
  conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
      StripedFileTestUtil.getDefaultECPolicy().getName());
  cluster = new MiniDFSCluster.Builder(conf).racks(racks).hosts(hosts)
      .numDataNodes(hosts.length).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  fs.setErasureCodingPolicy(new Path("/"), StripedFileTestUtil.getDefaultECPolicy().getName());
  MiniDFSCluster.DataNodeProperties lastHost = stopDataNode(hosts[hosts.length - 1]);
  final Path file = new Path("/foo");
  DFSTestUtil.createFile(fs, file, cellSize * dataBlocks * 2, (short) 1, 0L);
  // stop host1
  MiniDFSCluster.DataNodeProperties host1 = stopDataNode("host1");
  // bring last host back
  cluster.restartDataNode(lastHost);
  cluster.waitActive();
  // wait for reconstruction to finish
  final short blockNum = (short) (dataBlocks + parityBlocks);
  DFSTestUtil.waitForReplication(fs, file, blockNum, 15 * 1000);
  // restart host1
  cluster.restartDataNode(host1);
  cluster.waitActive();
  for (DataNode dn : cluster.getDataNodes()) {
    if (dn.getDatanodeId().getHostName().equals("host1")) {
      DataNodeTestUtils.triggerBlockReport(dn);
      break;
    }
  }
  // make sure the excess replica is detected, and we delete host1's replica
  // so that we have 6 racks
  DFSTestUtil.waitForReplication(fs, file, blockNum, 15 * 1000);
  LocatedBlocks blks = fs.getClient().getLocatedBlocks(file.toString(), 0);
  LocatedStripedBlock block = (LocatedStripedBlock) blks.getLastLocatedBlock();
  for (DatanodeInfo dn : block.getLocations()) {
    Assert.assertFalse(dn.getHostName().equals("host1"));
  }
}
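The final loop illustrates a recurring pattern in these tests: fetch the last block group via getLastLocatedBlock() and inspect its locations. A short hedged sketch of that pattern as a reusable helper follows; LastBlockHosts and lastBlockHosts are illustrative names, not part of the Hadoop test code.

import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

// Hypothetical helper: collect the hostnames holding a file's last block,
// e.g. to assert that an excess replica on a particular host was removed.
public class LastBlockHosts {
  static Set<String> lastBlockHosts(LocatedBlocks lbs) {
    Set<String> hosts = new HashSet<>();
    for (DatanodeInfo dn : lbs.getLastLocatedBlock().getLocations()) {
      hosts.add(dn.getHostName());
    }
    return hosts;
  }
}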