Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.
From the class TestBalancerWithMultipleNameNodes, method unevenDistribution.
/**
* First start a cluster and fill the cluster up to a certain size. Then
* redistribute blocks according to the required distribution. Finally, balance
* the cluster.
*
* @param nNameNodes Number of NameNodes
* @param nNameNodesToBalance Number of NameNodes to run the balancer on
* @param distributionPerNN The distribution for each NameNode.
* @param capacities Capacities of the datanodes
* @param racks Rack names
* @param conf Configuration
*/
private void unevenDistribution(final int nNameNodes,
    final int nNameNodesToBalance, long[] distributionPerNN,
    long[] capacities, String[] racks, Configuration conf) throws Exception {
  LOG.info("UNEVEN 0");
  final int nDataNodes = distributionPerNN.length;
  if (capacities.length != nDataNodes || racks.length != nDataNodes) {
    throw new IllegalArgumentException("Array length is not the same");
  }
  if (nNameNodesToBalance > nNameNodes) {
    throw new IllegalArgumentException("Number of namenodes to balance is "
        + "greater than the number of namenodes.");
  }

  // calculate total space that needs to be filled
  final long usedSpacePerNN = TestBalancer.sum(distributionPerNN);

  // fill the cluster
  final ExtendedBlock[][] blocks;
  {
    LOG.info("UNEVEN 1");
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(
        new Configuration(conf))
        .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(nNameNodes))
        .numDataNodes(nDataNodes)
        .racks(racks)
        .simulatedCapacities(capacities)
        .build();
    LOG.info("UNEVEN 2");
    try {
      cluster.waitActive();
      DFSTestUtil.setFederatedConfiguration(cluster, conf);
      LOG.info("UNEVEN 3");
      final Suite s = new Suite(cluster, nNameNodes, nDataNodes, null, conf);
      blocks = generateBlocks(s, usedSpacePerNN);
      LOG.info("UNEVEN 4");
    } finally {
      cluster.shutdown();
    }
  }

  conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.0f");
  {
    LOG.info("UNEVEN 10");
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(nNameNodes))
        .numDataNodes(nDataNodes)
        .racks(racks)
        .simulatedCapacities(capacities)
        .format(false)
        .build();
    LOG.info("UNEVEN 11");
    try {
      cluster.waitActive();
      LOG.info("UNEVEN 12");
      Set<String> blockpools = new HashSet<String>();
      for (int i = 0; i < nNameNodesToBalance; i++) {
        blockpools.add(cluster.getNamesystem(i).getBlockPoolId());
      }
      BalancerParameters.Builder b = new BalancerParameters.Builder();
      b.setBlockpools(blockpools);
      BalancerParameters params = b.build();
      final Suite s = new Suite(cluster, nNameNodes, nDataNodes, params, conf);
      for (int n = 0; n < nNameNodes; n++) {
        // redistribute blocks
        final Block[][] blocksDN = TestBalancer.distributeBlocks(
            blocks[n], s.replication, distributionPerNN);
        for (int d = 0; d < blocksDN.length; d++) {
          cluster.injectBlocks(n, d, Arrays.asList(blocksDN[d]));
        }
        LOG.info("UNEVEN 13: n=" + n);
      }

      final long totalCapacity = TestBalancer.sum(capacities);
      final long totalUsed = nNameNodes * usedSpacePerNN;
      LOG.info("UNEVEN 14");
      runBalancer(s, totalUsed, totalCapacity);
      LOG.info("UNEVEN 15");
    } finally {
      cluster.shutdown();
    }
    LOG.info("UNEVEN 16");
  }
}
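The arrays of ExtendedBlock returned by generateBlocks are kept per NameNode because, in a federated cluster, each NameNode owns a separate block pool; an ExtendedBlock is exactly a pool-local Block plus its block-pool ID. A minimal self-contained sketch of that pairing (the pool ID string and block numbers below are made up for illustration):

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

public class ExtendedBlockSketch {
  public static void main(String[] args) {
    // blockId, numBytes, generationStamp -- arbitrary example values
    Block local = new Block(1L, 1024L, 100L);
    // The block-pool ID is what disambiguates blocks across the
    // federated namespaces balanced in the test above.
    ExtendedBlock eb = new ExtendedBlock("BP-1-127.0.0.1-1", local);
    System.out.println(eb.getBlockPoolId());             // BP-1-127.0.0.1-1
    System.out.println(eb.getLocalBlock().getBlockId()); // 1
  }
}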
Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.
From the class TestOverReplicatedBlocks, method testInvalidateOverReplicatedBlock.
/**
* Test over replicated block should get invalidated when decreasing the
* replication for a partial block.
*/
@Test
public void testInvalidateOverReplicatedBlock() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(3).build();
  try {
    final FSNamesystem namesystem = cluster.getNamesystem();
    final BlockManager bm = namesystem.getBlockManager();
    FileSystem fs = cluster.getFileSystem();
    Path p = new Path(MiniDFSCluster.getBaseDirectory(), "/foo1");
    FSDataOutputStream out = fs.create(p, (short) 2);
    out.writeBytes("HDFS-3119: " + p);
    out.hsync();
    fs.setReplication(p, (short) 1);
    out.close();
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, p);
    assertEquals("Expected only one live replica for the block", 1,
        bm.countNodes(bm.getStoredBlock(block.getLocalBlock())).liveReplicas());
  } finally {
    cluster.shutdown();
  }
}
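Note the getLocalBlock() call inside the assertion: BlockManager keys its internal block map on the pool-local Block, so the ExtendedBlock must be unwrapped before getStoredBlock can find it. A hedged helper sketch restating that chain (the helper name liveReplicas is illustrative, not part of the test):

// Illustrative helper, not from the test: strip the block-pool ID, look
// up the stored block, then count its live replicas.
static int liveReplicas(BlockManager bm, ExtendedBlock eb) {
  return bm.countNodes(bm.getStoredBlock(eb.getLocalBlock())).liveReplicas();
}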
Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.
From the class TestCommitBlockSynchronization, method testCommitBlockSynchronizationWithCloseAndNonExistantTarget.
@Test
public void testCommitBlockSynchronizationWithCloseAndNonExistantTarget()
    throws IOException {
  INodeFile file = mockFileUnderConstruction();
  Block block = new Block(blockId, length, genStamp);
  FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
  DatanodeID[] newTargets = new DatanodeID[] {
      new DatanodeID("0.0.0.0", "nonexistantHost", "1", 0, 0, 0, 0) };
  String[] storageIDs = new String[] { "fake-storage-ID" };
  ExtendedBlock lastBlock = new ExtendedBlock();
  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, true, false, newTargets, storageIDs);

  // Repeat the call to make sure it returns true
  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, true, false, newTargets, storageIDs);
}
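The test can pass a default-constructed ExtendedBlock because makeNameSystemSpy stubs out the block lookup. When concrete values are needed, ExtendedBlock also exposes per-field setters; a hedged sketch reusing the same test fields:

// Illustrative only: populating a no-arg ExtendedBlock field by field.
ExtendedBlock lastBlock = new ExtendedBlock();
lastBlock.setBlockId(blockId);          // block identity
lastBlock.setGenerationStamp(genStamp); // stamp the NameNode verifies
lastBlock.setNumBytes(length);          // visible length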
Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.
From the class TestCommitBlockSynchronization, method testCommitBlockSynchronizationWithDelete.
@Test
public void testCommitBlockSynchronizationWithDelete() throws IOException {
  INodeFile file = mockFileUnderConstruction();
  Block block = new Block(blockId, length, genStamp);
  FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
  DatanodeID[] newTargets = new DatanodeID[0];
  ExtendedBlock lastBlock = new ExtendedBlock();
  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, false, true, newTargets, null);

  // Simulate removing the last block from the file.
  doReturn(null).when(file).removeLastBlock(any(Block.class));

  // Repeat the call to make sure it does not throw
  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, false, true, newTargets, null);
}
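Taken together, this test and the previous one exercise both settings of the two boolean flags to commitBlockSynchronization; a hedged restatement of the calls exactly as used in these tests:

// closeFile=true, deleteblock=false: finalize the recovered last block
// and close the file (previous test).
namesystemSpy.commitBlockSynchronization(
    lastBlock, genStamp, length, true, false, newTargets, storageIDs);
// closeFile=false, deleteblock=true: discard the unsynchronized last
// block instead of finalizing it (this test).
namesystemSpy.commitBlockSynchronization(
    lastBlock, genStamp, length, false, true, newTargets, null);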
Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.
From the class TestUpdatePipelineWithSnapshots, method testUpdatePipelineAfterDelete.
// Regression test for HDFS-6647.
@Test
public void testUpdatePipelineAfterDelete() throws Exception {
  Configuration conf = new HdfsConfiguration();
  Path file = new Path("/test-file");
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    FileSystem fs = cluster.getFileSystem();
    NamenodeProtocols namenode = cluster.getNameNodeRpc();
    DFSOutputStream out = null;
    try {
      // Create a file and make sure a block is allocated for it.
      out = (DFSOutputStream) (fs.create(file).getWrappedStream());
      out.write(1);
      out.hflush();

      // Create a snapshot that includes the file.
      SnapshotTestHelper.createSnapshot((DistributedFileSystem) fs,
          new Path("/"), "s1");

      // Grab the block info of this file for later use.
      FSDataInputStream in = null;
      ExtendedBlock oldBlock = null;
      try {
        in = fs.open(file);
        oldBlock = DFSTestUtil.getAllBlocks(in).get(0).getBlock();
      } finally {
        IOUtils.closeStream(in);
      }

      // Allocate a new block ID/gen stamp so we can simulate pipeline
      // recovery.
      String clientName =
          ((DistributedFileSystem) fs).getClient().getClientName();
      LocatedBlock newLocatedBlock =
          namenode.updateBlockForPipeline(oldBlock, clientName);
      ExtendedBlock newBlock = new ExtendedBlock(oldBlock.getBlockPoolId(),
          oldBlock.getBlockId(), oldBlock.getNumBytes(),
          newLocatedBlock.getBlock().getGenerationStamp());
      // Delete the file from the present FS. It will still exist in the
      // previously-created snapshot. This will log an OP_DELETE for the
      // file in question.
      fs.delete(file, true);

      // Simulate a pipeline recovery: this should fail because the file
      // has been deleted, so no OP_UPDATE_PIPELINE should ever be
      // logged for the file in question.
      try {
        namenode.updatePipeline(clientName, oldBlock, newBlock,
            newLocatedBlock.getLocations(), newLocatedBlock.getStorageIDs());
      } catch (IOException ioe) {
        // normal
        assertExceptionContains(
            "does not exist or it is not under construction", ioe);
      }
      // Make sure the NN can restart with the edit logs as we have them now.
      cluster.restartNameNode(true);
    } finally {
      IOUtils.closeStream(out);
    }
  } finally {
    cluster.shutdown();
  }
}
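The ExtendedBlock built for the pipeline update above captures the recovery idiom: pool ID, block ID, and length all carry over from the old block, and only the generation stamp advances, taken from the NameNode's reply to updateBlockForPipeline. Restated as a sketch with the same variables:

// Same idiom as in the test: copy identity and length from the old
// block; only the generation stamp changes during pipeline recovery.
ExtendedBlock recovered = new ExtendedBlock(
    oldBlock.getBlockPoolId(),
    oldBlock.getBlockId(),
    oldBlock.getNumBytes(),
    newLocatedBlock.getBlock().getGenerationStamp());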