use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.
the class TestBlocksWithNotEnoughRacks method testSufficientlyReplBlocksUsesNewRack.
/*
 * Creates a block whose replicas are all on the same rack. Even though the
 * block is sufficiently replicated, adding a datanode on a new rack should
 * cause the block to be re-replicated to the new rack.
 */
@Test
public void testSufficientlyReplBlocksUsesNewRack() throws Exception {
  Configuration conf = getConf();
  final short REPLICATION_FACTOR = 3;
  final Path filePath = new Path("/testFile");
  // All datanodes are on the same rack
  String[] racks = { "/rack1", "/rack1", "/rack1" };
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
  try {
    // Create a file with one block with a replication factor of 3
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 0);
    // Add a new datanode on a different rack
    String[] newRacks = { "/rack2" };
    cluster.startDataNodes(conf, 1, true, null, newRacks);
    cluster.waitActive();
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}
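For readers unfamiliar with ExtendedBlock, the object returned by DFSTestUtil.getFirstBlock above bundles a block's pool ID with its numeric block ID, length, and generation stamp. A minimal sketch of inspecting those fields; the logging is illustrative and not part of the test:
// Minimal sketch: inspect the ExtendedBlock obtained above.
// The getters are part of org.apache.hadoop.hdfs.protocol.ExtendedBlock;
// the System.out logging is illustrative only.
ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
System.out.println("pool=" + b.getBlockPoolId()  // block pool of the namespace
    + " id=" + b.getBlockId()                    // numeric block ID
    + " len=" + b.getNumBytes()                  // bytes written so far
    + " gs=" + b.getGenerationStamp());          // generation stamp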
use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.
the class TestBlocksWithNotEnoughRacks method testCorruptBlockRereplicatedAcrossRacks.
/*
 * Test that a block that is re-replicated because one of its replicas
 * is found to be corrupt is re-replicated across racks.
 */
@Test
public void testCorruptBlockRereplicatedAcrossRacks() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 2;
  int fileLen = 512;
  final Path filePath = new Path("/testFile");
  // Datanodes are spread across two racks
  String[] racks = { "/rack1", "/rack1", "/rack2", "/rack2" };
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();
  try {
    // Create a file with one block with a replication factor of 2
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, fileLen, REPLICATION_FACTOR, 1L);
    final byte[] fileContent = DFSTestUtil.readFileAsBytes(fs, filePath);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    // Corrupt a replica of the block
    int dnToCorrupt = DFSTestUtil.firstDnWithBlock(cluster, b);
    cluster.corruptReplica(dnToCorrupt, b);
    // Restart the datanode so blocks are re-scanned, and the corrupt
    // block is detected.
    cluster.restartDataNode(dnToCorrupt);
    // Wait for the namenode to notice the corrupt replica
    DFSTestUtil.waitCorruptReplicas(fs, ns, filePath, b, 1);
    // The rack policy is still respected
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    // Ensure all replicas are valid (the corrupt replica may not
    // have been cleaned up yet).
    for (int i = 0; i < racks.length; i++) {
      byte[] blockContent = cluster.readBlockOnDataNodeAsBytes(i, b);
      if (blockContent != null && i != dnToCorrupt) {
        assertArrayEquals("Corrupt replica", fileContent, blockContent);
      }
    }
  } finally {
    cluster.shutdown();
  }
}
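The verification loop above can be read as a reusable check: every datanode that still serves a replica of b, other than the deliberately corrupted one, must return content identical to the original file. A hedged sketch of that check as a standalone helper; the helper name is ours, but the cluster and assertion calls it uses all appear in the test above:
// Hypothetical helper (not part of the Hadoop test): assert that every
// replica of the given block, except the one we corrupted, matches the
// expected file content.
private static void assertHealthyReplicasMatch(MiniDFSCluster cluster,
    ExtendedBlock b, byte[] expected, int corruptedDnIndex, int numDataNodes)
    throws IOException {
  for (int i = 0; i < numDataNodes; i++) {
    // returns null when datanode i holds no replica of b
    byte[] replica = cluster.readBlockOnDataNodeAsBytes(i, b);
    if (replica != null && i != corruptedDnIndex) {
      assertArrayEquals("Replica content differs from file content", expected, replica);
    }
  }
}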
use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.
the class TestDatanodeManager method HelperFunction.
/**
 * Helper function that tests the DatanodeManager's sortLocatedBlocks
 * function; we invoke it with and without a topology script.
 *
 * @param scriptFileName - script name, or null
 *
 * @throws URISyntaxException
 * @throws IOException
 */
public void HelperFunction(String scriptFileName) throws URISyntaxException, IOException {
  // create the DatanodeManager which will be tested
  Configuration conf = new Configuration();
  FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
  Mockito.when(fsn.hasWriteLock()).thenReturn(true);
  if (scriptFileName != null && !scriptFileName.isEmpty()) {
    URL shellScript = getClass().getResource(scriptFileName);
    Path resourcePath = Paths.get(shellScript.toURI());
    FileUtil.setExecutable(resourcePath.toFile(), true);
    conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, resourcePath.toString());
  }
  DatanodeManager dm = mockDatanodeManager(fsn, conf);
  // register 5 datanodes, each with different storage ID and type
  DatanodeInfo[] locs = new DatanodeInfo[5];
  String[] storageIDs = new String[5];
  StorageType[] storageTypes = new StorageType[] {
      StorageType.ARCHIVE, StorageType.DEFAULT, StorageType.DISK,
      StorageType.RAM_DISK, StorageType.SSD };
  for (int i = 0; i < 5; i++) {
    // register new datanode
    String uuid = "UUID-" + i;
    String ip = "IP-" + i;
    DatanodeRegistration dr = Mockito.mock(DatanodeRegistration.class);
    Mockito.when(dr.getDatanodeUuid()).thenReturn(uuid);
    Mockito.when(dr.getIpAddr()).thenReturn(ip);
    Mockito.when(dr.getXferAddr()).thenReturn(ip + ":9000");
    Mockito.when(dr.getXferPort()).thenReturn(9000);
    Mockito.when(dr.getSoftwareVersion()).thenReturn("version1");
    dm.registerDatanode(dr);
    // get location and storage information
    locs[i] = dm.getDatanode(uuid);
    storageIDs[i] = "storageID-" + i;
  }
  // set the first 2 locations as decommissioned
  locs[0].setDecommissioned();
  locs[1].setDecommissioned();
  // create a LocatedBlock with the above locations
  ExtendedBlock b = new ExtendedBlock("somePoolID", 1234);
  LocatedBlock block = new LocatedBlock(b, locs, storageIDs, storageTypes);
  List<LocatedBlock> blocks = new ArrayList<>();
  blocks.add(block);
  final String targetIp = locs[4].getIpAddr();
  // sort block locations
  dm.sortLocatedBlocks(targetIp, blocks);
  // check that storage IDs/types are aligned with datanode locs
  DatanodeInfo[] sortedLocs = block.getLocations();
  storageIDs = block.getStorageIDs();
  storageTypes = block.getStorageTypes();
  assertThat(sortedLocs.length, is(5));
  assertThat(storageIDs.length, is(5));
  assertThat(storageTypes.length, is(5));
  for (int i = 0; i < sortedLocs.length; i++) {
    assertThat(((DatanodeInfoWithStorage) sortedLocs[i]).getStorageID(), is(storageIDs[i]));
    assertThat(((DatanodeInfoWithStorage) sortedLocs[i]).getStorageType(), is(storageTypes[i]));
  }
  // Ensure the local node is first.
  assertThat(sortedLocs[0].getIpAddr(), is(targetIp));
  // Ensure the two decommissioned DNs were moved to the end.
  assertThat(sortedLocs[sortedLocs.length - 1].getAdminState(), is(DatanodeInfo.AdminStates.DECOMMISSIONED));
  assertThat(sortedLocs[sortedLocs.length - 2].getAdminState(), is(DatanodeInfo.AdminStates.DECOMMISSIONED));
}
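HelperFunction is parameterized so the same assertions run both with and without a topology script. The test method names and the script resource path below are illustrative assumptions, shown only as a sketch of how such a helper is typically driven:
// Illustrative only: the @Test names and the script resource path are
// assumptions, not the actual methods in TestDatanodeManager.
@Test
public void sortsLocatedBlocksWithoutTopologyScript() throws Exception {
  HelperFunction(null);
}
@Test
public void sortsLocatedBlocksWithTopologyScript() throws Exception {
  HelperFunction("/topology-script.sh");  // hypothetical classpath resource
}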
use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.
the class TestBalancerWithNodeGroup method testBalancerWithRackLocality.
/**
 * Create a cluster with an even distribution of blocks, add a new empty
 * node to the cluster, then verify that the balancer policy preserves
 * rack locality.
 */
@Test(timeout = 60000)
public void testBalancerWithRackLocality() throws Exception {
  Configuration conf = createConf();
  long[] capacities = new long[] { CAPACITY, CAPACITY };
  String[] racks = new String[] { RACK0, RACK1 };
  String[] nodeGroups = new String[] { NODEGROUP0, NODEGROUP1 };
  int numOfDatanodes = capacities.length;
  assertEquals(numOfDatanodes, racks.length);
  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
      .numDataNodes(capacities.length).racks(racks).simulatedCapacities(capacities);
  MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
  cluster = new MiniDFSClusterWithNodeGroup(builder);
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();
    long totalCapacity = TestBalancer.sum(capacities);
    // fill up the cluster to be 30% full
    long totalUsedSpace = totalCapacity * 3 / 10;
    long length = totalUsedSpace / numOfDatanodes;
    TestBalancer.createFile(cluster, filePath, length, (short) numOfDatanodes, 0);
    LocatedBlocks lbs = client.getBlockLocations(filePath.toUri().getPath(), 0, length);
    Set<ExtendedBlock> before = getBlocksOnRack(lbs.getLocatedBlocks(), RACK0);
    long newCapacity = CAPACITY;
    String newRack = RACK1;
    String newNodeGroup = NODEGROUP2;
    // start up an empty node with the same capacity, on the same rack,
    // but in a new node group
    cluster.startDataNodes(conf, 1, true, null, new String[] { newRack },
        new long[] { newCapacity }, new String[] { newNodeGroup });
    totalCapacity += newCapacity;
    // run balancer and validate results
    runBalancerCanFinish(conf, totalUsedSpace, totalCapacity);
    lbs = client.getBlockLocations(filePath.toUri().getPath(), 0, length);
    Set<ExtendedBlock> after = getBlocksOnRack(lbs.getLocatedBlocks(), RACK0);
    assertEquals(before, after);
  } finally {
    cluster.shutdown();
  }
}
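getBlocksOnRack is not shown on this page. A hedged sketch of what such a helper could look like, collecting the ExtendedBlock of every LocatedBlock that has at least one replica on the given rack; the actual Hadoop helper may compare network locations differently:
// Sketch only: collects the blocks that have a replica on the given rack.
// The rack comparison via startsWith() is an assumption; the real helper
// in TestBalancerWithNodeGroup may match network locations differently.
private static Set<ExtendedBlock> getBlocksOnRack(List<LocatedBlock> lbs, String rack) {
  Set<ExtendedBlock> blocks = new HashSet<>();
  for (LocatedBlock lb : lbs) {
    for (DatanodeInfo dn : lb.getLocations()) {
      // network location is e.g. "/rack0/nodegroup0" under node-group topology
      if (dn.getNetworkLocation().startsWith(rack)) {
        blocks.add(lb.getBlock());
        break;
      }
    }
  }
  return blocks;
}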
use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.
the class TestBalancerWithMultipleNameNodes method generateBlocks.
/* fill up a cluster with <code>numNodes</code> datanodes
 * whose used space is <code>size</code>
 */
private static ExtendedBlock[][] generateBlocks(Suite s, long size)
    throws IOException, InterruptedException, TimeoutException {
  final ExtendedBlock[][] blocks = new ExtendedBlock[s.clients.length][];
  for (int n = 0; n < s.clients.length; n++) {
    createFile(s, n, size);
    final List<LocatedBlock> locatedBlocks =
        s.clients[n].getBlockLocations(FILE_NAME, 0, size).getLocatedBlocks();
    final int numOfBlocks = locatedBlocks.size();
    blocks[n] = new ExtendedBlock[numOfBlocks];
    for (int i = 0; i < numOfBlocks; i++) {
      final ExtendedBlock b = locatedBlocks.get(i).getBlock();
      blocks[n][i] = new ExtendedBlock(b.getBlockPoolId(), b.getBlockId(),
          b.getNumBytes(), b.getGenerationStamp());
    }
  }
  return blocks;
}
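The inner loop copies each block's identifying fields into a fresh ExtendedBlock so the returned arrays do not alias objects held by the LocatedBlock instances. A short sketch of the same defensive copy using ExtendedBlock's copy constructor, assuming the field-by-field copy above has no other purpose:
// Equivalent defensive copy via the ExtendedBlock copy constructor.
for (int i = 0; i < numOfBlocks; i++) {
  blocks[n][i] = new ExtendedBlock(locatedBlocks.get(i).getBlock());
}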