Use of org.apache.hadoop.hdfs.protocol.DatanodeID in the Apache Hadoop project: the class TestFsck, method testUpgradeDomain.
private void testUpgradeDomain(boolean defineUpgradeDomain,
    boolean displayUpgradeDomain) throws Exception {
  final short replFactor = 1;
  final short numDN = 1;
  final long blockSize = 512;
  final long fileSize = 1024;
  final String upgradeDomain = "ud1";
  final String[] racks = { "/rack1" };
  final String[] hosts = { "127.0.0.1" };
  HostsFileWriter hostsFileWriter = new HostsFileWriter();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, replFactor);
  if (defineUpgradeDomain) {
    conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY,
        CombinedHostFileManager.class, HostConfigManager.class);
    hostsFileWriter.initialize(conf, "temp/fsckupgradedomain");
  }
  DistributedFileSystem dfs;
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDN)
      .hosts(hosts).racks(racks).build();
  cluster.waitClusterUp();
  dfs = cluster.getFileSystem();
  // Configure the upgrade domain on the datanode
  if (defineUpgradeDomain) {
    DatanodeAdminProperties dnProp = new DatanodeAdminProperties();
    DatanodeID datanodeID = cluster.getDataNodes().get(0).getDatanodeId();
    dnProp.setHostName(datanodeID.getHostName());
    dnProp.setPort(datanodeID.getXferPort());
    dnProp.setUpgradeDomain(upgradeDomain);
    hostsFileWriter.initIncludeHosts(new DatanodeAdminProperties[] { dnProp });
    cluster.getFileSystem().refreshNodes();
  }
  // create files
  final String testFile = new String("/testfile");
  final Path path = new Path(testFile);
  DFSTestUtil.createFile(dfs, path, fileSize, replFactor, 1000L);
  DFSTestUtil.waitReplication(dfs, path, replFactor);
  try {
    String fsckOut = runFsck(conf, 0, true, testFile, "-files", "-blocks",
        displayUpgradeDomain ? "-upgradedomains" : "-locations");
    assertTrue(fsckOut.contains(NamenodeFsck.HEALTHY_STATUS));
    String udValue = defineUpgradeDomain ? upgradeDomain : NamenodeFsck.UNDEFINED;
    assertEquals(displayUpgradeDomain, fsckOut.contains("(ud=" + udValue + ")"));
  } finally {
    if (defineUpgradeDomain) {
      hostsFileWriter.cleanup();
    }
  }
}
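The DatanodeID-specific part of this test is the translation of a running datanode's identity into the DatanodeAdminProperties record written to the combined hosts file. A minimal sketch of that translation, pulled out into a hypothetical helper (toAdminProperties is not a Hadoop API; it only reuses the getters and setters already visible in the test):

import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
import org.apache.hadoop.hdfs.protocol.DatanodeID;

public final class UpgradeDomainEntries {
  private UpgradeDomainEntries() {
  }

  // Build the admin record for one datanode, tagging it with an upgrade
  // domain. The host name and transfer port come straight from the node's
  // DatanodeID, exactly as in the test above.
  public static DatanodeAdminProperties toAdminProperties(
      DatanodeID datanodeID, String upgradeDomain) {
    DatanodeAdminProperties dnProp = new DatanodeAdminProperties();
    dnProp.setHostName(datanodeID.getHostName());
    dnProp.setPort(datanodeID.getXferPort());
    dnProp.setUpgradeDomain(upgradeDomain);
    return dnProp;
  }
}

An array of such records is what the test hands to HostsFileWriter.initIncludeHosts(...) before calling refreshNodes(), so the namenode reloads the include list with the upgrade-domain assignment attached.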
Use of org.apache.hadoop.hdfs.protocol.DatanodeID in the Apache Hadoop project: the class TestPeerCache, method testAddAndRetrieve.
@Test
public void testAddAndRetrieve() throws Exception {
  PeerCache cache = new PeerCache(3, 100000);
  DatanodeID dnId = new DatanodeID("192.168.0.1", "fakehostname",
      "fake_datanode_id", 100, 101, 102, 103);
  FakePeer peer = new FakePeer(dnId, false);
  cache.put(dnId, peer);
  assertTrue(!peer.isClosed());
  assertEquals(1, cache.size());
  assertEquals(peer, cache.get(dnId, false));
  assertEquals(0, cache.size());
  cache.close();
}
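For readers unfamiliar with the constructor used here, the seven arguments are, in order, the IP address, host name, datanode UUID, transfer port, info port, secure info port, and IPC port. Below is a small standalone sketch that builds the same DatanodeID and reads its identity back; the getter names are assumed from the public DatanodeID API and should be checked against the Hadoop version in use:

import org.apache.hadoop.hdfs.protocol.DatanodeID;

public final class DatanodeIdDemo {
  private DatanodeIdDemo() {
  }

  public static void main(String[] args) {
    // Same shape as in the test: ipAddr, hostName, datanodeUuid,
    // xferPort, infoPort, infoSecurePort, ipcPort.
    DatanodeID dnId = new DatanodeID("192.168.0.1", "fakehostname",
        "fake_datanode_id", 100, 101, 102, 103);
    // Getter names assumed from the DatanodeID API; verify before relying
    // on them.
    System.out.println(dnId.getHostName());     // expected: fakehostname
    System.out.println(dnId.getDatanodeUuid()); // expected: fake_datanode_id
    System.out.println(dnId.getXferPort());     // expected: 100
    System.out.println(dnId.getXferAddr());     // expected: 192.168.0.1:100
  }
}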
Use of org.apache.hadoop.hdfs.protocol.DatanodeID in the Apache Hadoop project: the class TestPeerCache, method testEviction.
@Test
public void testEviction() throws Exception {
  final int CAPACITY = 3;
  PeerCache cache = new PeerCache(CAPACITY, 100000);
  DatanodeID[] dnIds = new DatanodeID[CAPACITY + 1];
  FakePeer[] peers = new FakePeer[CAPACITY + 1];
  for (int i = 0; i < dnIds.length; ++i) {
    dnIds[i] = new DatanodeID("192.168.0.1", "fakehostname_" + i,
        "fake_datanode_id_" + i, 100, 101, 102, 103);
    peers[i] = new FakePeer(dnIds[i], false);
  }
  for (int i = 0; i < CAPACITY; ++i) {
    cache.put(dnIds[i], peers[i]);
  }
  // Check that the peers are cached
  assertEquals(CAPACITY, cache.size());
  // Add another entry and check that the first entry was evicted
  cache.put(dnIds[CAPACITY], peers[CAPACITY]);
  assertEquals(CAPACITY, cache.size());
  assertSame(null, cache.get(dnIds[0], false));
  // Make sure that the other entries are still there
  for (int i = 1; i < CAPACITY; ++i) {
    Peer peer = cache.get(dnIds[i], false);
    assertSame(peers[i], peer);
    assertTrue(!peer.isClosed());
    peer.close();
  }
  assertEquals(1, cache.size());
  cache.close();
}
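The eviction behaviour asserted here (capacity of three, the oldest entry dropped when a fourth DatanodeID is inserted) is the standard bounded-cache pattern. The following is a stand-in illustration of that pattern using only java.util, not a description of how PeerCache is actually implemented:

import java.util.LinkedHashMap;
import java.util.Map;

// Bounded cache that drops the eldest entry once capacity is exceeded.
// Illustrative only; PeerCache's real implementation is not shown here.
public class BoundedCache<K, V> extends LinkedHashMap<K, V> {
  private final int capacity;

  public BoundedCache(int capacity) {
    // accessOrder = false: entries are ordered (and evicted) by insertion.
    super(16, 0.75f, false);
    this.capacity = capacity;
  }

  @Override
  protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
    return size() > capacity;
  }
}

With a capacity of three, putting a fourth key silently removes the first one, which mirrors the assertSame(null, cache.get(dnIds[0], false)) check in the test.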
Use of org.apache.hadoop.hdfs.protocol.DatanodeID in the Apache Hadoop project: the class TestPeerCache, method testMultiplePeersWithSameKey.
@Test
public void testMultiplePeersWithSameKey() throws Exception {
  final int CAPACITY = 3;
  PeerCache cache = new PeerCache(CAPACITY, 100000);
  DatanodeID dnId = new DatanodeID("192.168.0.1", "fakehostname",
      "fake_datanode_id", 100, 101, 102, 103);
  HashMultiset<FakePeer> peers = HashMultiset.create(CAPACITY);
  for (int i = 0; i < CAPACITY; ++i) {
    FakePeer peer = new FakePeer(dnId, false);
    peers.add(peer);
    cache.put(dnId, peer);
  }
  // Check that all of the peers ended up in the cache
  assertEquals(CAPACITY, cache.size());
  while (!peers.isEmpty()) {
    Peer peer = cache.get(dnId, false);
    assertTrue(peer != null);
    assertTrue(!peer.isClosed());
    peers.remove(peer);
  }
  assertEquals(0, cache.size());
  cache.close();
}
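The HashMultiset here is plain test bookkeeping: several distinct FakePeer objects are cached under one DatanodeID, and the multiset is used to tick each one off as cache.get(...) hands it back, proving every peer is returned exactly once. A self-contained sketch of that idiom using Guava and made-up string values:

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

public final class MultisetBookkeepingDemo {
  private MultisetBookkeepingDemo() {
  }

  public static void main(String[] args) {
    // Record what was handed out under a single key.
    Multiset<String> outstanding = HashMultiset.create();
    outstanding.add("peer-0");
    outstanding.add("peer-1");
    outstanding.add("peer-2");

    // Tick each item off as it comes back, in whatever order.
    for (String returned : new String[] { "peer-2", "peer-0", "peer-1" }) {
      // remove() returns true only if an occurrence was still outstanding,
      // so duplicates or unknown objects would fail here.
      if (!outstanding.remove(returned)) {
        throw new AssertionError("unexpected item: " + returned);
      }
    }
    System.out.println(outstanding.isEmpty()); // true: all returned exactly once
  }
}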
Use of org.apache.hadoop.hdfs.protocol.DatanodeID in the Apache Hadoop project: the class TestBalancer, method runBalancerAndVerifyBlockPlacmentPolicy.
private void runBalancerAndVerifyBlockPlacmentPolicy(Configuration conf,
    long[] capacities, String[] hosts, String[] racks, String[] UDs,
    long newCapacity, String newHost, String newRack, String newUD)
    throws Exception {
  int numOfDatanodes = capacities.length;
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(capacities.length)
      .hosts(hosts).racks(racks).simulatedCapacities(capacities).build();
  DatanodeManager dm = cluster.getNamesystem().getBlockManager()
      .getDatanodeManager();
  if (UDs != null) {
    for (int i = 0; i < UDs.length; i++) {
      DatanodeID datanodeId = cluster.getDataNodes().get(i).getDatanodeId();
      dm.getDatanode(datanodeId).setUpgradeDomain(UDs[i]);
    }
  }
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();
    // fill up the cluster to be 80% full
    long totalCapacity = sum(capacities);
    long totalUsedSpace = totalCapacity * 8 / 10;
    final long fileSize = totalUsedSpace / numOfDatanodes;
    DFSTestUtil.createFile(cluster.getFileSystem(0), filePath, false, 1024,
        fileSize, DEFAULT_BLOCK_SIZE, (short) numOfDatanodes, 0, false);
    // start up an empty node with the same capacity on the same rack as the
    // pinned host.
    cluster.startDataNodes(conf, 1, true, null, new String[] { newRack },
        new String[] { newHost }, new long[] { newCapacity });
    if (newUD != null) {
      DatanodeID newId = cluster.getDataNodes().get(numOfDatanodes).getDatanodeId();
      dm.getDatanode(newId).setUpgradeDomain(newUD);
    }
    totalCapacity += newCapacity;
    // run balancer and validate results
    waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
    // start rebalancing
    Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
    Balancer.run(namenodes, BalancerParameters.DEFAULT, conf);
    BlockPlacementPolicy placementPolicy = cluster.getNamesystem()
        .getBlockManager().getBlockPlacementPolicy();
    List<LocatedBlock> locatedBlocks = client.getBlockLocations(fileName, 0,
        fileSize).getLocatedBlocks();
    for (LocatedBlock locatedBlock : locatedBlocks) {
      BlockPlacementStatus status = placementPolicy.verifyBlockPlacement(
          locatedBlock.getLocations(), numOfDatanodes);
      assertTrue(status.isPlacementPolicySatisfied());
    }
  } finally {
    cluster.shutdown();
  }
}
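For context, here is a hypothetical call site for this private helper. It would have to live inside TestBalancer itself, and every capacity, host name, rack, and upgrade domain below is an illustrative made-up value; the Configuration is assumed to be prepared the way the class's other balancer tests prepare it:

@Test
public void testBalancerAcrossUpgradeDomains() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  // ... per-test balancer settings as configured elsewhere in TestBalancer ...
  final long oneGB = 1024L * 1024L * 1024L;
  long[] capacities = { oneGB, oneGB, oneGB };
  String[] hosts = { "host0", "host1", "host2" };
  String[] racks = { "/rack0", "/rack0", "/rack1" };
  String[] upgradeDomains = { "ud1", "ud2", "ud3" };
  // Add an empty fourth node on a new rack, reusing upgrade domain "ud2",
  // then check that every block still satisfies the placement policy after
  // the balancer has run.
  runBalancerAndVerifyBlockPlacmentPolicy(conf, capacities, hosts, racks,
      upgradeDomains, oneGB, "host3", "/rack2", "ud2");
}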