Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.
Class TestReplicationPolicyWithUpgradeDomain, method testChooseReplicasToDelete.
/**
* Verify the correct replica is chosen to satisfy both rack and upgrade
* domain policy.
* @throws Exception
*/
@Test
public void testChooseReplicasToDelete() throws Exception {
  Collection<DatanodeStorageInfo> nonExcess = new ArrayList<>();
  nonExcess.add(storages[0]);
  nonExcess.add(storages[1]);
  nonExcess.add(storages[2]);
  nonExcess.add(storages[3]);
  List<DatanodeStorageInfo> excessReplicas;
  BlockStoragePolicySuite POLICY_SUITE =
      BlockStoragePolicySuite.createDefaultSuite();
  BlockStoragePolicy storagePolicy = POLICY_SUITE.getDefaultPolicy();
  // delete hint accepted
  DatanodeDescriptor delHintNode = storages[0].getDatanodeDescriptor();
  List<StorageType> excessTypes = storagePolicy.chooseExcess((short) 3,
      DatanodeStorageInfo.toStorageTypes(nonExcess));
  excessReplicas = replicator.chooseReplicasToDelete(nonExcess, nonExcess, 3,
      excessTypes, storages[3].getDatanodeDescriptor(), delHintNode);
  assertTrue(excessReplicas.size() == 1);
  assertTrue(excessReplicas.contains(storages[0]));
  // delete hint rejected because deleting storages[1] would leave
  // only two upgrade domains.
  delHintNode = storages[1].getDatanodeDescriptor();
  excessTypes = storagePolicy.chooseExcess((short) 3,
      DatanodeStorageInfo.toStorageTypes(nonExcess));
  excessReplicas = replicator.chooseReplicasToDelete(nonExcess, nonExcess, 3,
      excessTypes, storages[3].getDatanodeDescriptor(), delHintNode);
  assertTrue(excessReplicas.size() == 1);
  assertTrue(excessReplicas.contains(storages[0]));
  // no delete hint, case 1
  nonExcess.clear();
  nonExcess.add(storages[0]);
  nonExcess.add(storages[1]);
  nonExcess.add(storages[4]);
  nonExcess.add(storages[8]);
  excessTypes = storagePolicy.chooseExcess((short) 3,
      DatanodeStorageInfo.toStorageTypes(nonExcess));
  excessReplicas = replicator.chooseReplicasToDelete(nonExcess, nonExcess, 3,
      excessTypes, storages[8].getDatanodeDescriptor(), null);
  assertTrue(excessReplicas.size() == 1);
  assertTrue(excessReplicas.contains(storages[1]));
  // no delete hint, case 2
  nonExcess.clear();
  nonExcess.add(storages[0]);
  nonExcess.add(storages[1]);
  nonExcess.add(storages[4]);
  nonExcess.add(storages[5]);
  excessTypes = storagePolicy.chooseExcess((short) 3,
      DatanodeStorageInfo.toStorageTypes(nonExcess));
  excessReplicas = replicator.chooseReplicasToDelete(nonExcess, nonExcess, 3,
      excessTypes, storages[8].getDatanodeDescriptor(), null);
  assertTrue(excessReplicas.size() == 1);
  assertTrue(excessReplicas.contains(storages[1])
      || excessReplicas.contains(storages[4]));
  // no delete hint, different excess type deletion
  nonExcess.clear();
  nonExcess.add(storages[0]);
  nonExcess.add(storages[1]);
  nonExcess.add(storages[2]);
  nonExcess.add(storages[3]);
  DatanodeStorageInfo excessStorage = DFSTestUtil.createDatanodeStorageInfo(
      "Storage-excess-ID", "localhost", delHintNode.getNetworkLocation(),
      "foo.com", StorageType.ARCHIVE, delHintNode.getUpgradeDomain());
  nonExcess.add(excessStorage);
  excessTypes = storagePolicy.chooseExcess((short) 3,
      DatanodeStorageInfo.toStorageTypes(nonExcess));
  excessReplicas = replicator.chooseReplicasToDelete(nonExcess, nonExcess, 3,
      excessTypes, storages[3].getDatanodeDescriptor(), null);
  assertTrue(excessReplicas.size() == 2);
  assertTrue(excessReplicas.contains(storages[0]));
  assertTrue(excessReplicas.contains(excessStorage));
  // Test SSD-related deletion. The rack settings differ here, but the case
  // is otherwise similar to
  // {@link TestReplicationPolicy#testChooseReplicasToDelete}.
  // The block was initially created on excessSSD (rack r1, UD 4),
  // storages[7] (rack r3, UD 2) and storages[8] (rack r3, UD 3) with the
  // ONESSD_STORAGE_POLICY_NAME storage policy and replication factor 3.
  // Right after the balancer moves the block from storages[7] to
  // storages[3] (rack r2, UD 1), the application changes the storage policy
  // from ONESSD_STORAGE_POLICY_NAME to HOT_STORAGE_POLICY_ID. In this case,
  // we should be able to delete excessSSD since the remaining storages
  // ({storages[3]}, {storages[7], storages[8]}) are on different racks
  // (r2, r3) and in different UDs (1, 2, 3).
  DatanodeStorageInfo excessSSD = DFSTestUtil.createDatanodeStorageInfo(
      "Storage-excess-SSD-ID", "localhost",
      storages[0].getDatanodeDescriptor().getNetworkLocation(), "foo.com",
      StorageType.SSD, null);
  DatanodeStorageInfo[] ssds = { excessSSD };
  DatanodeDescriptor[] ssdNodes = DFSTestUtil.toDatanodeDescriptor(ssds);
  ssdNodes[0].setUpgradeDomain(Integer.toString(4));
  nonExcess.clear();
  nonExcess.add(excessSSD);
  nonExcess.add(storages[3]);
  nonExcess.add(storages[7]);
  nonExcess.add(storages[8]);
  excessTypes = storagePolicy.chooseExcess((short) 3,
      DatanodeStorageInfo.toStorageTypes(nonExcess));
  excessReplicas = replicator.chooseReplicasToDelete(nonExcess, nonExcess, 3,
      excessTypes, storages[3].getDatanodeDescriptor(),
      storages[7].getDatanodeDescriptor());
  assertEquals(1, excessReplicas.size());
  assertTrue(excessReplicas.contains(excessSSD));
}
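For orientation, here is a minimal sketch (not from the test; the variable names suite, hot, chosen and excess are illustrative) of how a BlockStoragePolicy maps a replication factor onto StorageTypes via the same chooseStorageTypes/chooseExcess API the test exercises:

// Minimal sketch, assuming the default policy suite; the default (HOT)
// policy places all replicas on DISK.
BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
BlockStoragePolicy hot = suite.getDefaultPolicy();
List<StorageType> chosen = hot.chooseStorageTypes((short) 3);
// For HOT, chosen is [DISK, DISK, DISK].
List<StorageType> excess = hot.chooseExcess((short) 3, chosen);
// When every replica already matches the policy, excess is empty and
// chooseReplicasToDelete falls back to rack and upgrade-domain
// constraints, which is what the assertions above verify.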
Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.
Class DiskBalancerTestUtil, method createRandomDataNode.
/**
* Creates a RandomDataNode.
*
* @param diskTypes - storage types needed in the node
* @param diskCount - disk count; that many disks of each type are created
* @return DataNode
* @throws Exception
*/
public DiskBalancerDataNode createRandomDataNode(StorageType[] diskTypes,
    int diskCount) throws Exception {
  Preconditions.checkState(diskTypes.length > 0);
  Preconditions.checkState(diskCount > 0);
  DiskBalancerDataNode node =
      new DiskBalancerDataNode(UUID.randomUUID().toString());
  for (StorageType t : diskTypes) {
    DiskBalancerVolumeSet vSet = createRandomVolumeSet(t, diskCount);
    for (DiskBalancerVolume v : vSet.getVolumes()) {
      node.addVolume(v);
    }
  }
  return node;
}
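A hypothetical call site for this helper (the util and node names are illustrative, and the no-argument DiskBalancerTestUtil constructor is assumed):

// Hedged usage sketch: two storage types with diskCount = 2 per type.
DiskBalancerTestUtil util = new DiskBalancerTestUtil();
DiskBalancerDataNode node = util.createRandomDataNode(
    new StorageType[] { StorageType.DISK, StorageType.SSD }, 2);
// diskCount applies per type, so node ends up with four volumes in total.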
Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.
Class TestQuotaByStorageType, method testQuotaByStorageTypePersistenceInEditLog.
@Test
public void testQuotaByStorageTypePersistenceInEditLog() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  final Path testDir = new Path(dir, METHOD_NAME);
  Path createdFile1 = new Path(testDir, "created_file1.data");
  dfs.mkdirs(testDir);
  // set storage policy on testDir to ONESSD
  dfs.setStoragePolicy(testDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
  // set quota by storage type on testDir
  final long SSD_QUOTA = BLOCKSIZE * 4;
  dfs.setQuotaByStorageType(testDir, StorageType.SSD, SSD_QUOTA);
  INode testDirNode = fsdir.getINode4Write(testDir.toString());
  assertTrue(testDirNode.isDirectory());
  assertTrue(testDirNode.isQuotaSet());
  // Create a file of size 2 * BLOCKSIZE under testDir
  long file1Len = BLOCKSIZE * 2;
  int bufLen = BLOCKSIZE / 16;
  DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE,
      REPLICATION, seed);
  // Verify SSD consumed before namenode restart
  long ssdConsumed = testDirNode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, ssdConsumed);
  // Restart the namenode to make sure the edit log is correct
  cluster.restartNameNode(true);
  refreshClusterState();
  INode testDirNodeAfterNNRestart = fsdir.getINode4Write(testDir.toString());
  // Verify quota is still set
  assertTrue(testDirNodeAfterNNRestart.isDirectory());
  assertTrue(testDirNodeAfterNNRestart.isQuotaSet());
  QuotaCounts qc = testDirNodeAfterNNRestart.getQuotaCounts();
  assertEquals(SSD_QUOTA, qc.getTypeSpace(StorageType.SSD));
  for (StorageType t : StorageType.getTypesSupportingQuota()) {
    if (t != StorageType.SSD) {
      assertEquals(HdfsConstants.QUOTA_RESET, qc.getTypeSpace(t));
    }
  }
  long ssdConsumedAfterNNRestart = testDirNodeAfterNNRestart.asDirectory()
      .getDirectoryWithQuotaFeature().getSpaceConsumed().getTypeSpaces()
      .get(StorageType.SSD);
  assertEquals(file1Len, ssdConsumedAfterNNRestart);
}
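The per-type quota set above can later be removed through the same API; a one-line sketch, assuming the dfs and testDir from the test:

// Hedged sketch: passing HdfsConstants.QUOTA_RESET clears the SSD quota,
// which also explains why unset types report QUOTA_RESET in the loop above.
dfs.setQuotaByStorageType(testDir, StorageType.SSD, HdfsConstants.QUOTA_RESET);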
Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.
Class TestQuotaByStorageType, method testContentSummaryWithoutStoragePolicy.
@Test(timeout = 60000)
public void testContentSummaryWithoutStoragePolicy() throws Exception {
  final Path foo = new Path(dir, "foo");
  Path createdFile1 = new Path(foo, "created_file1.data");
  dfs.mkdirs(foo);
  INode fnode = fsdir.getINode4Write(foo.toString());
  assertTrue(fnode.isDirectory());
  assertFalse(fnode.isQuotaSet());
  // Create a file of size 2 * BLOCKSIZE under directory "foo"
  long file1Len = BLOCKSIZE * 2;
  int bufLen = BLOCKSIZE / 16;
  DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE,
      REPLICATION, seed);
  // Verify getContentSummary without any quota set:
  // expect no type quota or usage information to be available
  ContentSummary cs = dfs.getContentSummary(foo);
  assertEquals(file1Len * REPLICATION, cs.getSpaceConsumed());
  for (StorageType t : StorageType.values()) {
    assertEquals(0, cs.getTypeConsumed(t));
    assertEquals(-1, cs.getTypeQuota(t));
  }
}
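For contrast, a hedged sketch of the quota-set case (the quota value is illustrative): once a type quota exists on the directory, getContentSummary is expected to report it instead of -1:

// Hedged sketch, reusing foo and BLOCKSIZE from the test above.
dfs.setQuotaByStorageType(foo, StorageType.SSD, BLOCKSIZE * 4);
ContentSummary csWithQuota = dfs.getContentSummary(foo);
assertEquals(BLOCKSIZE * 4, csWithQuota.getTypeQuota(StorageType.SSD));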
Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.
Class TestQuotaByStorageType, method testQuotaByStorageTypePersistenceInFsImage.
@Test
public void testQuotaByStorageTypePersistenceInFsImage() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  final Path testDir = new Path(dir, METHOD_NAME);
  Path createdFile1 = new Path(testDir, "created_file1.data");
  dfs.mkdirs(testDir);
  // set storage policy on testDir to ONESSD
  dfs.setStoragePolicy(testDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
  // set quota by storage type on testDir
  final long SSD_QUOTA = BLOCKSIZE * 4;
  dfs.setQuotaByStorageType(testDir, StorageType.SSD, SSD_QUOTA);
  INode testDirNode = fsdir.getINode4Write(testDir.toString());
  assertTrue(testDirNode.isDirectory());
  assertTrue(testDirNode.isQuotaSet());
  // Create a file of size 2 * BLOCKSIZE under testDir
  long file1Len = BLOCKSIZE * 2;
  int bufLen = BLOCKSIZE / 16;
  DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE,
      REPLICATION, seed);
  // Verify SSD consumed before namenode restart
  long ssdConsumed = testDirNode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, ssdConsumed);
  // Restart the namenode with a checkpoint to make sure the fsimage is correct
  dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
  dfs.saveNamespace();
  dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
  cluster.restartNameNode(true);
  refreshClusterState();
  INode testDirNodeAfterNNRestart = fsdir.getINode4Write(testDir.toString());
  assertTrue(testDirNodeAfterNNRestart.isDirectory());
  assertTrue(testDirNodeAfterNNRestart.isQuotaSet());
  QuotaCounts qc = testDirNodeAfterNNRestart.getQuotaCounts();
  assertEquals(SSD_QUOTA, qc.getTypeSpace(StorageType.SSD));
  for (StorageType t : StorageType.getTypesSupportingQuota()) {
    if (t != StorageType.SSD) {
      assertEquals(HdfsConstants.QUOTA_RESET, qc.getTypeSpace(t));
    }
  }
  long ssdConsumedAfterNNRestart = testDirNodeAfterNNRestart.asDirectory()
      .getDirectoryWithQuotaFeature().getSpaceConsumed().getTypeSpaces()
      .get(StorageType.SSD);
  assertEquals(file1Len, ssdConsumedAfterNNRestart);
}
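The safe-mode/saveNamespace sequence above is the standard way to force a checkpoint in HDFS tests; a minimal helper sketch (forceCheckpoint is not part of the class, just a wrapper over the calls shown):

// Hedged helper: force an fsimage checkpoint so a subsequent restart loads
// state from the image rather than replaying the edit log.
private static void forceCheckpoint(DistributedFileSystem dfs)
    throws IOException {
  dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
  dfs.saveNamespace();
  dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
}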