Use of java.util.HashSet in project hadoop by Apache.
The class TestFsDatasetImpl, method testRemoveVolumes.
@Test(timeout = 30000)
public void testRemoveVolumes() throws IOException {
  // Feed FsDataset with block metadata.
  final int NUM_BLOCKS = 100;
  for (int i = 0; i < NUM_BLOCKS; i++) {
    String bpid = BLOCK_POOL_IDS[NUM_BLOCKS % BLOCK_POOL_IDS.length];
    ExtendedBlock eb = new ExtendedBlock(bpid, i);
    // The handler is closed right away by try-with-resources; only the
    // replica metadata registered in the dataset matters for this test.
    try (ReplicaHandler replica =
        dataset.createRbw(StorageType.DEFAULT, eb, false)) {
    }
  }
  final String[] dataDirs =
      conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY).split(",");
  final String volumePathToRemove = dataDirs[0];
  Set<StorageLocation> volumesToRemove = new HashSet<>();
  volumesToRemove.add(StorageLocation.parse(volumePathToRemove));
  FsVolumeReferences volReferences = dataset.getFsVolumeReferences();
  FsVolumeImpl volumeToRemove = null;
  for (FsVolumeSpi vol : volReferences) {
    if (vol.getStorageLocation().equals(volumesToRemove.iterator().next())) {
      volumeToRemove = (FsVolumeImpl) vol;
    }
  }
  assertTrue(volumeToRemove != null);
  volReferences.close();
  dataset.removeVolumes(volumesToRemove, true);
  int expectedNumVolumes = dataDirs.length - 1;
  assertEquals("The volume has been removed from the volumeList.",
      expectedNumVolumes, getNumVolumes());
  assertEquals("The volume has been removed from the storageMap.",
      expectedNumVolumes, dataset.storageMap.size());
  try {
    dataset.asyncDiskService.execute(volumeToRemove, new Runnable() {
      @Override
      public void run() {
      }
    });
    fail("Expect RuntimeException: the volume has been removed from the "
        + "AsyncDiskService.");
  } catch (RuntimeException e) {
    GenericTestUtils.assertExceptionContains("Cannot find volume", e);
  }
  int totalNumReplicas = 0;
  for (String bpid : dataset.volumeMap.getBlockPoolList()) {
    totalNumReplicas += dataset.volumeMap.size(bpid);
  }
  assertEquals("The replica infos on this volume has been removed from the "
      + "volumeMap.", NUM_BLOCKS / NUM_INIT_VOLUMES, totalNumReplicas);
}
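The HashSet usage above amounts to a one-element set describing the volume to remove, read back with volumesToRemove.iterator().next() while scanning the live volumes. A minimal pure-JDK sketch of that pattern, with made-up path strings standing in for StorageLocation objects:

import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class SingleElementSetSketch {
  public static void main(String[] args) {
    // One-element set describing what should be removed (paths are made up).
    Set<String> volumesToRemove = new HashSet<>();
    volumesToRemove.add("/data/dir0");

    // Read the single element back, as the test does with iterator().next().
    String target = volumesToRemove.iterator().next();

    // Scan the "live" volumes for the matching entry.
    List<String> allVolumes = List.of("/data/dir0", "/data/dir1", "/data/dir2");
    String volumeToRemove = null;
    for (String vol : allVolumes) {
      if (vol.equals(target)) {
        volumeToRemove = vol;
      }
    }
    if (volumeToRemove == null) {
      throw new AssertionError("volume to remove not found among live volumes");
    }
    System.out.println("would remove: " + volumeToRemove);
  }
}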
Use of java.util.HashSet in project hadoop by Apache.
The class TestRetryCacheWithHA, method testListCachePools.
/**
 * Add a list of cache pools, list cache pools,
 * switch active NN, and list cache pools again.
 */
@Test(timeout = 60000)
public void testListCachePools() throws Exception {
  final int poolCount = 7;
  HashSet<String> poolNames = new HashSet<String>(poolCount);
  for (int i = 0; i < poolCount; i++) {
    String poolName = "testListCachePools-" + i;
    dfs.addCachePool(new CachePoolInfo(poolName));
    poolNames.add(poolName);
  }
  listCachePools(poolNames, 0);
  // Fail over from NN0 to NN1, then verify the same pools are listed.
  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  cluster.waitActive(1);
  listCachePools(poolNames, 1);
}
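The listCachePools helper is not shown in this snippet, so the following is only a plausible sketch of how an expected-names HashSet like poolNames can be checked against a listing: copy the set, remove every name that is reported, and assert that nothing is missing or duplicated. The listed names below are simulated rather than fetched from a NameNode:

import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class ExpectedNamesSketch {
  public static void main(String[] args) {
    // Expected names, built the same way the test fills poolNames.
    Set<String> expected = new HashSet<>();
    for (int i = 0; i < 7; i++) {
      expected.add("testListCachePools-" + i);
    }

    // Simulated listing; in the test this would come from the active NameNode.
    List<String> listed = List.of(
        "testListCachePools-0", "testListCachePools-1", "testListCachePools-2",
        "testListCachePools-3", "testListCachePools-4", "testListCachePools-5",
        "testListCachePools-6");

    // Remove each listed name; a failed remove means the name was unexpected
    // or listed twice, and anything left over was never listed at all.
    Set<String> remaining = new HashSet<>(expected);
    for (String name : listed) {
      if (!remaining.remove(name)) {
        throw new AssertionError("unexpected or duplicate pool: " + name);
      }
    }
    if (!remaining.isEmpty()) {
      throw new AssertionError("pools not listed: " + remaining);
    }
    System.out.println("all " + expected.size() + " pools accounted for");
  }
}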
Use of java.util.HashSet in project hadoop by Apache.
The class HostsFileWriter, method initOutOfServiceHosts.
public void initOutOfServiceHosts(List<String> decommissionHostNameAndPorts,
    Map<String, Long> maintenanceHosts) throws IOException {
  StringBuilder excludeHosts = new StringBuilder();
  if (isLegacyHostsFile) {
    // Legacy exclude file: plain text, one "host:port" per line, with no
    // support for maintenance state.
    if (maintenanceHosts != null && maintenanceHosts.size() > 0) {
      throw new UnsupportedOperationException(
          "maintenance support isn't supported by legacy hosts file");
    }
    for (String hostNameAndPort : decommissionHostNameAndPorts) {
      excludeHosts.append(hostNameAndPort).append("\n");
    }
    DFSTestUtil.writeFile(localFileSys, excludeFile, excludeHosts.toString());
  } else {
    // Combined hosts file: one DatanodeAdminProperties entry per host.
    HashSet<DatanodeAdminProperties> allDNs = new HashSet<>();
    if (decommissionHostNameAndPorts != null) {
      for (String hostNameAndPort : decommissionHostNameAndPorts) {
        DatanodeAdminProperties dn = new DatanodeAdminProperties();
        String[] hostAndPort = hostNameAndPort.split(":");
        dn.setHostName(hostAndPort[0]);
        dn.setPort(Integer.parseInt(hostAndPort[1]));
        dn.setAdminState(AdminStates.DECOMMISSIONED);
        allDNs.add(dn);
      }
    }
    if (maintenanceHosts != null) {
      for (Map.Entry<String, Long> hostEntry : maintenanceHosts.entrySet()) {
        DatanodeAdminProperties dn = new DatanodeAdminProperties();
        String[] hostAndPort = hostEntry.getKey().split(":");
        dn.setHostName(hostAndPort[0]);
        dn.setPort(Integer.parseInt(hostAndPort[1]));
        dn.setAdminState(AdminStates.IN_MAINTENANCE);
        dn.setMaintenanceExpireTimeInMS(hostEntry.getValue());
        allDNs.add(dn);
      }
    }
    CombinedHostsFileWriter.writeFile(combinedFile.toString(), allDNs);
  }
}
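The non-legacy branch above parses each "host:port" string into a property object and accumulates the objects in a HashSet before writing them out in a single call. A self-contained sketch of that parse-and-collect step, using a hypothetical HostEntry class in place of DatanodeAdminProperties and made-up host names:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class HostEntrySketch {
  // Hypothetical stand-in for DatanodeAdminProperties.
  static class HostEntry {
    String hostName;
    int port;
    String adminState;
    long maintenanceExpireTimeInMS;
  }

  public static void main(String[] args) {
    // Made-up maintenance hosts: "host:port" mapped to an expiry timestamp.
    Map<String, Long> maintenanceHosts = new HashMap<>();
    maintenanceHosts.put("dn1.example.com:9866", 1_000_000L);
    maintenanceHosts.put("dn2.example.com:9866", 2_000_000L);

    Set<HostEntry> allDNs = new HashSet<>();
    for (Map.Entry<String, Long> e : maintenanceHosts.entrySet()) {
      // Same "host:port" split the writer uses.
      String[] hostAndPort = e.getKey().split(":");
      HostEntry dn = new HostEntry();
      dn.hostName = hostAndPort[0];
      dn.port = Integer.parseInt(hostAndPort[1]);
      dn.adminState = "IN_MAINTENANCE";
      dn.maintenanceExpireTimeInMS = e.getValue();
      allDNs.add(dn);
    }
    System.out.println("entries collected: " + allDNs.size());
  }
}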
Use of java.util.HashSet in project hadoop by Apache.
The class HostsFileWriter, method initIncludeHosts.
public void initIncludeHosts(String[] hostNameAndPorts) throws IOException {
  StringBuilder includeHosts = new StringBuilder();
  if (isLegacyHostsFile) {
    for (String hostNameAndPort : hostNameAndPorts) {
      includeHosts.append(hostNameAndPort).append("\n");
    }
    DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
  } else {
    HashSet<DatanodeAdminProperties> allDNs = new HashSet<>();
    for (String hostNameAndPort : hostNameAndPorts) {
      String[] hostAndPort = hostNameAndPort.split(":");
      DatanodeAdminProperties dn = new DatanodeAdminProperties();
      dn.setHostName(hostAndPort[0]);
      dn.setPort(Integer.parseInt(hostAndPort[1]));
      allDNs.add(dn);
    }
    CombinedHostsFileWriter.writeFile(combinedFile.toString(), allDNs);
  }
}
Use of java.util.HashSet in project hadoop by Apache.
The class TestRMContainerAllocator, method checkAssignments.
private void checkAssignments(ContainerRequestEvent[] requests,
    List<TaskAttemptContainerAssignedEvent> assignments, boolean checkHostMatch) {
  Assert.assertNotNull("Container not assigned", assignments);
  Assert.assertEquals("Assigned count not correct",
      requests.length, assignments.size());
  // check for uniqueness of containerIDs
  Set<ContainerId> containerIds = new HashSet<ContainerId>();
  for (TaskAttemptContainerAssignedEvent assigned : assignments) {
    containerIds.add(assigned.getContainer().getId());
  }
  Assert.assertEquals("Assigned containers must be different",
      assignments.size(), containerIds.size());
  // check that every request received an assignment
  for (ContainerRequestEvent req : requests) {
    TaskAttemptContainerAssignedEvent assigned = null;
    for (TaskAttemptContainerAssignedEvent ass : assignments) {
      if (ass.getTaskAttemptID().equals(req.getAttemptID())) {
        assigned = ass;
        break;
      }
    }
    checkAssignment(req, assigned, checkHostMatch);
  }
}
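The uniqueness assertion relies on a basic HashSet property: copying a list into a set collapses duplicates, so equal sizes prove all elements were distinct. A tiny standalone version of that check, with made-up ID strings in place of ContainerId:

import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class UniquenessSketch {
  public static void main(String[] args) {
    // Hypothetical container IDs; the test uses assigned.getContainer().getId().
    List<String> assignedIds = List.of("container_01", "container_02", "container_03");

    // Duplicates collapse in the set, so equal sizes imply all IDs are distinct.
    Set<String> uniqueIds = new HashSet<>(assignedIds);
    if (uniqueIds.size() != assignedIds.size()) {
      throw new AssertionError("Assigned containers must be different");
    }
    System.out.println("all " + assignedIds.size() + " container IDs are unique");
  }
}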