
Example 86 with HashSet

Use of java.util.HashSet in project hadoop by apache.

From the class TestFsDatasetImpl, method testRemoveVolumes.

@Test(timeout = 30000)
public void testRemoveVolumes() throws IOException {
    // Feed FsDataset with block metadata.
    final int NUM_BLOCKS = 100;
    for (int i = 0; i < NUM_BLOCKS; i++) {
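        // Note: NUM_BLOCKS % BLOCK_POOL_IDS.length evaluates to the same index
        // on every iteration, so every block is created in a single block pool.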
        String bpid = BLOCK_POOL_IDS[NUM_BLOCKS % BLOCK_POOL_IDS.length];
        ExtendedBlock eb = new ExtendedBlock(bpid, i);
        try (ReplicaHandler replica = dataset.createRbw(StorageType.DEFAULT, eb, false)) {
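            // Empty body: createRbw has already registered the replica;
            // try-with-resources closes the handler right away.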
        }
    }
    final String[] dataDirs = conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY).split(",");
    final String volumePathToRemove = dataDirs[0];
    Set<StorageLocation> volumesToRemove = new HashSet<>();
    volumesToRemove.add(StorageLocation.parse(volumePathToRemove));
    FsVolumeReferences volReferences = dataset.getFsVolumeReferences();
    FsVolumeImpl volumeToRemove = null;
    for (FsVolumeSpi vol : volReferences) {
        if (vol.getStorageLocation().equals(volumesToRemove.iterator().next())) {
            volumeToRemove = (FsVolumeImpl) vol;
        }
    }
    assertTrue(volumeToRemove != null);
    volReferences.close();
    dataset.removeVolumes(volumesToRemove, true);
    int expectedNumVolumes = dataDirs.length - 1;
    assertEquals("The volume has been removed from the volumeList.", expectedNumVolumes, getNumVolumes());
    assertEquals("The volume has been removed from the storageMap.", expectedNumVolumes, dataset.storageMap.size());
    try {
        dataset.asyncDiskService.execute(volumeToRemove, new Runnable() {

            @Override
            public void run() {
            }
        });
        fail("Expect RuntimeException: the volume has been removed from the " + "AsyncDiskService.");
    } catch (RuntimeException e) {
        GenericTestUtils.assertExceptionContains("Cannot find volume", e);
    }
    int totalNumReplicas = 0;
    for (String bpid : dataset.volumeMap.getBlockPoolList()) {
        totalNumReplicas += dataset.volumeMap.size(bpid);
    }
    assertEquals("The replica infos on this volume has been removed from the " + "volumeMap.", NUM_BLOCKS / NUM_INIT_VOLUMES, totalNumReplicas);
}
Also used: ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), FsVolumeReferences (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences), Matchers.anyString (org.mockito.Matchers.anyString), ReplicaHandler (org.apache.hadoop.hdfs.server.datanode.ReplicaHandler), FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi), StorageLocation (org.apache.hadoop.hdfs.server.datanode.StorageLocation), HashSet (java.util.HashSet), Test (org.junit.Test)
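
As a side note, the loop above matches each live volume against volumesToRemove.iterator().next(), which only works because the set holds a single entry; Set.contains expresses the same check and generalizes to larger sets. A minimal JDK-only sketch of the idiom, with hypothetical volume paths standing in for StorageLocation:

import java.util.HashSet;
import java.util.Set;

public class RemoveByMembershipSketch {
    public static void main(String[] args) {
        // A single-element HashSet used as a "removal request".
        Set<String> volumesToRemove = new HashSet<>();
        volumesToRemove.add("/data/vol0");

        // Scan the live volumes and pick out the one to remove.
        String[] activeVolumes = {"/data/vol0", "/data/vol1"};
        String volumeToRemove = null;
        for (String vol : activeVolumes) {
            // contains() works whether the request set holds one entry or many.
            if (volumesToRemove.contains(vol)) {
                volumeToRemove = vol;
            }
        }
        System.out.println("matched: " + volumeToRemove);  // /data/vol0
    }
}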

Example 87 with HashSet

Use of java.util.HashSet in project hadoop by apache.

From the class TestRetryCacheWithHA, method testListCachePools.

/**
   * Add a list of cache pools, list cache pools,
   * switch active NN, and list cache pools again.
   */
@Test(timeout = 60000)
public void testListCachePools() throws Exception {
    final int poolCount = 7;
    HashSet<String> poolNames = new HashSet<String>(poolCount);
    for (int i = 0; i < poolCount; i++) {
        String poolName = "testListCachePools-" + i;
        dfs.addCachePool(new CachePoolInfo(poolName));
        poolNames.add(poolName);
    }
    listCachePools(poolNames, 0);
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    cluster.waitActive(1);
    listCachePools(poolNames, 1);
}
Also used: CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), HashSet (java.util.HashSet), Test (org.junit.Test)
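
The HashSet here enables order-insensitive verification: the expected pool names are collected up front and compared against whatever the cluster lists after failover. A minimal JDK-only sketch of that idea, with the listing simulated locally:

import java.util.HashSet;
import java.util.Set;

public class ExpectedNamesSketch {
    public static void main(String[] args) {
        // Expected names, built the same way the test builds poolNames.
        Set<String> expected = new HashSet<>();
        for (int i = 0; i < 7; i++) {
            expected.add("testListCachePools-" + i);
        }
        // Simulate a listing that returns the same names in a different order.
        Set<String> listed = new HashSet<>();
        for (int i = 6; i >= 0; i--) {
            listed.add("testListCachePools-" + i);
        }
        // Set equality ignores order, so the check holds before and after failover.
        System.out.println(expected.equals(listed));  // true
    }
}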

Example 88 with HashSet

Use of java.util.HashSet in project hadoop by apache.

From the class HostsFileWriter, method initOutOfServiceHosts.

public void initOutOfServiceHosts(List<String> decommissionHostNameAndPorts, Map<String, Long> maintenanceHosts) throws IOException {
    StringBuilder excludeHosts = new StringBuilder();
    if (isLegacyHostsFile) {
        if (maintenanceHosts != null && maintenanceHosts.size() > 0) {
            throw new UnsupportedOperationException("maintenance support isn't supported by legacy hosts file");
        }
        for (String hostNameAndPort : decommissionHostNameAndPorts) {
            excludeHosts.append(hostNameAndPort).append("\n");
        }
        DFSTestUtil.writeFile(localFileSys, excludeFile, excludeHosts.toString());
    } else {
        HashSet<DatanodeAdminProperties> allDNs = new HashSet<>();
        if (decommissionHostNameAndPorts != null) {
            for (String hostNameAndPort : decommissionHostNameAndPorts) {
                DatanodeAdminProperties dn = new DatanodeAdminProperties();
                String[] hostAndPort = hostNameAndPort.split(":");
                dn.setHostName(hostAndPort[0]);
                dn.setPort(Integer.parseInt(hostAndPort[1]));
                dn.setAdminState(AdminStates.DECOMMISSIONED);
                allDNs.add(dn);
            }
        }
        if (maintenanceHosts != null) {
            for (Map.Entry<String, Long> hostEntry : maintenanceHosts.entrySet()) {
                DatanodeAdminProperties dn = new DatanodeAdminProperties();
                String[] hostAndPort = hostEntry.getKey().split(":");
                dn.setHostName(hostAndPort[0]);
                dn.setPort(Integer.parseInt(hostAndPort[1]));
                dn.setAdminState(AdminStates.IN_MAINTENANCE);
                dn.setMaintenanceExpireTimeInMS(hostEntry.getValue());
                allDNs.add(dn);
            }
        }
        CombinedHostsFileWriter.writeFile(combinedFile.toString(), allDNs);
    }
}
Also used: DatanodeAdminProperties (org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties), Map (java.util.Map), HashSet (java.util.HashSet)
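
One thing to note about collecting beans like this: whether a HashSet actually collapses duplicate entries depends on the element type's equals and hashCode. A small sketch, assuming a JDK with records (16+) and using a hypothetical HostSpec record in place of DatanodeAdminProperties:

import java.util.HashSet;
import java.util.Set;

public class CollectHostsSketch {
    // Hypothetical stand-in for DatanodeAdminProperties; as a record it gets
    // value-based equals/hashCode, so equal entries collapse in a HashSet.
    record HostSpec(String hostName, int port, String adminState) {}

    public static void main(String[] args) {
        Set<HostSpec> allDNs = new HashSet<>();
        // The same host:port appears twice; only one entry survives.
        for (String hostAndPort : new String[] {"dn1:9866", "dn1:9866"}) {
            String[] parts = hostAndPort.split(":");
            allDNs.add(new HostSpec(parts[0], Integer.parseInt(parts[1]),
                    "DECOMMISSIONED"));
        }
        System.out.println(allDNs.size());  // 1 -- the duplicate collapsed
    }
}

A type that does not override equals/hashCode would keep both entries, since HashSet would fall back to identity comparison.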

Example 89 with HashSet

Use of java.util.HashSet in project hadoop by apache.

From the class HostsFileWriter, method initIncludeHosts.

public void initIncludeHosts(String[] hostNameAndPorts) throws IOException {
    StringBuilder includeHosts = new StringBuilder();
    if (isLegacyHostsFile) {
        for (String hostNameAndPort : hostNameAndPorts) {
            includeHosts.append(hostNameAndPort).append("\n");
        }
        DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
    } else {
        HashSet<DatanodeAdminProperties> allDNs = new HashSet<>();
        for (String hostNameAndPort : hostNameAndPorts) {
            String[] hostAndPort = hostNameAndPort.split(":");
            DatanodeAdminProperties dn = new DatanodeAdminProperties();
            dn.setHostName(hostAndPort[0]);
            dn.setPort(Integer.parseInt(hostAndPort[1]));
            allDNs.add(dn);
        }
        CombinedHostsFileWriter.writeFile(combinedFile.toString(), allDNs);
    }
}
Also used: DatanodeAdminProperties (org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties), HashSet (java.util.HashSet)
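
A brief sketch of how an include set built this way is typically consumed: once the host:port strings are parsed into a HashSet, admission becomes an O(1) membership check. The host names below are hypothetical:

import java.util.HashSet;
import java.util.Set;

public class IncludeCheckSketch {
    public static void main(String[] args) {
        // Parse host:port strings, keeping only the host part.
        Set<String> includedHosts = new HashSet<>();
        for (String hostAndPort : new String[] {"dn1:9866", "dn2:9866"}) {
            includedHosts.add(hostAndPort.split(":")[0]);
        }
        // Constant-time admission checks against the include list.
        System.out.println(includedHosts.contains("dn1"));  // true
        System.out.println(includedHosts.contains("dn3"));  // false
    }
}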

Example 90 with HashSet

Use of java.util.HashSet in project hadoop by apache.

From the class TestRMContainerAllocator, method checkAssignments.

private void checkAssignments(ContainerRequestEvent[] requests, List<TaskAttemptContainerAssignedEvent> assignments, boolean checkHostMatch) {
    Assert.assertNotNull("Container not assigned", assignments);
    Assert.assertEquals("Assigned count not correct", requests.length, assignments.size());
    // check for uniqueness of containerIDs
    Set<ContainerId> containerIds = new HashSet<ContainerId>();
    for (TaskAttemptContainerAssignedEvent assigned : assignments) {
        containerIds.add(assigned.getContainer().getId());
    }
    Assert.assertEquals("Assigned containers must be different", assignments.size(), containerIds.size());
    // check for all assignment
    for (ContainerRequestEvent req : requests) {
        TaskAttemptContainerAssignedEvent assigned = null;
        for (TaskAttemptContainerAssignedEvent ass : assignments) {
            if (ass.getTaskAttemptID().equals(req.getAttemptID())) {
                assigned = ass;
                break;
            }
        }
        checkAssignment(req, assigned, checkHostMatch);
    }
}
Also used: ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), HashSet (java.util.HashSet), TaskAttemptContainerAssignedEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent)
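
The uniqueness check at the top of the method is a common HashSet idiom: copy the values into a set and compare sizes; any shrinkage means a duplicate. A standalone sketch:

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;

public class UniquenessSketch {
    public static void main(String[] args) {
        // Hypothetical container IDs; "c2" appears twice.
        List<String> containerIds = Arrays.asList("c1", "c2", "c2");
        // The HashSet drops duplicates, so equal sizes mean all-unique.
        boolean allUnique =
            new HashSet<>(containerIds).size() == containerIds.size();
        System.out.println(allUnique);  // false -- "c2" is repeated
    }
}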

Aggregations

HashSet (java.util.HashSet): 12137
Set (java.util.Set): 2609
ArrayList (java.util.ArrayList): 2318
HashMap (java.util.HashMap): 2096
Test (org.junit.Test): 2060
Map (java.util.Map): 1198
Iterator (java.util.Iterator): 979
IOException (java.io.IOException): 934
List (java.util.List): 911
File (java.io.File): 607
LinkedHashSet (java.util.LinkedHashSet): 460
Test (org.testng.annotations.Test): 460
TreeSet (java.util.TreeSet): 271
Collection (java.util.Collection): 233
LinkedList (java.util.LinkedList): 224
Region (org.apache.geode.cache.Region): 202
SSOException (com.iplanet.sso.SSOException): 188
Date (java.util.Date): 180
LinkedHashMap (java.util.LinkedHashMap): 169
PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion): 166
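
Several of the set types tallied above differ mainly in iteration order, which is often the deciding factor when choosing among them. A quick illustration:

import java.util.Arrays;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.TreeSet;

public class SetOrderSketch {
    public static void main(String[] args) {
        List<String> input = Arrays.asList("b", "c", "a");
        System.out.println(new HashSet<>(input));        // order unspecified
        System.out.println(new LinkedHashSet<>(input));  // [b, c, a] -- insertion order
        System.out.println(new TreeSet<>(input));        // [a, b, c] -- sorted
    }
}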