
Example 26 with DatanodeStorage

Use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage in the Apache Hadoop project.

From the class FsDatasetImpl, method activateVolume:

/**
   * Activate a volume to serve requests.
   * @throws IOException if the storage UUID already exists.
   */
private void activateVolume(ReplicaMap replicaMap, Storage.StorageDirectory sd, StorageType storageType, FsVolumeReference ref) throws IOException {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        DatanodeStorage dnStorage = storageMap.get(sd.getStorageUuid());
        if (dnStorage != null) {
            final String errorMsg = String.format("Found duplicated storage UUID: %s in %s.", sd.getStorageUuid(), sd.getVersionFile());
            LOG.error(errorMsg);
            throw new IOException(errorMsg);
        }
        volumeMap.addAll(replicaMap);
        storageMap.put(sd.getStorageUuid(), new DatanodeStorage(sd.getStorageUuid(), DatanodeStorage.State.NORMAL, storageType));
        asyncDiskService.addVolume((FsVolumeImpl) ref.getVolume());
        volumes.addVolume(ref);
    }
}
Also used : AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException)
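Each of the FsDatasetImpl examples in this set guards its dataset mutations with the same try-with-resources idiom: datasetLock.acquire() returns the AutoCloseableLock itself, so the lock is released automatically when the block exits, even on an exception. A minimal standalone sketch of that idiom, assuming AutoCloseableLock's no-argument constructor (which wraps a plain ReentrantLock):

import org.apache.hadoop.util.AutoCloseableLock;

class DatasetLockSketch {
    // Hypothetical lock guarding shared dataset state (illustration only).
    private final AutoCloseableLock datasetLock = new AutoCloseableLock();

    void mutateSharedState() {
        // acquire() blocks until the lock is held and returns the lock itself,
        // so try-with-resources releases it on every exit path.
        try (AutoCloseableLock lock = datasetLock.acquire()) {
            // ... update maps such as storageMap or volumeMap here ...
        }
    }
}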

Example 27 with DatanodeStorage

Use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage in the Apache Hadoop project.

From the class FsDatasetImpl, method shutdownBlockPool:

@Override
public void shutdownBlockPool(String bpid) {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        LOG.info("Removing block pool " + bpid);
        Map<DatanodeStorage, BlockListAsLongs> blocksPerVolume = getBlockReports(bpid);
        volumeMap.cleanUpBlockPool(bpid);
        volumes.removeBlockPool(bpid, blocksPerVolume);
    }
}
Also used : AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) BlockListAsLongs(org.apache.hadoop.hdfs.protocol.BlockListAsLongs)

Example 28 with DatanodeStorage

Use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage in the Apache Hadoop project.

From the class FsDatasetImpl, method getBlockReports:

@Override
public Map<DatanodeStorage, BlockListAsLongs> getBlockReports(String bpid) {
    Map<DatanodeStorage, BlockListAsLongs> blockReportsMap = new HashMap<DatanodeStorage, BlockListAsLongs>();
    Map<String, BlockListAsLongs.Builder> builders = new HashMap<String, BlockListAsLongs.Builder>();
    List<FsVolumeImpl> curVolumes = null;
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        curVolumes = volumes.getVolumes();
        for (FsVolumeSpi v : curVolumes) {
            builders.put(v.getStorageID(), BlockListAsLongs.builder(maxDataLength));
        }
        Set<String> missingVolumesReported = new HashSet<>();
        for (ReplicaInfo b : volumeMap.replicas(bpid)) {
            String volStorageID = b.getVolume().getStorageID();
            if (!builders.containsKey(volStorageID)) {
                if (!missingVolumesReported.contains(volStorageID)) {
                    LOG.warn("Storage volume: " + volStorageID + " missing for the" + " replica block: " + b + ". Probably being removed!");
                    missingVolumesReported.add(volStorageID);
                }
                continue;
            }
            switch(b.getState()) {
                case FINALIZED:
                case RBW:
                case RWR:
                    builders.get(b.getVolume().getStorageID()).add(b);
                    break;
                case RUR:
                    ReplicaInfo orig = b.getOriginalReplica();
                    builders.get(b.getVolume().getStorageID()).add(orig);
                    break;
                case TEMPORARY:
                    break;
                default:
                    assert false : "Illegal ReplicaInfo state.";
            }
        }
    }
    for (FsVolumeImpl v : curVolumes) {
        blockReportsMap.put(v.toDatanodeStorage(), builders.get(v.getStorageID()).build());
    }
    return blockReportsMap;
}
Also used : ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) ReplicaBuilder(org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock) BlockListAsLongs(org.apache.hadoop.hdfs.protocol.BlockListAsLongs) FsVolumeSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi) HashSet(java.util.HashSet)
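The map returned by getBlockReports keys each per-volume report by its DatanodeStorage. A hypothetical caller (not part of Hadoop) could summarize it as below; the sketch assumes only the public accessors DatanodeStorage#getStorageID and BlockListAsLongs#getNumberOfBlocks, and takes the dataset through the FsDatasetSpi interface that getBlockReports overrides:

import java.util.Map;

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;

class BlockReportSummarySketch {
    // Hypothetical helper: print one line per storage volume in the block report.
    static void printSummary(FsDatasetSpi<?> dataset, String bpid) {
        Map<DatanodeStorage, BlockListAsLongs> reports = dataset.getBlockReports(bpid);
        for (Map.Entry<DatanodeStorage, BlockListAsLongs> e : reports.entrySet()) {
            // One entry per volume: its storage ID and how many replicas it reported.
            System.out.println(e.getKey().getStorageID()
                + ": " + e.getValue().getNumberOfBlocks() + " blocks");
        }
    }
}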

Example 29 with DatanodeStorage

Use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage in the Apache Hadoop project.

From the class DFSTestUtil, method createDatanodeStorageInfo:

public static DatanodeStorageInfo createDatanodeStorageInfo(String storageID, String ip, String rack, String hostname, StorageType type, String upgradeDomain) {
    final DatanodeStorage storage = new DatanodeStorage(storageID, DatanodeStorage.State.NORMAL, type);
    final DatanodeDescriptor dn = BlockManagerTestUtil.getDatanodeDescriptor(ip, rack, storage, hostname);
    if (upgradeDomain != null) {
        dn.setUpgradeDomain(upgradeDomain);
    }
    return BlockManagerTestUtil.newDatanodeStorageInfo(dn, storage);
}
Also used : DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage)
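A hypothetical invocation of this helper, with illustrative argument values (a null upgrade domain simply leaves it unset, per the guard above):

// Illustrative call only; the storage ID, address, rack and host are made up.
DatanodeStorageInfo storageInfo = DFSTestUtil.createDatanodeStorageInfo(
    "storage-1", "10.0.0.1", "/d1/r1", "host1", StorageType.DISK, null);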

Example 30 with DatanodeStorage

Use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage in the Apache Hadoop project.

From the class TestBlockStoragePolicy, method testChooseSsdOverDisk:

@Test
public void testChooseSsdOverDisk() throws Exception {
    BlockStoragePolicy policy = new BlockStoragePolicy((byte) 9, "TEST1", new StorageType[] { StorageType.SSD, StorageType.DISK, StorageType.ARCHIVE }, new StorageType[] {}, new StorageType[] {});
    final String[] racks = { "/d1/r1", "/d1/r1", "/d1/r1" };
    final String[] hosts = { "host1", "host2", "host3" };
    final StorageType[] disks = { StorageType.DISK, StorageType.DISK, StorageType.DISK };
    final DatanodeStorageInfo[] diskStorages = DFSTestUtil.createDatanodeStorageInfos(3, racks, hosts, disks);
    final DatanodeDescriptor[] dataNodes = DFSTestUtil.toDatanodeDescriptor(diskStorages);
    for (int i = 0; i < dataNodes.length; i++) {
        BlockManagerTestUtil.updateStorage(dataNodes[i], new DatanodeStorage("ssd" + i, DatanodeStorage.State.NORMAL, StorageType.SSD));
    }
    FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
    File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(baseDir, "name").getPath());
    DFSTestUtil.formatNameNode(conf);
    NameNode namenode = new NameNode(conf);
    final BlockManager bm = namenode.getNamesystem().getBlockManager();
    BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
    NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
    for (DatanodeDescriptor datanode : dataNodes) {
        cluster.add(datanode);
    }
    DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3, dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false, new HashSet<Node>(), 0, policy, null);
    System.out.println(policy.getName() + ": " + Arrays.asList(targets));
    Assert.assertEquals(2, targets.length);
    Assert.assertEquals(StorageType.SSD, targets[0].getStorageType());
    Assert.assertEquals(StorageType.DISK, targets[1].getStorageType());
}
Also used : NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode) StorageType(org.apache.hadoop.fs.StorageType) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) Node(org.apache.hadoop.net.Node) NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) NetworkTopology(org.apache.hadoop.net.NetworkTopology) File(java.io.File) Test(org.junit.Test)
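For readability, the policy built at the top of the test can be written with its constructor arguments annotated; the parameter roles below are inferred from the five-argument BlockStoragePolicy constructor and should be read as assumptions, not documentation:

// Same policy as in the test, with the argument roles spelled out.
BlockStoragePolicy ssdFirst = new BlockStoragePolicy(
    (byte) 9,   // policy id
    "TEST1",    // policy name
    new StorageType[] { StorageType.SSD, StorageType.DISK, StorageType.ARCHIVE },  // preferred storage types, in order
    new StorageType[] {},   // fallbacks at file creation time
    new StorageType[] {});  // fallbacks for replication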

Aggregations

DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 47 usages
Test (org.junit.Test): 27 usages
ArrayList (java.util.ArrayList): 16 usages
BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs): 13 usages
ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo): 13 usages
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 12 usages
Block (org.apache.hadoop.hdfs.protocol.Block): 11 usages
Path (org.apache.hadoop.fs.Path): 10 usages
Configuration (org.apache.hadoop.conf.Configuration): 8 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 8 usages
StorageReceivedDeletedBlocks (org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks): 8 usages
Map (java.util.Map): 7 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 7 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 7 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 7 usages
BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext): 7 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 5 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 5 usages
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 5 usages
StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport): 5 usages