Example 26 with StorageType

use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

the class TestBlockStatsMXBean method setup.

@Before
public void setup() throws IOException {
    HdfsConfiguration conf = new HdfsConfiguration();
    conf.setTimeDuration(DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY, 0, TimeUnit.MILLISECONDS);
    cluster = null;
    StorageType[][] types = new StorageType[6][];
    for (int i = 0; i < 3; i++) {
        types[i] = new StorageType[] { StorageType.RAM_DISK, StorageType.DISK };
    }
    for (int i = 3; i < 5; i++) {
        types[i] = new StorageType[] { StorageType.RAM_DISK, StorageType.ARCHIVE };
    }
    types[5] = new StorageType[] { StorageType.RAM_DISK, StorageType.ARCHIVE, StorageType.ARCHIVE };
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(6).storageTypes(types).storagesPerDatanode(3).build();
    cluster.waitActive();
}
Also used: StorageType (org.apache.hadoop.fs.StorageType), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), Before (org.junit.Before)
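
A setup like this is usually paired with a teardown that shuts the mini cluster down between tests. A minimal sketch (the @After method below is an assumption, not part of the listed source; it needs org.junit.After):

@After
public void tearDown() {
    if (cluster != null) {
        // release the mini cluster's namenode and datanode resources
        cluster.shutdown();
        cluster = null;
    }
}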

Example 27 with StorageType

use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

the class TestDatanodeManager method HelperFunction.

/**
   * Helper function that tests the DatanodeManager's sortLocatedBlocks
   * method; we invoke it both with and without a topology script.
   *
   * @param scriptFileName script file name, or null for no script
   *
   * @throws URISyntaxException
   * @throws IOException
   */
public void HelperFunction(String scriptFileName) throws URISyntaxException, IOException {
    // create the DatanodeManager which will be tested
    Configuration conf = new Configuration();
    FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
    Mockito.when(fsn.hasWriteLock()).thenReturn(true);
    if (scriptFileName != null && !scriptFileName.isEmpty()) {
        URL shellScript = getClass().getResource(scriptFileName);
        Path resourcePath = Paths.get(shellScript.toURI());
        FileUtil.setExecutable(resourcePath.toFile(), true);
        conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, resourcePath.toString());
    }
    DatanodeManager dm = mockDatanodeManager(fsn, conf);
    // register 5 datanodes, each with different storage ID and type
    DatanodeInfo[] locs = new DatanodeInfo[5];
    String[] storageIDs = new String[5];
    StorageType[] storageTypes = new StorageType[] { StorageType.ARCHIVE, StorageType.DEFAULT, StorageType.DISK, StorageType.RAM_DISK, StorageType.SSD };
    for (int i = 0; i < 5; i++) {
        // register new datanode
        String uuid = "UUID-" + i;
        String ip = "IP-" + i;
        DatanodeRegistration dr = Mockito.mock(DatanodeRegistration.class);
        Mockito.when(dr.getDatanodeUuid()).thenReturn(uuid);
        Mockito.when(dr.getIpAddr()).thenReturn(ip);
        Mockito.when(dr.getXferAddr()).thenReturn(ip + ":9000");
        Mockito.when(dr.getXferPort()).thenReturn(9000);
        Mockito.when(dr.getSoftwareVersion()).thenReturn("version1");
        dm.registerDatanode(dr);
        // get location and storage information
        locs[i] = dm.getDatanode(uuid);
        storageIDs[i] = "storageID-" + i;
    }
    // set first 2 locations as decommissioned
    locs[0].setDecommissioned();
    locs[1].setDecommissioned();
    // create LocatedBlock with above locations
    ExtendedBlock b = new ExtendedBlock("somePoolID", 1234);
    LocatedBlock block = new LocatedBlock(b, locs, storageIDs, storageTypes);
    List<LocatedBlock> blocks = new ArrayList<>();
    blocks.add(block);
    final String targetIp = locs[4].getIpAddr();
    // sort block locations
    dm.sortLocatedBlocks(targetIp, blocks);
    // check that storage IDs/types are aligned with datanode locs
    DatanodeInfo[] sortedLocs = block.getLocations();
    storageIDs = block.getStorageIDs();
    storageTypes = block.getStorageTypes();
    assertThat(sortedLocs.length, is(5));
    assertThat(storageIDs.length, is(5));
    assertThat(storageTypes.length, is(5));
    for (int i = 0; i < sortedLocs.length; i++) {
        assertThat(((DatanodeInfoWithStorage) sortedLocs[i]).getStorageID(), is(storageIDs[i]));
        assertThat(((DatanodeInfoWithStorage) sortedLocs[i]).getStorageType(), is(storageTypes[i]));
    }
    // Ensure the local node is first.
    assertThat(sortedLocs[0].getIpAddr(), is(targetIp));
    // Ensure the two decommissioned DNs were moved to the end.
    assertThat(sortedLocs[sortedLocs.length - 1].getAdminState(), is(DatanodeInfo.AdminStates.DECOMMISSIONED));
    assertThat(sortedLocs[sortedLocs.length - 2].getAdminState(), is(DatanodeInfo.AdminStates.DECOMMISSIONED));
}
Also used: Path (java.nio.file.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), StorageType (org.apache.hadoop.fs.StorageType), Configuration (org.apache.hadoop.conf.Configuration), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), ArrayList (java.util.ArrayList), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), URL (java.net.URL), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem)
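
A hedged sketch of how this helper is typically driven, once without and once with a topology script. The test method names and the script path below are illustrative, not necessarily those in TestDatanodeManager:

@Test
public void testSortLocatedBlocks() throws IOException, URISyntaxException {
    // no topology script: sorting relies on the default network distance
    HelperFunction(null);
}

@Test
public void testSortLocatedBlocksWithTopologyScript() throws IOException, URISyntaxException {
    // rack resolution goes through an (illustrative) script on the test classpath
    HelperFunction("/sample-topology-script.sh");
}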

Example 28 with StorageType

use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

the class TestBlockManager method testUseDelHint.

@Test
public void testUseDelHint() {
    DatanodeStorageInfo delHint = new DatanodeStorageInfo(DFSTestUtil.getLocalDatanodeDescriptor(), new DatanodeStorage("id"));
    List<DatanodeStorageInfo> moreThan1Racks = Arrays.asList(delHint);
    List<StorageType> excessTypes = new ArrayList<>();
    BlockPlacementPolicyDefault policyDefault = (BlockPlacementPolicyDefault) bm.getBlockPlacementPolicy();
    excessTypes.add(StorageType.DEFAULT);
    Assert.assertTrue(policyDefault.useDelHint(delHint, null, moreThan1Racks, null, excessTypes));
    excessTypes.remove(0);
    excessTypes.add(StorageType.SSD);
    Assert.assertFalse(policyDefault.useDelHint(delHint, null, moreThan1Racks, null, excessTypes));
}
Also used: StorageType (org.apache.hadoop.fs.StorageType), DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), ArrayList (java.util.ArrayList), Test (org.junit.Test)
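
The rule the two assertions exercise: the delete hint is honored only when its storage type appears among the excess storage types slated for removal. A simplified sketch of that decision (the real BlockPlacementPolicyDefault also performs rack-placement checks, omitted here, so this is an illustration, not the actual source):

// Simplified illustration only, not BlockPlacementPolicyDefault itself.
static boolean useDelHintSimplified(DatanodeStorageInfo delHint, List<StorageType> excessTypes) {
    // DEFAULT (an alias for DISK) matches the hint's storage in the first
    // assertion above; SSD does not match in the second, so the hint is rejected.
    return excessTypes.contains(delHint.getStorageType());
}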

Example 29 with StorageType

use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

the class DFSInputStream method getBestNodeDNAddrPair.

/**
   * Get the best node from which to stream the data.
   * @param block LocatedBlock, containing nodes in priority order.
   * @param ignoredNodes Do not choose nodes in this array (may be null)
   * @return The DNAddrPair of the best node. Null if no node can be chosen.
   */
protected DNAddrPair getBestNodeDNAddrPair(LocatedBlock block, Collection<DatanodeInfo> ignoredNodes) {
    DatanodeInfo[] nodes = block.getLocations();
    StorageType[] storageTypes = block.getStorageTypes();
    DatanodeInfo chosenNode = null;
    StorageType storageType = null;
    if (nodes != null) {
        for (int i = 0; i < nodes.length; i++) {
            if (!deadNodes.containsKey(nodes[i]) && (ignoredNodes == null || !ignoredNodes.contains(nodes[i]))) {
                chosenNode = nodes[i];
                // storage types are ordered to correspond with nodes, so
                // use the same index to get the storage type.
                if (storageTypes != null && i < storageTypes.length) {
                    storageType = storageTypes[i];
                }
                break;
            }
        }
    }
    if (chosenNode == null) {
        reportLostBlock(block, ignoredNodes);
        return null;
    }
    final String dnAddr = chosenNode.getXferAddr(dfsClient.getConf().isConnectToDnViaHostname());
    DFSClient.LOG.debug("Connecting to datanode {}", dnAddr);
    InetSocketAddress targetAddr = NetUtils.createSocketAddr(dnAddr);
    return new DNAddrPair(chosenNode, targetAddr, storageType);
}
Also used: DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), StorageType (org.apache.hadoop.fs.StorageType), InetSocketAddress (java.net.InetSocketAddress)
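
Callers typically wrap this method in a retry loop: each node that fails to serve a read is recorded in deadNodes so the next call skips it. A hedged sketch of that pattern (chooseNodeWithRetry and tryConnect are hypothetical names, not DFSInputStream API; addToDeadNodes is assumed to mark a node dead):

private DNAddrPair chooseNodeWithRetry(LocatedBlock block, Collection<DatanodeInfo> ignoredNodes) throws IOException {
    while (true) {
        DNAddrPair candidate = getBestNodeDNAddrPair(block, ignoredNodes);
        if (candidate == null) {
            // getBestNodeDNAddrPair has already reported the lost block
            throw new IOException("No live datanodes contain block " + block);
        }
        if (tryConnect(candidate)) {
            // hypothetical connection attempt succeeded
            return candidate;
        }
        // mark the failed node dead so the next pass skips it
        addToDeadNodes(candidate.info);
    }
}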

Example 30 with StorageType

use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

the class DataStreamer method handleBadDatanode.

/**
   * Remove bad node from list of nodes if badNodeIndex was set.
   * @return true if it should continue.
   */
boolean handleBadDatanode() {
    final int badNodeIndex = errorState.getBadNodeIndex();
    if (badNodeIndex >= 0) {
        if (nodes.length <= 1) {
            lastException.set(new IOException("All datanodes " + Arrays.toString(nodes) + " are bad. Aborting..."));
            streamerClosed = true;
            return false;
        }
        LOG.warn("Error Recovery for " + block + " in pipeline " + Arrays.toString(nodes) + ": datanode " + badNodeIndex + "(" + nodes[badNodeIndex] + ") is bad.");
        failed.add(nodes[badNodeIndex]);
        DatanodeInfo[] newnodes = new DatanodeInfo[nodes.length - 1];
        arraycopy(nodes, newnodes, badNodeIndex);
        final StorageType[] newStorageTypes = new StorageType[newnodes.length];
        arraycopy(storageTypes, newStorageTypes, badNodeIndex);
        final String[] newStorageIDs = new String[newnodes.length];
        arraycopy(storageIDs, newStorageIDs, badNodeIndex);
        setPipeline(newnodes, newStorageTypes, newStorageIDs);
        errorState.adjustState4RestartingNode();
        lastException.clear();
    }
    return true;
}
Also used: DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), StorageType (org.apache.hadoop.fs.StorageType), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), MultipleIOException (org.apache.hadoop.io.MultipleIOException)
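
The arraycopy helper used above copies every entry except the one at the bad index. A minimal sketch of what such a helper looks like (reconstructed for illustration; the actual DataStreamer helper may differ in detail):

// copy srcs into dsts, skipping the entry at skipIndex
static <T> void arraycopy(T[] srcs, T[] dsts, int skipIndex) {
    System.arraycopy(srcs, 0, dsts, 0, skipIndex);
    System.arraycopy(srcs, skipIndex + 1, dsts, skipIndex, dsts.length - skipIndex);
}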

Aggregations

StorageType (org.apache.hadoop.fs.StorageType): 94
Test (org.junit.Test): 31
Path (org.apache.hadoop.fs.Path): 27
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 25
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 19
Configuration (org.apache.hadoop.conf.Configuration): 17
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 16
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 14
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 13
BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy): 12
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 11
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 11
IOException (java.io.IOException): 10
ArrayList (java.util.ArrayList): 9
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 6
ByteString (com.google.protobuf.ByteString): 5
InterruptedIOException (java.io.InterruptedIOException): 5
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 5
File (java.io.File): 4
InetSocketAddress (java.net.InetSocketAddress): 4