
Example 41 with StorageType

Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

The class INodeFile, method computeQuotaUsage.

// This is the only place that needs to use the BlockStoragePolicySuite to
// derive the intended storage type usage for quota by storage type
@Override
public final QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps, byte blockStoragePolicyId, boolean useCache, int lastSnapshotId) {
    final QuotaCounts counts = new QuotaCounts.Builder().nameSpace(1).build();
    final BlockStoragePolicy bsp = (blockStoragePolicyId == BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) ? null : bsps.getPolicy(blockStoragePolicyId);
    FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
    if (sf == null) {
        counts.add(storagespaceConsumed(bsp));
        return counts;
    }
    FileDiffList fileDiffList = sf.getDiffs();
    int last = fileDiffList.getLastSnapshotId();
    if (lastSnapshotId == Snapshot.CURRENT_STATE_ID || last == Snapshot.CURRENT_STATE_ID) {
        counts.add(storagespaceConsumed(bsp));
        return counts;
    }
    final long ssDeltaNoReplication;
    short replication;
    if (isStriped()) {
        return computeQuotaUsageWithStriped(bsp, counts);
    }
    if (last < lastSnapshotId) {
        ssDeltaNoReplication = computeFileSize(true, false);
        replication = getFileReplication();
    } else {
        int sid = fileDiffList.getSnapshotById(lastSnapshotId);
        ssDeltaNoReplication = computeFileSize(sid);
        replication = getFileReplication(sid);
    }
    counts.addStorageSpace(ssDeltaNoReplication * replication);
    if (bsp != null) {
        List<StorageType> storageTypes = bsp.chooseStorageTypes(replication);
        for (StorageType t : storageTypes) {
            if (!t.supportTypeQuota()) {
                continue;
            }
            counts.addTypeSpace(t, ssDeltaNoReplication);
        }
    }
    return counts;
}
Also used : FileWithSnapshotFeature(org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature) StorageType(org.apache.hadoop.fs.StorageType) FileDiffList(org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList) BlockStoragePolicy(org.apache.hadoop.hdfs.protocol.BlockStoragePolicy)
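
The per-type charge above hinges on BlockStoragePolicy.chooseStorageTypes(replication) expanding a replication factor into one StorageType per replica, after which only types whose supportTypeQuota() returns true are charged. Below is a minimal sketch of that expansion, using the same public BlockStoragePolicy constructor that Example 42 uses; the policy id, name, and fallback arrays are illustrative, not one of Hadoop's built-in policies.

import java.util.List;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;

public class StorageTypeQuotaSketch {
    public static void main(String[] args) {
        // Illustrative ONE_SSD-like policy: first replica on SSD, the rest on DISK.
        BlockStoragePolicy policy = new BlockStoragePolicy((byte) 10, "ONE_SSD_LIKE", new StorageType[] { StorageType.SSD, StorageType.DISK }, new StorageType[] { StorageType.DISK }, new StorageType[] { StorageType.DISK });
        // ssDeltaNoReplication stands in for the file size charged per storage type.
        long ssDeltaNoReplication = 1024L;
        // Expect one entry per replica, e.g. [SSD, DISK, DISK] for replication 3.
        List<StorageType> chosen = policy.chooseStorageTypes((short) 3);
        for (StorageType t : chosen) {
            if (t.supportTypeQuota()) {
                System.out.println(t + ": charge " + ssDeltaNoReplication + " bytes of type quota");
            }
        }
    }
}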

Example 42 with StorageType

Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

The class TestBlockStoragePolicy, method testChooseTargetWithTopology.

@Test
public void testChooseTargetWithTopology() throws Exception {
    BlockStoragePolicy policy1 = new BlockStoragePolicy((byte) 9, "TEST1", new StorageType[] { StorageType.SSD, StorageType.DISK, StorageType.ARCHIVE }, new StorageType[] {}, new StorageType[] {});
    BlockStoragePolicy policy2 = new BlockStoragePolicy((byte) 11, "TEST2", new StorageType[] { StorageType.DISK, StorageType.SSD, StorageType.ARCHIVE }, new StorageType[] {}, new StorageType[] {});
    final String[] racks = { "/d1/r1", "/d1/r2", "/d1/r2" };
    final String[] hosts = { "host1", "host2", "host3" };
    final StorageType[] types = { StorageType.DISK, StorageType.SSD, StorageType.ARCHIVE };
    final DatanodeStorageInfo[] storages = DFSTestUtil.createDatanodeStorageInfos(3, racks, hosts, types);
    final DatanodeDescriptor[] dataNodes = DFSTestUtil.toDatanodeDescriptor(storages);
    FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
    File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(baseDir, "name").getPath());
    DFSTestUtil.formatNameNode(conf);
    NameNode namenode = new NameNode(conf);
    final BlockManager bm = namenode.getNamesystem().getBlockManager();
    BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
    NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
    for (DatanodeDescriptor datanode : dataNodes) {
        cluster.add(datanode);
    }
    DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3, dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false, new HashSet<Node>(), 0, policy1, null);
    System.out.println(Arrays.asList(targets));
    Assert.assertEquals(3, targets.length);
    targets = replicator.chooseTarget("/foo", 3, dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false, new HashSet<Node>(), 0, policy2, null);
    System.out.println(Arrays.asList(targets));
    Assert.assertEquals(3, targets.length);
}
Also used : NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode) StorageType(org.apache.hadoop.fs.StorageType) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) Node(org.apache.hadoop.net.Node) NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode) NetworkTopology(org.apache.hadoop.net.NetworkTopology) File(java.io.File) Test(org.junit.Test)
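
As a hedged follow-up that is not part of the original test, each returned DatanodeStorageInfo can be inspected to see which storage type it landed on. The loop below only prints the result rather than asserting a particular order, since placement within this small topology is not guaranteed to be deterministic; it reuses the targets array and imports from the test above.

    for (DatanodeStorageInfo target : targets) {
        // Print the chosen datanode and the storage type selected for it.
        System.out.println(target.getDatanodeDescriptor().getHostName() + " -> " + target.getStorageType());
    }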

Example 43 with StorageType

Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

The class TestBlockStoragePolicy, method testGetFileStoragePolicyAfterRestartNN.

@Test
public void testGetFileStoragePolicyAfterRestartNN() throws Exception {
    // HDFS-8219
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).storageTypes(new StorageType[] { StorageType.DISK, StorageType.ARCHIVE }).build();
    cluster.waitActive();
    final DistributedFileSystem fs = cluster.getFileSystem();
    try {
        final String file = "/testScheduleWithinSameNode/file";
        // 1. Create directory
        Path dir = new Path("/testScheduleWithinSameNode");
        fs.mkdirs(dir);
        // 2. Set dir policy to COLD
        fs.setStoragePolicy(dir, "COLD");
        // 3. Create file
        final FSDataOutputStream out = fs.create(new Path(file));
        out.writeChars("testScheduleWithinSameNode");
        out.close();
        // 4. Change dir policy to HOT
        fs.setStoragePolicy(dir, "HOT");
        HdfsFileStatus status = fs.getClient().getFileInfo(file);
        // 5. Get file policy; it should follow the parent dir policy.
        Assert.assertTrue("File storage policy should be HOT", status.getStoragePolicy() == HOT);
        // 6. Restart the NameNode to reload the edit log.
        cluster.restartNameNode(true);
        // 7. Get file policy again; it should still follow the parent dir policy.
        status = fs.getClient().getFileInfo(file);
        Assert.assertTrue("File storage policy should be HOT", status.getStoragePolicy() == HOT);
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) StorageType(org.apache.hadoop.fs.StorageType) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
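
An alternative way to express the same check without comparing raw policy ids is FileSystem#getStoragePolicy(Path), which returns a BlockStoragePolicySpi (from org.apache.hadoop.fs) whose name can be compared directly. The sketch below assumes that API is available in the Hadoop version under test and reuses fs, file, and Assert from the test above.

    // Read the effective policy by name instead of by numeric id.
    BlockStoragePolicySpi effective = fs.getStoragePolicy(new Path(file));
    Assert.assertEquals("File storage policy should be HOT", "HOT", effective.getName());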

Example 44 with StorageType

Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

The class TestDFSNetworkTopology, method testAddAndRemoveTopology.

/**
   * Test the correctness of storage type info when nodes are added and removed.
   * @throws Exception
   */
@Test
public void testAddAndRemoveTopology() throws Exception {
    String[] newRack = { "/l1/d1/r1", "/l1/d1/r3", "/l1/d3/r3", "/l1/d3/r3" };
    String[] newHost = { "nhost1", "nhost2", "nhost3", "nhost4" };
    String[] newips = { "30.30.30.30", "31.31.31.31", "32.32.32.32", "33.33.33.33" };
    StorageType[] newTypes = { StorageType.DISK, StorageType.SSD, StorageType.SSD, StorageType.SSD };
    DatanodeDescriptor[] newDD = new DatanodeDescriptor[4];
    for (int i = 0; i < 4; i++) {
        DatanodeStorageInfo dsi = DFSTestUtil.createDatanodeStorageInfo("s" + newHost[i], newips[i], newRack[i], newHost[i], newTypes[i], null);
        newDD[i] = dsi.getDatanodeDescriptor();
        CLUSTER.add(newDD[i]);
    }
    DFSTopologyNodeImpl d1 = (DFSTopologyNodeImpl) CLUSTER.getNode("/l1/d1");
    HashMap<String, EnumMap<StorageType, Integer>> d1info = d1.getChildrenStorageInfo();
    assertEquals(3, d1info.keySet().size());
    assertTrue(d1info.get("r1").size() == 2 && d1info.get("r2").size() == 2 && d1info.get("r3").size() == 1);
    assertEquals(2, (int) d1info.get("r1").get(StorageType.DISK));
    assertEquals(1, (int) d1info.get("r1").get(StorageType.ARCHIVE));
    assertEquals(2, (int) d1info.get("r2").get(StorageType.DISK));
    assertEquals(1, (int) d1info.get("r2").get(StorageType.ARCHIVE));
    assertEquals(1, (int) d1info.get("r3").get(StorageType.SSD));
    DFSTopologyNodeImpl d3 = (DFSTopologyNodeImpl) CLUSTER.getNode("/l1/d3");
    HashMap<String, EnumMap<StorageType, Integer>> d3info = d3.getChildrenStorageInfo();
    assertEquals(1, d3info.keySet().size());
    assertTrue(d3info.get("r3").size() == 1);
    assertEquals(2, (int) d3info.get("r3").get(StorageType.SSD));
    DFSTopologyNodeImpl l1 = (DFSTopologyNodeImpl) CLUSTER.getNode("/l1");
    HashMap<String, EnumMap<StorageType, Integer>> l1info = l1.getChildrenStorageInfo();
    assertEquals(3, l1info.keySet().size());
    assertTrue(l1info.get("d1").size() == 3 && l1info.get("d2").size() == 3 && l1info.get("d3").size() == 1);
    assertEquals(4, (int) l1info.get("d1").get(StorageType.DISK));
    assertEquals(2, (int) l1info.get("d1").get(StorageType.ARCHIVE));
    assertEquals(1, (int) l1info.get("d1").get(StorageType.SSD));
    assertEquals(1, (int) l1info.get("d2").get(StorageType.SSD));
    assertEquals(1, (int) l1info.get("d2").get(StorageType.RAM_DISK));
    assertEquals(1, (int) l1info.get("d2").get(StorageType.DISK));
    assertEquals(2, (int) l1info.get("d3").get(StorageType.SSD));
    for (int i = 0; i < 4; i++) {
        CLUSTER.remove(newDD[i]);
    }
    // /d1/r3 should've been out, /d1/r1 should've been resumed
    DFSTopologyNodeImpl nd1 = (DFSTopologyNodeImpl) CLUSTER.getNode("/l1/d1");
    HashMap<String, EnumMap<StorageType, Integer>> nd1info = nd1.getChildrenStorageInfo();
    assertEquals(2, nd1info.keySet().size());
    assertTrue(nd1info.get("r1").size() == 2 && nd1info.get("r2").size() == 2);
    assertEquals(1, (int) nd1info.get("r1").get(StorageType.DISK));
    assertEquals(1, (int) nd1info.get("r1").get(StorageType.ARCHIVE));
    assertEquals(2, (int) nd1info.get("r2").get(StorageType.DISK));
    assertEquals(1, (int) nd1info.get("r2").get(StorageType.ARCHIVE));
    // /l1/d3 should've been out, and /l1/d1 should've been resumed
    DFSTopologyNodeImpl nl1 = (DFSTopologyNodeImpl) CLUSTER.getNode("/l1");
    HashMap<String, EnumMap<StorageType, Integer>> nl1info = nl1.getChildrenStorageInfo();
    assertEquals(2, nl1info.keySet().size());
    assertTrue(l1info.get("d1").size() == 2 && l1info.get("d2").size() == 3);
    assertEquals(2, (int) nl1info.get("d1").get(StorageType.ARCHIVE));
    assertEquals(3, (int) nl1info.get("d1").get(StorageType.DISK));
    assertEquals(1, (int) l1info.get("d2").get(StorageType.DISK));
    assertEquals(1, (int) l1info.get("d2").get(StorageType.RAM_DISK));
    assertEquals(1, (int) l1info.get("d2").get(StorageType.SSD));
    assertNull(CLUSTER.getNode("/l1/d3"));
}
Also used : DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) StorageType(org.apache.hadoop.fs.StorageType) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) EnumMap(java.util.EnumMap) Test(org.junit.Test)
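
Because several of the assertions above dig into the same nested map, a small debugging helper (hypothetical, not part of the test class) can dump the per-child counters that getChildrenStorageInfo() exposes; it only needs java.util.Map in addition to the imports the test already uses.

    // Hypothetical helper: print each child node name and its storage-type counters.
    private static void dumpStorageInfo(DFSTopologyNodeImpl node) {
        for (Map.Entry<String, EnumMap<StorageType, Integer>> child : node.getChildrenStorageInfo().entrySet()) {
            System.out.println(child.getKey() + " -> " + child.getValue());
        }
    }

Calling dumpStorageInfo(d1) before the r1/r2/r3 assertions, for example, shows at a glance which rack gained or lost which storage type when the four new nodes were added.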

Example 45 with StorageType

Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

The class LazyPersistTestCase, method startUpCluster.

/**
   * If ramDiskStorageLimit is >=0, then RAM_DISK capacity is artificially
   * capped. If ramDiskStorageLimit < 0 then it is ignored.
   */
protected final void startUpCluster(int numDatanodes, boolean hasTransientStorage, StorageType[] storageTypes, int ramDiskReplicaCapacity, long ramDiskStorageLimit, long maxLockedMemory, boolean useSCR, boolean useLegacyBlockReaderLocal, boolean disableScrubber) throws IOException {
    initCacheManipulator();
    Configuration conf = new Configuration();
    conf.setLong(DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    if (disableScrubber) {
        conf.setInt(DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC, 0);
    } else {
        conf.setInt(DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC, LAZY_WRITE_FILE_SCRUBBER_INTERVAL_SEC);
    }
    conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL_SEC);
    conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, HEARTBEAT_RECHECK_INTERVAL_MSEC);
    conf.setInt(DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC, LAZY_WRITER_INTERVAL_SEC);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY, 1);
    conf.setLong(DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, maxLockedMemory);
    if (useSCR) {
        conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
        // Do not share a client context across tests.
        conf.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT, UUID.randomUUID().toString());
        conf.set(DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY, UserGroupInformation.getCurrentUser().getShortUserName());
        if (useLegacyBlockReaderLocal) {
            conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
        } else {
            sockDir = new TemporarySocketDirectory();
            conf.set(DFS_DOMAIN_SOCKET_PATH_KEY, new File(sockDir.getDir(), this.getClass().getSimpleName() + "._PORT.sock").getAbsolutePath());
        }
    }
    Preconditions.checkState(ramDiskReplicaCapacity < 0 || ramDiskStorageLimit < 0, "Cannot specify non-default values for both ramDiskReplicaCapacity " + "and ramDiskStorageLimit");
    long[] capacities;
    if (hasTransientStorage && ramDiskReplicaCapacity >= 0) {
        // Convert replica count to byte count, add some delta for .meta and
        // VERSION files.
        ramDiskStorageLimit = ((long) ramDiskReplicaCapacity * BLOCK_SIZE) + (BLOCK_SIZE - 1);
    }
    capacities = new long[] { ramDiskStorageLimit, -1 };
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).storageCapacities(capacities).storageTypes(storageTypes != null ? storageTypes : (hasTransientStorage ? new StorageType[] { RAM_DISK, DEFAULT } : null)).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    client = fs.getClient();
    try {
        jmx = initJMX();
    } catch (Exception e) {
        fail("Failed initialize JMX for testing: " + e);
    }
    LOG.info("Cluster startup complete");
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) StorageType(org.apache.hadoop.fs.StorageType) Configuration(org.apache.hadoop.conf.Configuration) File(java.io.File) TimeoutException(java.util.concurrent.TimeoutException) UnhandledException(org.apache.commons.lang.UnhandledException) IOException(java.io.IOException) TemporarySocketDirectory(org.apache.hadoop.net.unix.TemporarySocketDirectory)
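
For orientation, a hypothetical invocation of this helper is sketched below. The argument values are illustrative; they satisfy the precondition that at most one of ramDiskReplicaCapacity and ramDiskStorageLimit is non-default (the limit is left at -1 and derived from the replica capacity inside the method).

    // Hypothetical call: two datanodes with transient storage, default storage
    // types (RAM_DISK + DEFAULT), RAM_DISK capped at roughly 3 block replicas,
    // no locked memory, no short-circuit reads, scrubber enabled.
    startUpCluster(2, /* hasTransientStorage */ true, /* storageTypes */ null, /* ramDiskReplicaCapacity */ 3, /* ramDiskStorageLimit */ -1, /* maxLockedMemory */ 0, /* useSCR */ false, /* useLegacyBlockReaderLocal */ false, /* disableScrubber */ false);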

Aggregations

StorageType (org.apache.hadoop.fs.StorageType): 94
Test (org.junit.Test): 31
Path (org.apache.hadoop.fs.Path): 27
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 25
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 19
Configuration (org.apache.hadoop.conf.Configuration): 17
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 16
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 14
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 13
BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy): 12
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 11
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 11
IOException (java.io.IOException): 10
ArrayList (java.util.ArrayList): 9
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 6
ByteString (com.google.protobuf.ByteString): 5
InterruptedIOException (java.io.InterruptedIOException): 5
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 5
File (java.io.File): 4
InetSocketAddress (java.net.InetSocketAddress): 4