
Example 61 with StorageType

Use of org.apache.hadoop.fs.StorageType in project hadoop by apache, from the class TestStripedINodeFile, method testUnsuitableStoragePoliciesWithECStripedMode.

/**
   * Tests that block placement for files created in EC striped mode ignores
   * the storage policy when it is not suitable. The supported storage
   * policies for EC striped mode are HOT, COLD and ALL_SSD; any other policy
   * set on the path is ignored and the default policy is used instead.
   */
@Test(timeout = 60000)
public void testUnsuitableStoragePoliciesWithECStripedMode() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    int defaultStripedBlockSize = testECPolicy.getCellSize() * 4;
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultStripedBlockSize);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1L);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false);
    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, StripedFileTestUtil.getDefaultECPolicy().getName());
    // start 10 datanodes
    int numOfDatanodes = 10;
    int storagesPerDatanode = 2;
    long capacity = 10 * defaultStripedBlockSize;
    long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
    for (int i = 0; i < numOfDatanodes; i++) {
        for (int j = 0; j < storagesPerDatanode; j++) {
            capacities[i][j] = capacity;
        }
    }
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(numOfDatanodes)
            .storagesPerDatanode(storagesPerDatanode)
            .storageTypes(new StorageType[][] {
                    { StorageType.SSD, StorageType.DISK },
                    { StorageType.SSD, StorageType.DISK },
                    { StorageType.SSD, StorageType.DISK },
                    { StorageType.SSD, StorageType.DISK },
                    { StorageType.SSD, StorageType.DISK },
                    { StorageType.DISK, StorageType.SSD },
                    { StorageType.DISK, StorageType.SSD },
                    { StorageType.DISK, StorageType.SSD },
                    { StorageType.DISK, StorageType.SSD },
                    { StorageType.DISK, StorageType.SSD } })
            .storageCapacities(capacities)
            .build();
    try {
        cluster.waitActive();
        // set "/foo" directory with ONE_SSD storage policy.
        ClientProtocol client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
        String fooDir = "/foo";
        client.mkdirs(fooDir, new FsPermission((short) 777), true);
        client.setStoragePolicy(fooDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
        // set an EC policy on "/foo" directory
        client.setErasureCodingPolicy(fooDir, StripedFileTestUtil.getDefaultECPolicy().getName());
        // write file to fooDir
        final String barFile = "/foo/bar";
        long fileLen = 20 * defaultStripedBlockSize;
        DFSTestUtil.createFile(cluster.getFileSystem(), new Path(barFile), fileLen, (short) 3, 0);
        // verify storage types and locations
        LocatedBlocks locatedBlocks = client.getBlockLocations(barFile, 0, fileLen);
        for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
            for (StorageType type : lb.getStorageTypes()) {
                Assert.assertEquals(StorageType.DISK, type);
            }
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) StorageType(org.apache.hadoop.fs.StorageType) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) FsPermission(org.apache.hadoop.fs.permission.FsPermission) ClientProtocol(org.apache.hadoop.hdfs.protocol.ClientProtocol) Test(org.junit.Test)
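
The test above checks that the unsupported ONE_SSD policy is silently ignored for a striped file. For contrast, here is a minimal sketch of setting one of the policies the EC striped mode does honour (ALL_SSD) through the same ClientProtocol calls; the path "/ec-ssd" and the helper method name are made up for illustration, and the snippet assumes a ClientProtocol proxy obtained as in the test.

static void useSupportedPolicy(ClientProtocol client) throws IOException {
    // hypothetical directory for an erasure-coded, SSD-only dataset
    String ecDir = "/ec-ssd";
    client.mkdirs(ecDir, new FsPermission((short) 0777), true);
    // ALL_SSD (like HOT and COLD) is honoured for EC striped files,
    // unlike the ONE_SSD policy the test expects to be ignored
    client.setStoragePolicy(ecDir, HdfsConstants.ALLSSD_STORAGE_POLICY_NAME);
    client.setErasureCodingPolicy(ecDir,
            StripedFileTestUtil.getDefaultECPolicy().getName());
}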

Example 62 with StorageType

Use of org.apache.hadoop.fs.StorageType in project hadoop by apache, from the class TestStripedBlockUtil, method createDummyLocatedBlock.

private LocatedStripedBlock createDummyLocatedBlock(int bgSize) {
    final long blockGroupID = -1048576;
    DatanodeInfo[] locs = new DatanodeInfo[groupSize];
    String[] storageIDs = new String[groupSize];
    StorageType[] storageTypes = new StorageType[groupSize];
    byte[] indices = new byte[groupSize];
    for (int i = 0; i < groupSize; i++) {
        indices[i] = (byte) ((i + 2) % dataBlocks);
        // Location port always equal to logical index of a block,
        // for easier verification
        locs[i] = DFSTestUtil.getLocalDatanodeInfo(indices[i]);
        storageIDs[i] = locs[i].getDatanodeUuid();
        storageTypes[i] = StorageType.DISK;
    }
    return new LocatedStripedBlock(
            new ExtendedBlock("pool", blockGroupID, bgSize, 1001),
            locs, storageIDs, storageTypes, indices, 0, false, null);
}
Also used : LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) StorageType(org.apache.hadoop.fs.StorageType) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock)
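
A rough sketch (not from the Hadoop test suite) of how a test might consume the dummy block group built above. It assumes the port passed to DFSTestUtil.getLocalDatanodeInfo ends up as the datanode's transfer port; the method name checkDummyLocatedBlock is made up.

private void checkDummyLocatedBlock(int bgSize) {
    LocatedStripedBlock bg = createDummyLocatedBlock(bgSize);
    StorageType[] types = bg.getStorageTypes();
    byte[] indices = bg.getBlockIndices();
    for (int i = 0; i < indices.length; i++) {
        // every dummy location was registered with DISK storage
        Assert.assertEquals(StorageType.DISK, types[i]);
        // the fake location's port encodes the logical block index
        Assert.assertEquals(indices[i], bg.getLocations()[i].getXferPort());
    }
}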

Example 63 with StorageType

Use of org.apache.hadoop.fs.StorageType in project hadoop by apache, from the class DFSInputStream, method blockSeekTo.

/**
   * Open a DataInputStream to a DataNode so that it can be read from.
   * We get the block ID and the IDs of the destination datanodes from the
   * namenode at startup.
   */
private synchronized DatanodeInfo blockSeekTo(long target) throws IOException {
    if (target >= getFileLength()) {
        throw new IOException("Attempted to read past end of file");
    }
    // Will be getting a new BlockReader.
    closeCurrentBlockReaders();
    //
    // Connect to best DataNode for desired Block, with potential offset
    //
    DatanodeInfo chosenNode;
    // only need to get a new access token once
    int refetchToken = 1;
    // only need to get a new encryption key once
    int refetchEncryptionKey = 1;
    boolean connectFailedOnce = false;
    while (true) {
        //
        // Compute desired block
        //
        LocatedBlock targetBlock = getBlockAt(target);
        // update current position
        this.pos = target;
        this.blockEnd = targetBlock.getStartOffset() + targetBlock.getBlockSize() - 1;
        this.currentLocatedBlock = targetBlock;
        long offsetIntoBlock = target - targetBlock.getStartOffset();
        DNAddrPair retval = chooseDataNode(targetBlock, null);
        chosenNode = retval.info;
        InetSocketAddress targetAddr = retval.addr;
        StorageType storageType = retval.storageType;
        try {
            blockReader = getBlockReader(targetBlock, offsetIntoBlock, targetBlock.getBlockSize() - offsetIntoBlock, targetAddr, storageType, chosenNode);
            if (connectFailedOnce) {
                DFSClient.LOG.info("Successfully connected to " + targetAddr + " for " + targetBlock.getBlock());
            }
            return chosenNode;
        } catch (IOException ex) {
            checkInterrupted(ex);
            if (ex instanceof InvalidEncryptionKeyException && refetchEncryptionKey > 0) {
                DFSClient.LOG.info("Will fetch a new encryption key and retry, " + "encryption key was invalid when connecting to " + targetAddr + " : " + ex);
                // The encryption key used is invalid.
                refetchEncryptionKey--;
                dfsClient.clearDataEncryptionKey();
            } else if (refetchToken > 0 && tokenRefetchNeeded(ex, targetAddr)) {
                refetchToken--;
                fetchBlockAt(target);
            } else {
                connectFailedOnce = true;
                DFSClient.LOG.warn("Failed to connect to " + targetAddr + " for block" + ", add to deadNodes and continue. " + ex, ex);
                // Put chosen node into dead list, continue
                addToDeadNodes(chosenNode);
            }
        }
    }
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) StorageType(org.apache.hadoop.fs.StorageType) InvalidEncryptionKeyException(org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException) InetSocketAddress(java.net.InetSocketAddress) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException)
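
A condensed sketch of the retry protocol blockSeekTo implements: each recoverable failure class (stale encryption key, expired block token) gets a one-shot retry budget, and any other connection failure marks the node dead so the next loop iteration picks a different replica. Every helper method below is a hypothetical placeholder, not part of DFSInputStream.

private DatanodeInfo connectWithBoundedRetries() throws IOException {
    // one-shot budgets, mirroring refetchEncryptionKey/refetchToken above
    int refetchEncryptionKey = 1;
    int refetchToken = 1;
    while (true) {
        try {
            return connectToBestNode();              // hypothetical helper
        } catch (IOException ex) {
            if (isEncryptionKeyFailure(ex) && refetchEncryptionKey > 0) {
                refetchEncryptionKey--;
                clearEncryptionKey();                // retry with a new key
            } else if (isTokenFailure(ex) && refetchToken > 0) {
                refetchToken--;
                refetchBlockToken();                 // retry with a new token
            } else {
                markCurrentNodeDead();               // try another replica
            }
        }
    }
}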

Example 64 with StorageType

Use of org.apache.hadoop.fs.StorageType in project hadoop by apache, from the class PBHelperClient, method getBuilder.

private static HdfsProtos.StorageTypeQuotaInfosProto.Builder getBuilder(QuotaUsage qu) {
    HdfsProtos.StorageTypeQuotaInfosProto.Builder isb = HdfsProtos.StorageTypeQuotaInfosProto.newBuilder();
    for (StorageType t : StorageType.getTypesSupportingQuota()) {
        HdfsProtos.StorageTypeQuotaInfoProto info =
                HdfsProtos.StorageTypeQuotaInfoProto.newBuilder()
                        .setType(convertStorageType(t))
                        .setConsumed(qu.getTypeConsumed(t))
                        .setQuota(qu.getTypeQuota(t))
                        .build();
        isb.addTypeQuotaInfo(info);
    }
    return isb;
}
Also used : HdfsProtos(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos) StorageType(org.apache.hadoop.fs.StorageType)
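
For context, a minimal sketch of building the org.apache.hadoop.fs.QuotaUsage that feeds this converter and reading the per-type values back. It assumes QuotaUsage.Builder exposes typeQuota() and typeConsumed() setters matching the getTypeQuota()/getTypeConsumed() getters used above; the numbers are made up.

static void printPerTypeQuotas() {
    QuotaUsage qu = new QuotaUsage.Builder()
            .typeQuota(StorageType.SSD, 4096L)
            .typeConsumed(StorageType.SSD, 1024L)
            .build();
    // iterate only the storage types that can carry a quota at all
    for (StorageType t : StorageType.getTypesSupportingQuota()) {
        System.out.println(t + ": quota=" + qu.getTypeQuota(t)
                + ", consumed=" + qu.getTypeConsumed(t));
    }
}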

Example 65 with StorageType

Use of org.apache.hadoop.fs.StorageType in project hadoop by apache, from the class StripedBlockUtil, method constructInternalBlock.

/**
   * This method creates an internal block at the given index of a block group.
   *
   * @param idxInReturnedLocs The index in the stored locations in the
   *                          {@link LocatedStripedBlock} object
   * @param idxInBlockGroup The logical index in the striped block group
   * @return The constructed internal block
   */
public static LocatedBlock constructInternalBlock(LocatedStripedBlock bg, int idxInReturnedLocs, int cellSize, int dataBlkNum, int idxInBlockGroup) {
    final ExtendedBlock blk = constructInternalBlock(bg.getBlock(), cellSize, dataBlkNum, idxInBlockGroup);
    final LocatedBlock locatedBlock;
    if (idxInReturnedLocs < bg.getLocations().length) {
        locatedBlock = new LocatedBlock(blk,
                new DatanodeInfo[] { bg.getLocations()[idxInReturnedLocs] },
                new String[] { bg.getStorageIDs()[idxInReturnedLocs] },
                new StorageType[] { bg.getStorageTypes()[idxInReturnedLocs] },
                bg.getStartOffset(), bg.isCorrupt(), null);
    } else {
        locatedBlock = new LocatedBlock(blk, null, null, null, bg.getStartOffset(), bg.isCorrupt(), null);
    }
    Token<BlockTokenIdentifier>[] blockTokens = bg.getBlockTokens();
    if (idxInReturnedLocs < blockTokens.length) {
        locatedBlock.setBlockToken(blockTokens[idxInReturnedLocs]);
    }
    return locatedBlock;
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) StorageType(org.apache.hadoop.fs.StorageType) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Token(org.apache.hadoop.security.token.Token)
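
A rough sketch of expanding a whole block group into its internal blocks with the helper above, roughly what StripedBlockUtil.parseStripedBlockGroup does; the method name toInternalBlocks is made up for illustration.

public static LocatedBlock[] toInternalBlocks(LocatedStripedBlock bg,
        int cellSize, int dataBlkNum, int parityBlkNum) {
    LocatedBlock[] internal = new LocatedBlock[dataBlkNum + parityBlkNum];
    byte[] indices = bg.getBlockIndices();
    for (int i = 0; i < indices.length; i++) {
        // i indexes the stored locations; indices[i] is the logical index
        // of that replica inside the striped block group
        internal[indices[i]] = constructInternalBlock(
                bg, i, cellSize, dataBlkNum, indices[i]);
    }
    return internal;
}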

Aggregations

StorageType (org.apache.hadoop.fs.StorageType): 91 usages
Test (org.junit.Test): 31 usages
Path (org.apache.hadoop.fs.Path): 27 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 24 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 18 usages
Configuration (org.apache.hadoop.conf.Configuration): 17 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 16 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 14 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 13 usages
BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy): 12 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 11 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 10 usages
IOException (java.io.IOException): 9 usages
ArrayList (java.util.ArrayList): 8 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 6 usages
ByteString (com.google.protobuf.ByteString): 5 usages
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 5 usages
File (java.io.File): 4 usages
InetSocketAddress (java.net.InetSocketAddress): 4 usages
HashSet (java.util.HashSet): 4 usages