
Example 71 with StorageType

Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

The class DatanodeStats, method add().

synchronized void add(final DatanodeDescriptor node) {
    xceiverCount += node.getXceiverCount();
    if (node.isInService()) {
        // In-service nodes contribute to every capacity and cache statistic.
        capacityUsed += node.getDfsUsed();
        capacityUsedNonDfs += node.getNonDfsUsed();
        blockPoolUsed += node.getBlockPoolUsed();
        nodesInService++;
        nodesInServiceXceiverCount += node.getXceiverCount();
        capacityTotal += node.getCapacity();
        capacityRemaining += node.getRemaining();
        cacheCapacity += node.getCacheCapacity();
        cacheUsed += node.getCacheUsed();
    } else if (node.isDecommissionInProgress() || node.isEnteringMaintenance()) {
        // Nodes on their way out of service only contribute cache statistics.
        cacheCapacity += node.getCacheCapacity();
        cacheUsed += node.getCacheUsed();
    }
    // Record per-storage statistics, skipping failed storages, and collect
    // the distinct storage types present on this node.
    Set<StorageType> storageTypes = new HashSet<>();
    for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
        if (storageInfo.getState() != DatanodeStorage.State.FAILED) {
            statsMap.addStorage(storageInfo, node);
            storageTypes.add(storageInfo.getStorageType());
        }
    }
    // Count the node once per distinct storage type it exposes.
    for (StorageType storageType : storageTypes) {
        statsMap.addNode(storageType, node);
    }
}
Also used: StorageType (org.apache.hadoop.fs.StorageType), HashSet (java.util.HashSet)
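
The pattern above, collect a node's distinct storage types into a set and then bump one per-type counter, can be sketched with plain Java collections. A minimal, hypothetical stand-alone version (DatanodeStorageInfo and statsMap are Hadoop-internal, so a StorageType[] per node stands in for them here):

import java.util.Arrays;
import java.util.EnumMap;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.fs.StorageType;

public class StorageTypeCounts {
    // Counts how many nodes expose each storage type, counting a node at
    // most once per type even if several of its volumes share that type.
    static Map<StorageType, Integer> nodesPerType(List<StorageType[]> nodes) {
        Map<StorageType, Integer> counts = new EnumMap<>(StorageType.class);
        for (StorageType[] nodeTypes : nodes) {
            // EnumSet deduplicates, mirroring the HashSet in DatanodeStats.add().
            Set<StorageType> distinct = EnumSet.noneOf(StorageType.class);
            distinct.addAll(Arrays.asList(nodeTypes));
            for (StorageType t : distinct) {
                counts.merge(t, 1, Integer::sum);
            }
        }
        return counts;
    }
}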

Example 72 with StorageType

Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

The class StorageLocation, method parse().

/**
   * Attempt to parse a storage location of the format [type]uri, where the
   * optional [type] prefix names a storage class. The storage class
   * component is case-insensitive.
   *
   * @param rawLocation Location string of the format [type]uri, where [type]
   *                    is optional.
   * @return A StorageLocation with the parsed storage type and URI. Note
   *         that an unrecognized storage class makes StorageType.valueOf
   *         throw an IllegalArgumentException rather than return null.
   */
public static StorageLocation parse(String rawLocation) throws IOException, SecurityException {
    Matcher matcher = regex.matcher(rawLocation);
    StorageType storageType = StorageType.DEFAULT;
    String location = rawLocation;
    if (matcher.matches()) {
        String classString = matcher.group(1);
        location = matcher.group(2).trim();
        if (!classString.isEmpty()) {
            storageType = StorageType.valueOf(StringUtils.toUpperCase(classString));
        }
    }
    //"/a/b" and "/a/b/" are represented in a consistent manner
    return new StorageLocation(storageType, new Path(location).toUri());
}
Also used: Path (org.apache.hadoop.fs.Path), StorageType (org.apache.hadoop.fs.StorageType), Matcher (java.util.regex.Matcher)
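
The [type]uri format can be exercised end to end with the standard regex API. A minimal sketch, assuming a pattern equivalent to the one StorageLocation compiles (the class's regex field is not shown above, so the pattern here is an illustration of the documented format, not the exact field):

import java.util.Locale;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.fs.StorageType;

public class ParseDemo {
    // An optional, case-insensitive storage class in brackets, then the URI.
    private static final Pattern TYPE_PREFIX = Pattern.compile("^\\[(\\w*)\\](.+)$");

    public static void main(String[] args) {
        for (String raw : new String[] { "[SSD]/mnt/ssd1", "[ram_disk]/mnt/tmpfs", "/plain/dir" }) {
            StorageType type = StorageType.DEFAULT;
            String location = raw;
            Matcher m = TYPE_PREFIX.matcher(raw);
            if (m.matches()) {
                location = m.group(2).trim();
                if (!m.group(1).isEmpty()) {
                    // Case-insensitive, like parse() above.
                    type = StorageType.valueOf(m.group(1).toUpperCase(Locale.ROOT));
                }
            }
            System.out.println(type + " -> " + location);
        }
    }
}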

Example 73 with StorageType

Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

The class AvailableSpaceVolumeChoosingPolicy, method chooseVolume().

@Override
public V chooseVolume(List<V> volumes, long replicaSize) throws IOException {
    if (volumes.size() < 1) {
        throw new DiskOutOfSpaceException("No more available volumes");
    }
    // All volumes in the list share the same storage type, so the storage
    // type index of the first volume stands in for all of them.
    StorageType storageType = volumes.get(0).getStorageType();
    int index = storageType != null ? storageType.ordinal() : StorageType.DEFAULT.ordinal();
    synchronized (syncLocks[index]) {
        return doChooseVolume(volumes, replicaSize);
    }
}
Also used: DiskOutOfSpaceException (org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException), StorageType (org.apache.hadoop.fs.StorageType)
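
Indexing syncLocks by StorageType.ordinal() works because StorageType is an enum, so one lock object can be pre-allocated per type. A minimal sketch of how such a lock array might be set up (the initialization is an assumption; only the indexing scheme is taken from the snippet above):

import org.apache.hadoop.fs.StorageType;

public class PerTypeLocks {
    // One lock object per storage type, so volume choices for DISK never
    // contend with choices for SSD or ARCHIVE volumes.
    private final Object[] syncLocks;

    public PerTypeLocks() {
        syncLocks = new Object[StorageType.values().length];
        for (int i = 0; i < syncLocks.length; i++) {
            syncLocks[i] = new Object();
        }
    }

    public Object lockFor(StorageType type) {
        int index = type != null ? type.ordinal() : StorageType.DEFAULT.ordinal();
        return syncLocks[index];
    }
}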

Example 74 with StorageType

Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

The class RoundRobinVolumeChoosingPolicy, method chooseVolume().

@Override
public V chooseVolume(final List<V> volumes, long blockSize) throws IOException {
    if (volumes.size() < 1) {
        throw new DiskOutOfSpaceException("No more available volumes");
    }
    // All volumes in the list share the same storage type, so the storage
    // type index of the first volume stands in for all of them.
    StorageType storageType = volumes.get(0).getStorageType();
    int index = storageType != null ? storageType.ordinal() : StorageType.DEFAULT.ordinal();
    synchronized (syncLocks[index]) {
        return chooseVolume(index, volumes, blockSize);
    }
}
Also used: DiskOutOfSpaceException (org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException), StorageType (org.apache.hadoop.fs.StorageType)
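
The delegated chooseVolume(index, volumes, blockSize) implements the round-robin itself: keep a cursor, hand out volumes in order, and skip volumes that cannot fit the block. A minimal, self-contained sketch of that idea (the Volume interface is a hypothetical stand-in for Hadoop's FsVolumeSpi, and the single cursor here ignores the per-type indexing shown above):

import java.io.IOException;
import java.util.List;

public class RoundRobinSketch {
    // Hypothetical stand-in for the volume abstraction.
    interface Volume {
        long getAvailable() throws IOException;
    }

    private int cursor = 0;

    // Starts at the saved cursor and returns the first volume with room,
    // wrapping around at most once before giving up.
    synchronized Volume chooseVolume(List<Volume> volumes, long blockSize) throws IOException {
        if (volumes.isEmpty()) {
            throw new IOException("No more available volumes");
        }
        int start = cursor % volumes.size();
        for (int i = 0; i < volumes.size(); i++) {
            int candidate = (start + i) % volumes.size();
            if (volumes.get(candidate).getAvailable() >= blockSize) {
                cursor = (candidate + 1) % volumes.size();
                return volumes.get(candidate);
            }
        }
        throw new IOException("Out of space: no volume can fit a block of size " + blockSize);
    }
}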

Example 75 with StorageType

Use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

The class DataStreamer, method nextBlockOutputStream().

/**
   * Open a stream to a DataNode so that a block can be written to it.
   * This happens when a file is created and each time a new block is
   * allocated. The block ID and the IDs of the destination datanodes
   * must be obtained from the namenode.
   * Returns the LocatedBlock describing the new block and its target
   * datanodes.
   */
protected LocatedBlock nextBlockOutputStream() throws IOException {
    LocatedBlock lb;
    DatanodeInfo[] nodes;
    StorageType[] storageTypes;
    int count = dfsClient.getConf().getNumBlockWriteRetry();
    boolean success;
    final ExtendedBlock oldBlock = block.getCurrentBlock();
    do {
        errorState.resetInternalError();
        lastException.clear();
        DatanodeInfo[] excluded = getExcludedNodes();
        lb = locateFollowingBlock(excluded.length > 0 ? excluded : null, oldBlock);
        block.setCurrentBlock(lb.getBlock());
        block.setNumBytes(0);
        bytesSent = 0;
        accessToken = lb.getBlockToken();
        nodes = lb.getLocations();
        storageTypes = lb.getStorageTypes();
        // Connect to first DataNode in the list.
        success = createBlockOutputStream(nodes, storageTypes, 0L, false);
        if (!success) {
            LOG.warn("Abandoning " + block);
            dfsClient.namenode.abandonBlock(block.getCurrentBlock(), stat.getFileId(), src, dfsClient.clientName);
            block.setCurrentBlock(null);
            final DatanodeInfo badNode = nodes[errorState.getBadNodeIndex()];
            LOG.warn("Excluding datanode " + badNode);
            excludedNodes.put(badNode, badNode);
        }
    } while (!success && --count >= 0);
    if (!success) {
        throw new IOException("Unable to create new block.");
    }
    return lb;
}
Also used: DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), StorageType (org.apache.hadoop.fs.StorageType), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), MultipleIOException (org.apache.hadoop.io.MultipleIOException)
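
Stripped of the HDFS specifics, nextBlockOutputStream() is a bounded retry loop: allocate a block, try to open a pipeline to its first datanode, and on failure abandon the block, exclude the bad node, and retry. A minimal, generic sketch of that shape (the Allocator callbacks are hypothetical stand-ins for the namenode RPCs and pipeline setup in the real method):

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

public class RetryWithExclusion {
    // Hypothetical stand-ins for the namenode RPCs and pipeline setup.
    interface Allocator<B, N> {
        B allocate(Set<N> excluded) throws IOException;  // like locateFollowingBlock()
        N attempt(B block) throws IOException;           // a bad node, or null on success
        void abandon(B block) throws IOException;        // like abandonBlock()
    }

    // Tries up to retries + 1 times, excluding any node that broke the pipeline.
    static <B, N> B run(Allocator<B, N> ops, int retries) throws IOException {
        Set<N> excluded = new HashSet<>();
        for (int attempt = 0; attempt <= retries; attempt++) {
            B block = ops.allocate(excluded);
            N badNode = ops.attempt(block);
            if (badNode == null) {
                return block;
            }
            ops.abandon(block);
            excluded.add(badNode);
        }
        throw new IOException("Unable to create new block.");
    }
}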

Aggregations

StorageType (org.apache.hadoop.fs.StorageType): 91
Test (org.junit.Test): 31
Path (org.apache.hadoop.fs.Path): 27
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 24
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 18
Configuration (org.apache.hadoop.conf.Configuration): 17
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 16
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 14
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 13
BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy): 12
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 11
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 10
IOException (java.io.IOException): 9
ArrayList (java.util.ArrayList): 8
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 6
ByteString (com.google.protobuf.ByteString): 5
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 5
File (java.io.File): 4
InetSocketAddress (java.net.InetSocketAddress): 4
HashSet (java.util.HashSet): 4