
Example 31 with StorageType

use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

the class PBHelper method convertBlockECReconstructionInfo.

public static BlockECReconstructionInfo convertBlockECReconstructionInfo(BlockECReconstructionInfoProto blockEcReconstructionInfoProto) {
    ExtendedBlockProto blockProto = blockEcReconstructionInfoProto.getBlock();
    ExtendedBlock block = PBHelperClient.convert(blockProto);
    DatanodeInfosProto sourceDnInfosProto = blockEcReconstructionInfoProto.getSourceDnInfos();
    DatanodeInfo[] sourceDnInfos = PBHelperClient.convert(sourceDnInfosProto);
    DatanodeInfosProto targetDnInfosProto = blockEcReconstructionInfoProto.getTargetDnInfos();
    DatanodeInfo[] targetDnInfos = PBHelperClient.convert(targetDnInfosProto);
    HdfsProtos.StorageUuidsProto targetStorageUuidsProto = blockEcReconstructionInfoProto.getTargetStorageUuids();
    String[] targetStorageUuids = convert(targetStorageUuidsProto);
    StorageTypesProto targetStorageTypesProto = blockEcReconstructionInfoProto.getTargetStorageTypes();
    StorageType[] convertStorageTypes = PBHelperClient.convertStorageTypes(targetStorageTypesProto.getStorageTypesList(), targetStorageTypesProto.getStorageTypesList().size());
    byte[] liveBlkIndices = blockEcReconstructionInfoProto.getLiveBlockIndices().toByteArray();
    ErasureCodingPolicy ecPolicy = PBHelperClient.convertErasureCodingPolicy(blockEcReconstructionInfoProto.getEcPolicy());
    return new BlockECReconstructionInfo(block, sourceDnInfos, targetDnInfos, targetStorageUuids, convertStorageTypes, liveBlkIndices, ecPolicy);
}
Also used : BlockECReconstructionInfo(org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) StorageType(org.apache.hadoop.fs.StorageType) ErasureCodingPolicy(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) ByteString(com.google.protobuf.ByteString) HdfsProtos(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos) StorageUuidsProto(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto) StorageTypesProto(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto) ExtendedBlockProto(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto) DatanodeInfosProto(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto)
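The heart of the snippet above is the protobuf-to-Java conversion of the target storage types via PBHelperClient.convertStorageTypes. Below is a minimal sketch of that call in isolation; the class name StorageTypeConversionSketch is made up for illustration, and it assumes the hadoop-hdfs-client protobuf classes (HdfsProtos.StorageTypeProto, PBHelperClient) are on the classpath.

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;

public class StorageTypeConversionSketch {
    public static void main(String[] args) {
        // Protobuf-side representation of the target storage types.
        List<StorageTypeProto> protos =
            Arrays.asList(StorageTypeProto.DISK, StorageTypeProto.SSD);
        // Same call used in convertBlockECReconstructionInfo above; the second
        // argument is the expected number of entries.
        StorageType[] types = PBHelperClient.convertStorageTypes(protos, protos.size());
        for (StorageType t : types) {
            System.out.println(t); // DISK, SSD
        }
    }
}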

Example 32 with StorageType

use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

the class Balancer method init.

/**
   * Given a datanode storage set, build a network topology and classify
   * each storage as over-utilized, above-average, below-average, or
   * under-utilized. The input datanode storage set is shuffled so that
   * the later storage matching is randomized.
   *
   * @return the number of bytes needed to move in order to balance the cluster.
   */
private long init(List<DatanodeStorageReport> reports) {
    // compute average utilization
    for (DatanodeStorageReport r : reports) {
        policy.accumulateSpaces(r);
    }
    policy.initAvgUtilization();
    // create network topology and classify utilization collections: 
    //   over-utilized, above-average, below-average and under-utilized.
    long overLoadedBytes = 0L, underLoadedBytes = 0L;
    for (DatanodeStorageReport r : reports) {
        final DDatanode dn = dispatcher.newDatanode(r.getDatanodeInfo());
        final boolean isSource = Util.isIncluded(sourceNodes, dn.getDatanodeInfo());
        for (StorageType t : StorageType.getMovableTypes()) {
            final Double utilization = policy.getUtilization(r, t);
            if (utilization == null) {
                // datanode does not have such storage type 
                continue;
            }
            final double average = policy.getAvgUtilization(t);
            if (utilization >= average && !isSource) {
                LOG.info(dn + "[" + t + "] has utilization=" + utilization + " >= average=" + average + " but it is not specified as a source; skipping it.");
                continue;
            }
            final double utilizationDiff = utilization - average;
            final long capacity = getCapacity(r, t);
            final double thresholdDiff = Math.abs(utilizationDiff) - threshold;
            final long maxSize2Move = computeMaxSize2Move(capacity, getRemaining(r, t), utilizationDiff, maxSizeToMove);
            final StorageGroup g;
            if (utilizationDiff > 0) {
                final Source s = dn.addSource(t, maxSize2Move, dispatcher);
                if (thresholdDiff <= 0) {
                    // within threshold
                    aboveAvgUtilized.add(s);
                } else {
                    overLoadedBytes += percentage2bytes(thresholdDiff, capacity);
                    overUtilized.add(s);
                }
                g = s;
            } else {
                g = dn.addTarget(t, maxSize2Move);
                if (thresholdDiff <= 0) {
                    // within threshold
                    belowAvgUtilized.add(g);
                } else {
                    underLoadedBytes += percentage2bytes(thresholdDiff, capacity);
                    underUtilized.add(g);
                }
            }
            dispatcher.getStorageGroupMap().put(g);
        }
    }
    logUtilizationCollections();
    Preconditions.checkState(dispatcher.getStorageGroupMap().size() == overUtilized.size() + underUtilized.size() + aboveAvgUtilized.size() + belowAvgUtilized.size(), "Mismatched number of storage groups");
    // return number of bytes to be moved in order to make the cluster balanced
    return Math.max(overLoadedBytes, underLoadedBytes);
}
Also used : StorageGroup(org.apache.hadoop.hdfs.server.balancer.Dispatcher.DDatanode.StorageGroup) StorageType(org.apache.hadoop.fs.StorageType) DatanodeStorageReport(org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport) DDatanode(org.apache.hadoop.hdfs.server.balancer.Dispatcher.DDatanode) Source(org.apache.hadoop.hdfs.server.balancer.Dispatcher.Source)
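The classification in init comes down to comparing each storage's utilization against the cluster-wide average for its storage type and the configured threshold. A small, self-contained sketch of that decision is shown below; the percentage2bytes helper here is an assumed reimplementation of the Balancer's percentage-to-bytes arithmetic, not a quote of its code.

public class BalancerClassificationSketch {
    // Assumed arithmetic: convert a utilization delta (in percent) into bytes.
    static long percentage2bytes(double percentageDiff, long capacityBytes) {
        return (long) (percentageDiff * capacityBytes / 100.0);
    }

    public static void main(String[] args) {
        double utilization = 82.0;   // this storage is 82% full
        double average = 60.0;       // cluster-wide average for this storage type
        double threshold = 10.0;     // balancer threshold in percent
        long capacity = 4L << 40;    // 4 TB

        double utilizationDiff = utilization - average;              // +22.0 -> source side
        double thresholdDiff = Math.abs(utilizationDiff) - threshold; // 12.0 -> outside threshold

        if (utilizationDiff > 0) {
            if (thresholdDiff <= 0) {
                System.out.println("above-average (within threshold), eligible source");
            } else {
                System.out.println("over-utilized, roughly "
                    + percentage2bytes(thresholdDiff, capacity) + " bytes to move");
            }
        } else {
            System.out.println("below-average or under-utilized, candidate target");
        }
    }
}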

Example 33 with StorageType

use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

the class StoragePolicySummary method getStoragePolicy.

/**
   * 
   * @param storageTypes sorted array of storage types observed for a block
   * @return the storage policy that matches this storage type combination,
   *         or null if no policy matches
   */
private BlockStoragePolicy getStoragePolicy(StorageType[] storageTypes) {
    for (BlockStoragePolicy storagePolicy : storagePolicies) {
        // Sort a copy of the policy's storage types so it can be compared
        // element by element against the (already sorted) input array.
        StorageType[] policyStorageTypes = storagePolicy.getStorageTypes();
        policyStorageTypes = Arrays.copyOf(policyStorageTypes, policyStorageTypes.length);
        Arrays.sort(policyStorageTypes);
        if (policyStorageTypes.length <= storageTypes.length) {
            // The policy's types must form a prefix of the input combination.
            int i = 0;
            for (; i < policyStorageTypes.length; i++) {
                if (policyStorageTypes[i] != storageTypes[i]) {
                    break;
                }
            }
            if (i < policyStorageTypes.length) {
                // Prefix mismatch; try the next policy.
                continue;
            }
            // Any remaining input types must all equal the policy's last type,
            // i.e. extra replicas fall back onto that storage type.
            int j = policyStorageTypes.length;
            for (; j < storageTypes.length; j++) {
                if (policyStorageTypes[i - 1] != storageTypes[j]) {
                    break;
                }
            }
            if (j == storageTypes.length) {
                return storagePolicy;
            }
        }
    }
    return null;
}
Also used : StorageType(org.apache.hadoop.fs.StorageType) BlockStoragePolicy(org.apache.hadoop.hdfs.protocol.BlockStoragePolicy)
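The matching rule is easiest to see with concrete inputs: the policy's sorted storage types must be a prefix of the block's sorted storage types, and any extra replicas must sit on the policy's last (fallback) type. Below is a self-contained sketch that reimplements the rule for illustration; the matches helper and the example combinations are hypothetical, not taken from the project.

import java.util.Arrays;

import org.apache.hadoop.fs.StorageType;

public class StoragePolicyMatchSketch {
    // Illustration-only reimplementation of the rule in getStoragePolicy above.
    static boolean matches(StorageType[] policyTypes, StorageType[] blockTypes) {
        StorageType[] p = Arrays.copyOf(policyTypes, policyTypes.length);
        StorageType[] b = Arrays.copyOf(blockTypes, blockTypes.length);
        Arrays.sort(p);
        Arrays.sort(b);
        if (p.length > b.length) {
            return false;
        }
        // Policy types must be a prefix of the block's types.
        for (int i = 0; i < p.length; i++) {
            if (p[i] != b[i]) {
                return false;
            }
        }
        // Extra replicas must all use the policy's last (fallback) type.
        for (int j = p.length; j < b.length; j++) {
            if (b[j] != p[p.length - 1]) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Hypothetical all-ARCHIVE ("cold"-style) policy.
        System.out.println(matches(
            new StorageType[] { StorageType.ARCHIVE },
            new StorageType[] { StorageType.ARCHIVE, StorageType.ARCHIVE,
                StorageType.ARCHIVE })); // true
        // A block stored entirely on SSD cannot match a DISK/ARCHIVE policy.
        System.out.println(matches(
            new StorageType[] { StorageType.DISK, StorageType.ARCHIVE },
            new StorageType[] { StorageType.SSD, StorageType.SSD,
                StorageType.SSD })); // false
    }
}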

Example 34 with StorageType

use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

the class FsDatasetImpl method addVolume.

@Override
public void addVolume(final StorageLocation location, final List<NamespaceInfo> nsInfos) throws IOException {
    // Prepare volume in DataStorage
    final DataStorage.VolumeBuilder builder;
    try {
        builder = dataStorage.prepareVolume(datanode, location, nsInfos);
    } catch (IOException e) {
        volumes.addVolumeFailureInfo(new VolumeFailureInfo(location, Time.now()));
        throw e;
    }
    final Storage.StorageDirectory sd = builder.getStorageDirectory();
    StorageType storageType = location.getStorageType();
    final FsVolumeImpl fsVolume = createFsVolume(sd.getStorageUuid(), sd, location);
    final ReplicaMap tempVolumeMap = new ReplicaMap(new AutoCloseableLock());
    ArrayList<IOException> exceptions = Lists.newArrayList();
    for (final NamespaceInfo nsInfo : nsInfos) {
        String bpid = nsInfo.getBlockPoolID();
        try {
            fsVolume.addBlockPool(bpid, this.conf, this.timer);
            fsVolume.getVolumeMap(bpid, tempVolumeMap, ramDiskReplicaTracker);
        } catch (IOException e) {
            LOG.warn("Caught exception when adding " + fsVolume + ". Will throw later.", e);
            exceptions.add(e);
        }
    }
    if (!exceptions.isEmpty()) {
        try {
            sd.unlock();
        } catch (IOException e) {
            exceptions.add(e);
        }
        throw MultipleIOException.createIOException(exceptions);
    }
    final FsVolumeReference ref = fsVolume.obtainReference();
    setupAsyncLazyPersistThread(fsVolume);
    builder.build();
    activateVolume(tempVolumeMap, sd, storageType, ref);
    LOG.info("Added volume - " + location + ", StorageType: " + storageType);
}
Also used : DataStorage(org.apache.hadoop.hdfs.server.datanode.DataStorage) StorageType(org.apache.hadoop.fs.StorageType) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException) DataStorage(org.apache.hadoop.hdfs.server.datanode.DataStorage) Storage(org.apache.hadoop.hdfs.server.common.Storage) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock) NamespaceInfo(org.apache.hadoop.hdfs.server.protocol.NamespaceInfo)
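The storage type consumed at location.getStorageType() originates from the [STORAGE_TYPE] prefix that dfs.datanode.data.dir entries may carry. The sketch below is an assumption-laden illustration: it assumes StorageLocation.parse accepts that prefix syntax and that unprefixed entries default to DISK; the paths and the class name StorageLocationSketch are made up.

import java.io.IOException;

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;

public class StorageLocationSketch {
    public static void main(String[] args) throws IOException {
        // dfs.datanode.data.dir entries may carry a storage type prefix;
        // an unprefixed entry is assumed to default to DISK.
        StorageLocation ssdDir = StorageLocation.parse("[SSD]file:///mnt/ssd0/dn");
        StorageLocation plainDir = StorageLocation.parse("file:///mnt/disk0/dn");

        StorageType ssdType = ssdDir.getStorageType();       // SSD
        StorageType defaultType = plainDir.getStorageType(); // DISK

        System.out.println(ssdType + " / " + defaultType);
    }
}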

Example 35 with StorageType

use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

the class TestPBHelper method createLocatedBlock.

private LocatedBlock createLocatedBlock() {
    DatanodeInfo[] dnInfos = { DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1", AdminStates.DECOMMISSION_INPROGRESS), DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2", AdminStates.DECOMMISSIONED), DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3", AdminStates.NORMAL), DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h4", AdminStates.NORMAL) };
    String[] storageIDs = { "s1", "s2", "s3", "s4" };
    StorageType[] media = { StorageType.DISK, StorageType.SSD, StorageType.DISK, StorageType.RAM_DISK };
    LocatedBlock lb = new LocatedBlock(new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, storageIDs, media, 5, false, new DatanodeInfo[] {});
    lb.setBlockToken(new Token<BlockTokenIdentifier>("identifier".getBytes(), "password".getBytes(), new Text("kind"), new Text("service")));
    return lb;
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) StorageType(org.apache.hadoop.fs.StorageType) BlockTokenIdentifier(org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Text(org.apache.hadoop.io.Text) ByteString(com.google.protobuf.ByteString)
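A quick way to see what the media array ends up controlling is to read the storage types back off the constructed LocatedBlock. The following is a sketch in the same style as the test above, not an actual test from the project; it assumes LocatedBlock.getStorageTypes() returns the per-replica storage types in location order.

import static org.junit.Assert.assertArrayEquals;

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.junit.Test;

public class LocatedBlockStorageTypeSketch {

    @Test
    public void storageTypesAreKeptPerReplica() {
        DatanodeInfo[] dnInfos = {
            DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1", AdminStates.NORMAL),
            DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2", AdminStates.NORMAL) };
        String[] storageIDs = { "s1", "s2" };
        StorageType[] media = { StorageType.DISK, StorageType.SSD };
        // Same constructor as in createLocatedBlock() above.
        LocatedBlock lb = new LocatedBlock(new ExtendedBlock("bp12", 12345, 10, 53),
            dnInfos, storageIDs, media, 5, false, new DatanodeInfo[] {});
        // One storage type per replica location is preserved on the LocatedBlock.
        assertArrayEquals(media, lb.getStorageTypes());
    }
}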

Aggregations

StorageType (org.apache.hadoop.fs.StorageType) 94
Test (org.junit.Test) 31
Path (org.apache.hadoop.fs.Path) 27
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock) 25
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo) 19
Configuration (org.apache.hadoop.conf.Configuration) 17
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster) 16
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration) 14
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream) 13
BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy) 12
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem) 11
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock) 11
IOException (java.io.IOException) 10
ArrayList (java.util.ArrayList) 9
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode) 6
ByteString (com.google.protobuf.ByteString) 5
InterruptedIOException (java.io.InterruptedIOException) 5
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks) 5
File (java.io.File) 4
InetSocketAddress (java.net.InetSocketAddress) 4