
Example 36 with StorageType

use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

the class TestMover method testWithinSameNode.

private void testWithinSameNode(Configuration conf) throws Exception {
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(3)
        .storageTypes(new StorageType[] { StorageType.DISK, StorageType.ARCHIVE })
        .build();
    try {
        cluster.waitActive();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        final String file = "/testScheduleWithinSameNode/file";
        Path dir = new Path("/testScheduleWithinSameNode");
        dfs.mkdirs(dir);
        // write to DISK
        dfs.setStoragePolicy(dir, "HOT");
        final FSDataOutputStream out = dfs.create(new Path(file));
        out.writeChars("testScheduleWithinSameNode");
        out.close();
        // verify before movement
        LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
        StorageType[] storageTypes = lb.getStorageTypes();
        for (StorageType storageType : storageTypes) {
            Assert.assertEquals(StorageType.DISK, storageType);
        }
        // move to ARCHIVE
        dfs.setStoragePolicy(dir, "COLD");
        int rc = ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", dir.toString() });
        Assert.assertEquals("Movement to ARCHIVE should be successful", 0, rc);
        // Wait till the namenode is notified of the new block locations
        waitForLocatedBlockWithArchiveStorageType(dfs, file, 3);
    } finally {
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), StorageType (org.apache.hadoop.fs.StorageType), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)
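
The test polls through a helper, waitForLocatedBlockWithArchiveStorageType, that is not shown on this page. A minimal sketch of such a polling helper, assuming Guava's Supplier and Hadoop's GenericTestUtils.waitFor are on the test classpath (the actual helper in TestMover may differ):

private void waitForLocatedBlockWithArchiveStorageType(
        final DistributedFileSystem dfs, final String file,
        final int expectedArchiveCount) throws Exception {
    // Poll the namenode until the first block reports the expected number
    // of ARCHIVE replicas, checking every 100 ms for up to 60 seconds.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
        @Override
        public Boolean get() {
            try {
                LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
                int archiveCount = 0;
                for (StorageType storageType : lb.getStorageTypes()) {
                    if (storageType == StorageType.ARCHIVE) {
                        archiveCount++;
                    }
                }
                return archiveCount == expectedArchiveCount;
            } catch (IOException e) {
                return false;
            }
        }
    }, 100, 60000);
}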

Example 37 with StorageType

use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

the class TestMover method testMoverFailedRetryWithPinnedBlocks.

/**
   * Test to verify that the mover works correctly with pinned blocks as well
   * as failed blocks: it should keep retrying the failed blocks only.
   */
@Test(timeout = 90000)
public void testMoverFailedRetryWithPinnedBlocks() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    initConf(conf);
    conf.set(DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY, "2");
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2)
        .storageTypes(new StorageType[][] {
            { StorageType.DISK, StorageType.ARCHIVE },
            { StorageType.DISK, StorageType.ARCHIVE } })
        .build();
    try {
        cluster.waitActive();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        final String parenDir = "/parent";
        dfs.mkdirs(new Path(parenDir));
        final String file1 = "/parent/testMoverFailedRetryWithPinnedBlocks1";
        // write to DISK
        final FSDataOutputStream out = dfs.create(new Path(file1), (short) 2);
        byte[] fileData = StripedFileTestUtil.generateBytes(DEFAULT_BLOCK_SIZE * 2);
        out.write(fileData);
        out.close();
        // Adding pinned blocks.
        createFileWithFavoredDatanodes(conf, cluster, dfs);
        // Delete the block file so the block move fails with FileNotFoundException
        LocatedBlocks locatedBlocks = dfs.getClient().getLocatedBlocks(file1, 0);
        Assert.assertEquals("Wrong block count", 2, locatedBlocks.locatedBlockCount());
        LocatedBlock lb = locatedBlocks.get(0);
        cluster.corruptBlockOnDataNodesByDeletingBlockFile(lb.getBlock());
        // move to ARCHIVE
        dfs.setStoragePolicy(new Path(parenDir), "COLD");
        int rc = ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", parenDir.toString() });
        Assert.assertEquals("Movement should fail after some retry", ExitStatus.NO_MOVE_PROGRESS.getExitCode(), rc);
    } finally {
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), StorageType (org.apache.hadoop.fs.StorageType), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
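
The pinned blocks come from createFileWithFavoredDatanodes, which is not shown on this page. In HDFS, replicas written to favored datanodes are pinned when dfs.datanode.block-pinning.enabled is true, and the Mover will not relocate them. A hypothetical sketch of such a helper (the real one in TestMover may differ; this assumes the pinning flag was set on conf before the datanodes started, and that FsPermission and InetSocketAddress are imported):

private void createFileWithFavoredDatanodes(Configuration conf,
        MiniDFSCluster cluster, DistributedFileSystem dfs) throws IOException {
    // With block pinning enabled, replicas placed on favored datanodes are
    // pinned, so the mover must skip them and only retry the failed block.
    InetSocketAddress[] favoredNodes = new InetSocketAddress[] {
        cluster.getDataNodes().get(0).getXferAddress() };
    final Path file = new Path("/parent/testMoverFailedRetryWithPinnedBlocks2");
    FSDataOutputStream out = dfs.create(file, FsPermission.getDefault(), true,
        DEFAULT_BLOCK_SIZE, (short) 1, DEFAULT_BLOCK_SIZE, null, favoredNodes);
    out.write(StripedFileTestUtil.generateBytes(DEFAULT_BLOCK_SIZE));
    out.close();
}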

Example 38 with StorageType

use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

the class DiskBalancerTestUtil method newImbalancedCluster.

public static MiniDFSCluster newImbalancedCluster(final Configuration conf,
        final int numDatanodes, final long[] storageCapacities,
        final int defaultBlockSize, final int fileLen,
        final StartupOption dnOption)
        throws IOException, InterruptedException, TimeoutException {
    conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize);
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, defaultBlockSize);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
    final String fileName = "/" + UUID.randomUUID().toString();
    final Path filePath = new Path(fileName);
    Preconditions.checkNotNull(storageCapacities);
    Preconditions.checkArgument(storageCapacities.length == 2, "need to specify capacities for two storages.");
    // Write a file and restart the cluster
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(numDatanodes)
        .storageCapacities(storageCapacities)
        .storageTypes(new StorageType[] { StorageType.DISK, StorageType.DISK })
        .storagesPerDatanode(2)
        .dnStartupOption(dnOption)
        .build();
    FsVolumeImpl source = null;
    FsVolumeImpl dest = null;
    cluster.waitActive();
    Random r = new Random();
    FileSystem fs = cluster.getFileSystem(0);
    TestBalancer.createFile(cluster, filePath, fileLen, (short) 1, 0);
    DFSTestUtil.waitReplication(fs, filePath, (short) 1);
    cluster.restartDataNodes();
    cluster.waitActive();
    // Get the data node and move all data to one disk.
    for (int i = 0; i < numDatanodes; i++) {
        DataNode dnNode = cluster.getDataNodes().get(i);
        try (FsDatasetSpi.FsVolumeReferences refs = dnNode.getFSDataset().getFsVolumeReferences()) {
            source = (FsVolumeImpl) refs.get(0);
            dest = (FsVolumeImpl) refs.get(1);
            assertTrue(DiskBalancerTestUtil.getBlockCount(source) > 0);
            DiskBalancerTestUtil.moveAllDataToDestVolume(dnNode.getFSDataset(), source, dest);
            assertEquals(0, DiskBalancerTestUtil.getBlockCount(source));
        }
    }
    cluster.restartDataNodes();
    cluster.waitActive();
    return cluster;
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), StorageType (org.apache.hadoop.fs.StorageType), Random (java.util.Random), FsVolumeImpl (org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), FileSystem (org.apache.hadoop.fs.FileSystem), FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi)
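
A hypothetical caller of this utility; the capacities and file sizes below are illustrative, not taken from a specific Hadoop test:

// Build a 1-datanode cluster with two 300 MB DISK volumes, write ~10 MB in
// 1 MB blocks, then let the helper shift every block onto one volume.
final Configuration conf = new HdfsConfiguration();
final long capacity = 300L * 1024 * 1024;
MiniDFSCluster cluster = DiskBalancerTestUtil.newImbalancedCluster(
    conf, 1, new long[] { capacity, capacity },
    1024 * 1024, 10 * 1024 * 1024, null);
try {
    // The cluster is now imbalanced: one volume holds all the blocks, so a
    // disk-balancer plan computed against it should find data to move.
    cluster.waitActive();
} finally {
    cluster.shutdown();
}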

Example 39 with StorageType

use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

the class ReportBadBlockAction method reportTo.

@Override
public void reportTo(DatanodeProtocolClientSideTranslatorPB bpNamenode, DatanodeRegistration bpRegistration) throws BPServiceActorActionException {
    if (bpRegistration == null) {
        return;
    }
    DatanodeInfo[] dnArr = { new DatanodeInfoBuilder().setNodeID(bpRegistration).build() };
    String[] uuids = { storageUuid };
    StorageType[] types = { storageType };
    LocatedBlock[] locatedBlock = { new LocatedBlock(block, dnArr, uuids, types) };
    try {
        bpNamenode.reportBadBlocks(locatedBlock);
    } catch (RemoteException re) {
        DataNode.LOG.info("reportBadBlock encountered RemoteException for " + "block:  " + block, re);
    } catch (IOException e) {
        throw new BPServiceActorActionException("Failed to report bad block " + block + " to namenode.", e);
    }
}
Also used: DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), StorageType (org.apache.hadoop.fs.StorageType), DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), IOException (java.io.IOException), RemoteException (org.apache.hadoop.ipc.RemoteException)
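
For context, a hypothetical construction of this action: in Hadoop, the DataNode's block-pool service creates it when a corrupt replica is detected, and a BPServiceActor later invokes reportTo against the namenode. The pool id, block id, and storage UUID below are made-up values:

// block, storageUuid and storageType are the fields reportTo reads above.
ExtendedBlock block = new ExtendedBlock("BP-1234-10.0.0.1-1", 1000L,
    134217728L, 2001L);
ReportBadBlockAction action = new ReportBadBlockAction(
    block, "DS-storage-uuid-0", StorageType.DISK);
// A BPServiceActor later calls action.reportTo(bpNamenode, bpRegistration).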

Example 40 with StorageType

use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

the class INodeFile method computeQuotaDeltaForTruncate.

/**
   * Compute the quota usage change for a truncate operation.
   * @param newLength the target length after truncation
   * @param bsps the storage policy used to charge storage-type quota
   * @param delta the quota counts updated in place
   * TODO: properly handle striped blocks (HDFS-7622)
   **/
void computeQuotaDeltaForTruncate(long newLength, BlockStoragePolicy bsps, QuotaCounts delta) {
    final BlockInfo[] blocks = getBlocks();
    if (blocks.length == 0) {
        return;
    }
    long size = 0;
    for (BlockInfo b : blocks) {
        size += b.getNumBytes();
    }
    BlockInfo[] sblocks = null;
    FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
    if (sf != null) {
        FileDiff diff = sf.getDiffs().getLast();
        sblocks = diff != null ? diff.getBlocks() : null;
    }
    for (int i = blocks.length - 1;
         i >= 0 && size > newLength;
         size -= blocks[i].getNumBytes(), --i) {
        BlockInfo bi = blocks[i];
        long truncatedBytes;
        if (size - newLength < bi.getNumBytes()) {
            // Record a full block as the last block will be copied during
            // recovery
            truncatedBytes = bi.getNumBytes() - getPreferredBlockSize();
        } else {
            truncatedBytes = bi.getNumBytes();
        }
        // The block also exists in a snapshot; its bytes are not actually
        // freed, so add them back
        if (sblocks != null && i < sblocks.length && bi.equals(sblocks[i])) {
            truncatedBytes -= bi.getNumBytes();
        }
        delta.addStorageSpace(-truncatedBytes * bi.getReplication());
        if (bsps != null) {
            List<StorageType> types = bsps.chooseStorageTypes(bi.getReplication());
            for (StorageType t : types) {
                if (t.supportTypeQuota()) {
                    delta.addTypeSpace(t, -truncatedBytes);
                }
            }
        }
    }
}
Also used: FileWithSnapshotFeature (org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature), StorageType (org.apache.hadoop.fs.StorageType), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), FileDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff)
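
A worked example of the loop's arithmetic, using hypothetical numbers: three full 128 MB blocks (384 MB total), replication 3, preferred block size 128 MB, truncating to newLength = 200 MB:

// i = 2: size = 384 MB, size - newLength = 184 MB >= 128 MB,
//        so truncatedBytes = 128 MB; the whole block is released.
// i = 1: size = 256 MB, size - newLength = 56 MB < 128 MB,
//        so truncatedBytes = 128 MB - 128 MB = 0; a full preferred block
//        stays charged because truncate recovery copies the last block.
// Loop exits: size is now 128 MB, which is <= newLength (200 MB).
//
// Storage-space delta = -(128 MB * 3 + 0 * 3) = -384 MB of raw capacity,
// plus matching per-StorageType deltas for quota-supporting types.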

Aggregations

StorageType (org.apache.hadoop.fs.StorageType): 94
Test (org.junit.Test): 31
Path (org.apache.hadoop.fs.Path): 27
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 25
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 19
Configuration (org.apache.hadoop.conf.Configuration): 17
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 16
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 14
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 13
BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy): 12
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 11
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 11
IOException (java.io.IOException): 10
ArrayList (java.util.ArrayList): 9
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 6
ByteString (com.google.protobuf.ByteString): 5
InterruptedIOException (java.io.InterruptedIOException): 5
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 5
File (java.io.File): 4
InetSocketAddress (java.net.InetSocketAddress): 4