Example 76 with StorageType

use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

the class DataStreamer method addDatanode2ExistingPipeline.

private void addDatanode2ExistingPipeline() throws IOException {
    DataTransferProtocol.LOG.debug("lastAckedSeqno = {}", lastAckedSeqno);
    /*
     * Is data transfer necessary? We have the following cases.
     *
     * Case 1: Failure in Pipeline Setup
     * - Append
     *    + Transfer the stored replica, which may be an RBW or a finalized one.
     * - Create
     *    + If no data has been written, no transfer is required.
     *    + If data has been written, transfer the RBW. This case may happen
     *      when there was a streaming failure earlier in this pipeline.
     *
     * Case 2: Failure in Streaming
     * - Append/Create:
     *    + Transfer the RBW.
     *
     * Case 3: Failure in Close
     * - Append/Create:
     *    + No transfer; let the NameNode replicate the block.
     */
    if (!isAppend && lastAckedSeqno < 0 && stage == BlockConstructionStage.PIPELINE_SETUP_CREATE) {
        // no data has been written
        return;
    } else if (stage == BlockConstructionStage.PIPELINE_CLOSE || stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
        // the pipeline is closing
        return;
    }
    int tried = 0;
    final DatanodeInfo[] original = nodes;
    final StorageType[] originalTypes = storageTypes;
    final String[] originalIDs = storageIDs;
    IOException caughtException = null;
    ArrayList<DatanodeInfo> exclude = new ArrayList<>(failed);
    while (tried < 3) {
        // ask the NameNode for an additional datanode
        final LocatedBlock lb = dfsClient.namenode.getAdditionalDatanode(src,
                stat.getFileId(), block.getCurrentBlock(), nodes, storageIDs,
                exclude.toArray(new DatanodeInfo[exclude.size()]), 1,
                dfsClient.clientName);
        // a new node was allocated by the namenode. Update nodes.
        setPipeline(lb);
        // find the index of the newly added datanode in the updated pipeline
        final int d = findNewDatanode(original);
        // transfer the replica; pick a source from the original nodes
        final DatanodeInfo src = original[tried % original.length];
        final DatanodeInfo[] targets = { nodes[d] };
        final StorageType[] targetStorageTypes = { storageTypes[d] };
        try {
            transfer(src, targets, targetStorageTypes, lb.getBlockToken());
        } catch (IOException ioe) {
            DFSClient.LOG.warn("Error transferring data from " + src + " to " + nodes[d] + ": " + ioe.getMessage());
            caughtException = ioe;
            // add the allocated node to the exclude list.
            exclude.add(nodes[d]);
            setPipeline(original, originalTypes, originalIDs);
            tried++;
            continue;
        }
        // finished successfully
        return;
    }
    // All retries failed
    throw (caughtException != null) ? caughtException : new IOException("Failed to add a node");
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) StorageType(org.apache.hadoop.fs.StorageType) ArrayList(java.util.ArrayList) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException)
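For context, the findNewDatanode helper called above is not included in this snippet. The following is a reconstructed sketch of its job, assuming (as the surrounding code implies) that the NameNode added exactly one datanode to the pipeline; it follows the shape of the Hadoop source but is not a verbatim copy:

// Reconstructed sketch: return the index, within the updated pipeline "nodes",
// of the one datanode that does not appear in the previous pipeline "original".
private int findNewDatanode(final DatanodeInfo[] original) throws IOException {
    if (nodes.length != original.length + 1) {
        // the NameNode is expected to have granted exactly one extra node
        throw new IOException("Pipeline did not grow by exactly one node: "
                + nodes.length + " vs " + original.length);
    }
    for (int i = 0; i < nodes.length; i++) {
        boolean isOriginal = false;
        for (DatanodeInfo dn : original) {
            if (nodes[i].equals(dn)) {
                isOriginal = true;
                break;
            }
        }
        if (!isOriginal) {
            // nodes[i] is absent from the old pipeline, so it is the new node
            return i;
        }
    }
    throw new IOException("Failed to find the newly added datanode");
}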

Example 77 with StorageType

use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

the class StripedDataStreamer method nextBlockOutputStream.

@Override
protected LocatedBlock nextBlockOutputStream() throws IOException {
    boolean success;
    LocatedBlock lb = getFollowingBlock();
    block.setCurrentBlock(lb.getBlock());
    block.setNumBytes(0);
    bytesSent = 0;
    accessToken = lb.getBlockToken();
    DatanodeInfo[] nodes = lb.getLocations();
    StorageType[] storageTypes = lb.getStorageTypes();
    // Connect to the DataNode. If the connection fails, the internal error state will be set.
    success = createBlockOutputStream(nodes, storageTypes, 0L, false);
    if (!success) {
        block.setCurrentBlock(null);
        final DatanodeInfo badNode = nodes[getErrorState().getBadNodeIndex()];
        LOG.warn("Excluding datanode " + badNode);
        excludedNodes.put(badNode, badNode);
        throw new IOException("Unable to create new block. " + this);
    }
    return lb;
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) StorageType(org.apache.hadoop.fs.StorageType) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) IOException(java.io.IOException)
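One detail worth noting in this example: the DatanodeInfo[] from getLocations() and the StorageType[] from getStorageTypes() are parallel arrays, so index i of both describes the same replica. A minimal illustration of that pairing (the loop and variable names are ours, not from the Hadoop source):

// Illustration: the two arrays returned by a LocatedBlock are index-aligned,
// so the replica on locations[i] lives on a storage of type types[i].
DatanodeInfo[] locations = lb.getLocations();
StorageType[] types = lb.getStorageTypes();
for (int i = 0; i < locations.length; i++) {
    System.out.println(locations[i].getXferAddr() + " -> " + types[i]);
}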

Example 78 with StorageType

use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

the class TestMover method testMoverFailedRetry.

@Test
public void testMoverFailedRetry() throws Exception {
    // HDFS-8147
    final Configuration conf = new HdfsConfiguration();
    initConf(conf);
    conf.set(DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY, "2");
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(3)
            .storageTypes(new StorageType[][] {
                    { StorageType.DISK, StorageType.ARCHIVE },
                    { StorageType.DISK, StorageType.ARCHIVE },
                    { StorageType.DISK, StorageType.ARCHIVE } })
            .build();
    try {
        cluster.waitActive();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        final String file = "/testMoverFailedRetry";
        // write to DISK
        final FSDataOutputStream out = dfs.create(new Path(file), (short) 2);
        out.writeChars("testMoverFailedRetry");
        out.close();
        // Delete the block file so the block move will fail with a FileNotFoundException
        LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
        cluster.corruptBlockOnDataNodesByDeletingBlockFile(lb.getBlock());
        // move to ARCHIVE
        dfs.setStoragePolicy(new Path(file), "COLD");
        int rc = ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", file });
        Assert.assertEquals("Movement should fail after some retry", ExitStatus.NO_MOVE_PROGRESS.getExitCode(), rc);
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) StorageType(org.apache.hadoop.fs.StorageType) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
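Since the mover gives up once the configured retry budget (two attempts here) is exhausted, the file's replicas should still report DISK after the tool exits. A hedged sketch of such a follow-up assertion, reusing only APIs that already appear in these examples; it is not part of the original test:

// Sketch: after the failed move, the replicas should still be on DISK.
LocatedBlock first = dfs.getClient().getLocatedBlocks(file, 0).get(0);
for (StorageType type : first.getStorageTypes()) {
    Assert.assertEquals(StorageType.DISK, type);
}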

Example 79 with StorageType

use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

the class TestMover method testMoverWithStripedFile.

@Test(timeout = 300000)
public void testMoverWithStripedFile() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    initConfWithStripe(conf);
    // start 10 datanodes
    int numOfDatanodes = 10;
    int storagesPerDatanode = 2;
    long capacity = 10 * defaultBlockSize;
    long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
    for (int i = 0; i < numOfDatanodes; i++) {
        for (int j = 0; j < storagesPerDatanode; j++) {
            capacities[i][j] = capacity;
        }
    }
    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, StripedFileTestUtil.getDefaultECPolicy().getName());
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(numOfDatanodes)
            .storagesPerDatanode(storagesPerDatanode)
            .storageTypes(new StorageType[][] {
                    { StorageType.DISK, StorageType.DISK },
                    { StorageType.DISK, StorageType.DISK },
                    { StorageType.DISK, StorageType.DISK },
                    { StorageType.DISK, StorageType.DISK },
                    { StorageType.DISK, StorageType.DISK },
                    { StorageType.DISK, StorageType.ARCHIVE },
                    { StorageType.DISK, StorageType.ARCHIVE },
                    { StorageType.DISK, StorageType.ARCHIVE },
                    { StorageType.DISK, StorageType.ARCHIVE },
                    { StorageType.DISK, StorageType.ARCHIVE } })
            .storageCapacities(capacities)
            .build();
    try {
        cluster.waitActive();
        // set "/bar" directory with HOT storage policy.
        ClientProtocol client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
        String barDir = "/bar";
        client.mkdirs(barDir, new FsPermission((short) 777), true);
        client.setStoragePolicy(barDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
        // set an EC policy on "/bar" directory
        client.setErasureCodingPolicy(barDir, StripedFileTestUtil.getDefaultECPolicy().getName());
        // write file to barDir
        final String fooFile = "/bar/foo";
        long fileLen = 20 * defaultBlockSize;
        DFSTestUtil.createFile(cluster.getFileSystem(), new Path(fooFile), fileLen, (short) 3, 0);
        // verify storage types and locations
        LocatedBlocks locatedBlocks = client.getBlockLocations(fooFile, 0, fileLen);
        for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
            for (StorageType type : lb.getStorageTypes()) {
                Assert.assertEquals(StorageType.DISK, type);
            }
        }
        StripedFileTestUtil.verifyLocatedStripedBlocks(locatedBlocks, dataBlocks + parityBlocks);
        // start 5 more datanodes
        numOfDatanodes += 5;
        capacities = new long[5][storagesPerDatanode];
        for (int i = 0; i < 5; i++) {
            for (int j = 0; j < storagesPerDatanode; j++) {
                capacities[i][j] = capacity;
            }
        }
        cluster.startDataNodes(conf, 5,
                new StorageType[][] {
                        { StorageType.ARCHIVE, StorageType.ARCHIVE },
                        { StorageType.ARCHIVE, StorageType.ARCHIVE },
                        { StorageType.ARCHIVE, StorageType.ARCHIVE },
                        { StorageType.ARCHIVE, StorageType.ARCHIVE },
                        { StorageType.ARCHIVE, StorageType.ARCHIVE } },
                true, null, null, null, capacities, null, false, false, false, null);
        cluster.triggerHeartbeats();
        // move file to ARCHIVE
        client.setStoragePolicy(barDir, "COLD");
        // run Mover
        int rc = ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", barDir });
        Assert.assertEquals("Movement to ARCHIVE should be successful", 0, rc);
        // verify storage types and locations
        locatedBlocks = client.getBlockLocations(fooFile, 0, fileLen);
        for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
            for (StorageType type : lb.getStorageTypes()) {
                Assert.assertEquals(StorageType.ARCHIVE, type);
            }
        }
        StripedFileTestUtil.verifyLocatedStripedBlocks(locatedBlocks, dataBlocks + parityBlocks);
        // start 5 more datanodes
        numOfDatanodes += 5;
        capacities = new long[5][storagesPerDatanode];
        for (int i = 0; i < 5; i++) {
            for (int j = 0; j < storagesPerDatanode; j++) {
                capacities[i][j] = capacity;
            }
        }
        cluster.startDataNodes(conf, 5,
                new StorageType[][] {
                        { StorageType.SSD, StorageType.DISK },
                        { StorageType.SSD, StorageType.DISK },
                        { StorageType.SSD, StorageType.DISK },
                        { StorageType.SSD, StorageType.DISK },
                        { StorageType.SSD, StorageType.DISK } },
                true, null, null, null, capacities, null, false, false, false, null);
        cluster.triggerHeartbeats();
        // move file blocks to ONE_SSD policy
        client.setStoragePolicy(barDir, "ONE_SSD");
        // run Mover
        rc = ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", barDir });
        // verify storage types and locations
        // Movements should have been ignored for the unsupported policy on
        // striped file
        locatedBlocks = client.getBlockLocations(fooFile, 0, fileLen);
        for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
            for (StorageType type : lb.getStorageTypes()) {
                Assert.assertEquals(StorageType.ARCHIVE, type);
            }
        }
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) StorageType(org.apache.hadoop.fs.StorageType) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FsPermission(org.apache.hadoop.fs.permission.FsPermission) ClientProtocol(org.apache.hadoop.hdfs.protocol.ClientProtocol) Test(org.junit.Test)

Example 80 with StorageType

use of org.apache.hadoop.fs.StorageType in project hadoop by apache.

the class TestMover method testMoveWhenStoragePolicyNotSatisfying.

@Test(timeout = 300000)
public void testMoveWhenStoragePolicyNotSatisfying() throws Exception {
    // HDFS-8147
    final Configuration conf = new HdfsConfiguration();
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(3)
            .storageTypes(new StorageType[][] {
                    { StorageType.DISK }, { StorageType.DISK }, { StorageType.DISK } })
            .build();
    try {
        cluster.waitActive();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        final String file = "/testMoveWhenStoragePolicyNotSatisfying";
        // write to DISK
        final FSDataOutputStream out = dfs.create(new Path(file));
        out.writeChars("testMoveWhenStoragePolicyNotSatisfying");
        out.close();
        // move to ARCHIVE
        dfs.setStoragePolicy(new Path(file), "COLD");
        int rc = ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", file });
        int exitcode = ExitStatus.NO_MOVE_BLOCK.getExitCode();
        Assert.assertEquals("Exit code should be " + exitcode, exitcode, rc);
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) StorageType(org.apache.hadoop.fs.StorageType) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Test(org.junit.Test)
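Taken together, these tests distinguish Mover outcomes purely by exit code: 0 on success, NO_MOVE_BLOCK when no block can be moved at all, and NO_MOVE_PROGRESS when moves are attempted but keep failing. A hedged sketch of dispatching on the result; we assume the ExitStatus enum referenced above is org.apache.hadoop.hdfs.server.balancer.ExitStatus and that it also defines a SUCCESS constant:

// Sketch: interpret the Mover exit code (constants as used in the tests above).
int rc = ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", "/some/dir" });
if (rc == ExitStatus.SUCCESS.getExitCode()) {
    // every block now satisfies its storage policy
} else if (rc == ExitStatus.NO_MOVE_BLOCK.getExitCode()) {
    // nothing could be moved, e.g. no datanode offers the target storage type
} else if (rc == ExitStatus.NO_MOVE_PROGRESS.getExitCode()) {
    // moves were scheduled but repeatedly failed within the retry budget
}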

Aggregations

Classes co-occurring with StorageType in the indexed sources, with usage counts:

StorageType (org.apache.hadoop.fs.StorageType): 91
Test (org.junit.Test): 31
Path (org.apache.hadoop.fs.Path): 27
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 24
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 18
Configuration (org.apache.hadoop.conf.Configuration): 17
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 16
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 14
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 13
BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy): 12
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 11
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 10
IOException (java.io.IOException): 9
ArrayList (java.util.ArrayList): 8
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 6
ByteString (com.google.protobuf.ByteString): 5
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 5
File (java.io.File): 4
InetSocketAddress (java.net.InetSocketAddress): 4
HashSet (java.util.HashSet): 4
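For reference, StorageType itself is a small enum over the storage media HDFS distinguishes (DISK, SSD, ARCHIVE, RAM_DISK in the versions shown here). A minimal hedged sketch of typical uses; the values all appear in the examples above, while parseStorageType and isTransient are method names we recall from the Hadoop API rather than ones confirmed by this page:

// Sketch: common StorageType operations (method names assumed, see lead-in).
StorageType archive = StorageType.parseStorageType("ARCHIVE");
System.out.println("parsed: " + archive);
for (StorageType t : StorageType.values()) {
    // RAM_DISK is the memory-backed, transient type; DISK, SSD and ARCHIVE persist
    System.out.println(t + " transient=" + t.isTransient());
}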