Example 36 with DataNode

Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

From the class TestDiskBalancerRPC, method testCancelNonExistentPlan.

@Test
public void testCancelNonExistentPlan() throws Exception {
    RpcTestHelper rpcTestHelper = new RpcTestHelper().invoke();
    DataNode dataNode = rpcTestHelper.getDataNode();
    String planHash = rpcTestHelper.getPlanHash();
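    // Corrupt the first character of the plan hash so it no longer matches any submitted plan.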
    char[] hashArray = planHash.toCharArray();
    hashArray[0]++;
    planHash = String.valueOf(hashArray);
    NodePlan plan = rpcTestHelper.getPlan();
    thrown.expect(DiskBalancerException.class);
    thrown.expect(new DiskBalancerResultVerifier(Result.NO_SUCH_PLAN));
    dataNode.cancelDiskBalancePlan(planHash);
}
Also used: NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), Test (org.junit.Test)
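
The test above calls thrown.expect(...) on a field that the snippet does not show. A minimal sketch of how that field is typically declared, assuming TestDiskBalancerRPC follows the standard JUnit 4 ExpectedException rule pattern:

import org.junit.Rule;
import org.junit.rules.ExpectedException;

public class TestDiskBalancerRPC {
    // JUnit 4 rule that records which exception class and matchers the current
    // test expects; the real declaration lives elsewhere in the test class.
    @Rule
    public ExpectedException thrown = ExpectedException.none();
}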

Example 37 with DataNode

Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

From the class TestDiskBalancerRPC, method testMoveBlockAcrossVolume.

@Test
public void testMoveBlockAcrossVolume() throws Exception {
    Configuration conf = new HdfsConfiguration();
    final int defaultBlockSize = 100;
    conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize);
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, defaultBlockSize);
    String fileName = "/tmp.txt";
    Path filePath = new Path(fileName);
    final int numDatanodes = 1;
    final int dnIndex = 0;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    FsVolumeImpl source = null;
    FsVolumeImpl dest = null;
    try {
        cluster.waitActive();
        Random r = new Random();
        FileSystem fs = cluster.getFileSystem(dnIndex);
        DFSTestUtil.createFile(fs, filePath, 10 * 1024, (short) 1, r.nextLong());
        DataNode dnNode = cluster.getDataNodes().get(dnIndex);
        FsDatasetSpi.FsVolumeReferences refs = dnNode.getFSDataset().getFsVolumeReferences();
        try {
            source = (FsVolumeImpl) refs.get(0);
            dest = (FsVolumeImpl) refs.get(1);
            DiskBalancerTestUtil.moveAllDataToDestVolume(dnNode.getFSDataset(), source, dest);
            assertTrue(DiskBalancerTestUtil.getBlockCount(source) == 0);
        } finally {
            refs.close();
        }
    } finally {
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi), Random (java.util.Random), FsVolumeImpl (org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), FileSystem (org.apache.hadoop.fs.FileSystem), Test (org.junit.Test)
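
The explicit refs.close() call above suggests that FsDatasetSpi.FsVolumeReferences is Closeable. Under that assumption, a sketch of the same volume handling rewritten with try-with-resources, using only calls already shown in the test:

        try (FsDatasetSpi.FsVolumeReferences refs =
                 dnNode.getFSDataset().getFsVolumeReferences()) {
            FsVolumeImpl source = (FsVolumeImpl) refs.get(0);
            FsVolumeImpl dest = (FsVolumeImpl) refs.get(1);
            // Drain every block from the source volume into the destination.
            DiskBalancerTestUtil.moveAllDataToDestVolume(dnNode.getFSDataset(), source, dest);
            // After the move the source volume should hold no blocks.
            assertTrue(DiskBalancerTestUtil.getBlockCount(source) == 0);
        }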

Example 38 with DataNode

Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

From the class TestInterDatanodeProtocol, method testUpdateReplicaUnderRecovery.

/**
 * Test for
 * {@link FsDatasetImpl#updateReplicaUnderRecovery(ExtendedBlock, long, long, long)}.
 */
@Test
public void testUpdateReplicaUnderRecovery() throws IOException {
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        cluster.waitActive();
        //create a file
        DistributedFileSystem dfs = cluster.getFileSystem();
        String filestr = "/foo";
        Path filepath = new Path(filestr);
        DFSTestUtil.createFile(dfs, filepath, 1024L, (short) 3, 0L);
        //get block info
        final LocatedBlock locatedblock = getLastLocatedBlock(DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
        final DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
        Assert.assertTrue(datanodeinfo.length > 0);
        //get DataNode and FSDataset objects
        final DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
        Assert.assertTrue(datanode != null);
        //initReplicaRecovery
        final ExtendedBlock b = locatedblock.getBlock();
        final long recoveryid = b.getGenerationStamp() + 1;
        final long newlength = b.getNumBytes() - 1;
        final FsDatasetSpi<?> fsdataset = DataNodeTestUtils.getFSDataset(datanode);
        final ReplicaRecoveryInfo rri = fsdataset.initReplicaRecovery(new RecoveringBlock(b, null, recoveryid));
        //check replica
        final Replica replica = cluster.getFsDatasetTestUtils(datanode).fetchReplica(b);
        Assert.assertEquals(ReplicaState.RUR, replica.getState());
        //check meta data before update
        cluster.getFsDatasetTestUtils(datanode).checkStoredReplica(replica);
        //case "THIS IS NOT SUPPOSED TO HAPPEN"
        //with (block length) != (stored replica's on disk length). 
        {
            //create a block with same id and gs but different length.
            final ExtendedBlock tmp = new ExtendedBlock(b.getBlockPoolId(), rri.getBlockId(), rri.getNumBytes() - 1, rri.getGenerationStamp());
            try {
                //update should fail
                fsdataset.updateReplicaUnderRecovery(tmp, recoveryid, tmp.getBlockId(), newlength);
                Assert.fail();
            } catch (IOException ioe) {
                System.out.println("GOOD: getting " + ioe);
            }
        }
        //update
        final Replica r = fsdataset.updateReplicaUnderRecovery(new ExtendedBlock(b.getBlockPoolId(), rri), recoveryid, rri.getBlockId(), newlength);
        assertTrue(r != null);
        assertTrue(r.getStorageUuid() != null);
    } finally {
        if (cluster != null)
            cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), IOException (java.io.IOException), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), FinalizedReplica (org.apache.hadoop.hdfs.server.datanode.FinalizedReplica), Replica (org.apache.hadoop.hdfs.server.datanode.Replica), ReplicaRecoveryInfo (org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock), Test (org.junit.Test)
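
The test above only checks that updateReplicaUnderRecovery returns a replica with a storage UUID. A hedged sketch of extra assertions one might add, assuming Replica exposes getNumBytes() and getGenerationStamp() and that a successful update records the new length and the recovery generation stamp:

        // Assumptions: Replica exposes getNumBytes()/getGenerationStamp(), and a
        // successful update leaves the replica at newlength with the recovery id
        // as its generation stamp.
        Assert.assertEquals(newlength, r.getNumBytes());
        Assert.assertEquals(recoveryid, r.getGenerationStamp());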

Example 39 with DataNode

Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

From the class TestDiskBalancerRPC, method testSubmitPlanWithInvalidPlan.

@Test
public void testSubmitPlanWithInvalidPlan() throws Exception {
    RpcTestHelper rpcTestHelper = new RpcTestHelper().invoke();
    DataNode dataNode = rpcTestHelper.getDataNode();
    String planHash = rpcTestHelper.getPlanHash();
    int planVersion = rpcTestHelper.getPlanVersion();
    NodePlan plan = rpcTestHelper.getPlan();
    thrown.expect(DiskBalancerException.class);
    thrown.expect(new DiskBalancerResultVerifier(Result.INVALID_PLAN));
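    // An empty plan file name and empty plan JSON should be rejected as an invalid plan.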
    dataNode.submitDiskBalancerPlan(planHash, planVersion, "", "", false);
}
Also used: NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), Test (org.junit.Test)
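
For contrast with the invalid submission above, a hedged sketch of what a valid call through the same helper might look like. It assumes NodePlan.toJson() serializes the plan to JSON and uses "plan.json" as a stand-in for whatever plan file name the test actually passes:

    // "plan.json" is a hypothetical file name; plan.toJson() is assumed to
    // produce the JSON body the DataNode expects.
    dataNode.submitDiskBalancerPlan(planHash, planVersion, "plan.json", plan.toJson(), false);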

Example 40 with DataNode

Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

From the class TestDiskBalancerCommand, method testDiskBalancerQueryWithoutSubmit.

/**
   * Making sure that we can query the node without having done a submit.
   * @throws Exception
   */
@Test
public void testDiskBalancerQueryWithoutSubmit() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
    final int numDatanodes = 2;
    MiniDFSCluster miniDFSCluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    try {
        miniDFSCluster.waitActive();
        DataNode dataNode = miniDFSCluster.getDataNodes().get(0);
        final String queryArg = String.format("-query localhost:%d", dataNode.getIpcPort());
        final String cmdLine = String.format("hdfs diskbalancer %s", queryArg);
        runCommand(cmdLine);
    } finally {
        miniDFSCluster.shutdown();
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString), Test (org.junit.Test)
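
The test drives the query through the hdfs diskbalancer CLI. A hedged sketch of the same check made directly against the DataNode, assuming it exposes queryDiskBalancerPlan() returning a DiskBalancerWorkStatus and that a node with no submitted plan reports Result.NO_PLAN:

        // Assumption: a DataNode with no submitted plan reports NO_PLAN when
        // queried over RPC.
        DiskBalancerWorkStatus status = dataNode.queryDiskBalancerPlan();
        assertEquals(DiskBalancerWorkStatus.Result.NO_PLAN, status.getResult());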

Aggregations

DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 165 usages
Test (org.junit.Test): 110 usages
Path (org.apache.hadoop.fs.Path): 78 usages
Configuration (org.apache.hadoop.conf.Configuration): 60 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 47 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 37 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 37 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 35 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 29 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 28 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 27 usages
IOException (java.io.IOException): 24 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 20 usages
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 20 usages
ArrayList (java.util.ArrayList): 17 usages
DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode): 17 usages
File (java.io.File): 15 usages
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 14 usages
DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor): 13 usages
FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi): 12 usages