
Example 26 with DataNode

Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

The class TestWriteToReplica, method testClose.

// test close
@Test
public void testClose() throws Exception {
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
    try {
        cluster.waitActive();
        DataNode dn = cluster.getDataNodes().get(0);
        FsDatasetSpi<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
        // set up replicasMap
        String bpid = cluster.getNamesystem().getBlockPoolId();
        ExtendedBlock[] blocks = setup(bpid, cluster.getFsDatasetTestUtils(dn));
        // test close
        testClose(dataSet, blocks);
    } finally {
        cluster.shutdown();
    }
}
Also used: MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Test(org.junit.Test)
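
The try/finally pair is the standard MiniDFSCluster lifecycle: build, waitActive, exercise, shutdown. A minimal self-contained sketch of just that lifecycle (the class name and printout are illustrative, not part of the test):

import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

public class MiniClusterLifecycleSketch {

    public static void main(String[] args) throws Exception {
        // Single-NameNode, single-DataNode cluster with default settings.
        MiniDFSCluster cluster =
                new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
        try {
            // Block until the NameNode and DataNode are up and registered.
            cluster.waitActive();
            DataNode dn = cluster.getDataNodes().get(0);
            System.out.println("DataNode up: " + dn.getDisplayName());
        } finally {
            // Always release ports and storage directories, even on failure.
            cluster.shutdown();
        }
    }
}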

Example 27 with DataNode

Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

The class TestDiskBalancerCommand, method runAndVerifyPlan.

private String runAndVerifyPlan(final MiniDFSCluster miniCluster, final Configuration hdfsConf) throws Exception {
    String cmdLine = "";
    List<String> outputs = null;
    final DataNode dn = miniCluster.getDataNodes().get(0);
    /* run plan command */
    cmdLine = String.format("hdfs diskbalancer -%s %s", PLAN, dn.getDatanodeUuid());
    outputs = runCommand(cmdLine, hdfsConf, miniCluster);
    /* the plan file is named after the datanode UUID */
    final String planFileName = dn.getDatanodeUuid();
    /* verify plan command */
    assertEquals("There must be two lines: the 1st is writing plan to...," + " the 2nd is actual full path of plan file.", 2, outputs.size());
    assertThat(outputs.get(1), containsString(planFileName));
    /* get full path of plan file */
    final String planFileFullName = outputs.get(1);
    return planFileFullName;
}
Also used: DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) DiskBalancerDataNode(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString)
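
The test drives the same step a user runs from a shell: hdfs diskbalancer -plan <datanode>. A hedged sketch of issuing that command programmatically; it assumes the DiskBalancerCLI tool class from the same source tree with a Configuration-only constructor, and the datanode identifier is a placeholder:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
import org.apache.hadoop.util.ToolRunner;

public class PlanCommandSketch {

    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // "-plan" writes a plan file named after the datanode UUID and
        // prints its full path; the test above asserts on that output.
        int rc = ToolRunner.run(conf, new DiskBalancerCLI(conf),
                new String[] { "-plan", "dn-uuid-or-host" });
        System.exit(rc);
    }
}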

Example 28 with DataNode

Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

The class TestSpaceReservation, method testReservedSpaceForPipelineRecovery.

@Test(timeout = 30000)
public void testReservedSpaceForPipelineRecovery() throws Exception {
    final short replication = 3;
    startCluster(BLOCK_SIZE, replication, -1);
    final String methodName = GenericTestUtils.getMethodName();
    final Path file = new Path("/" + methodName + ".01.dat");
    // "old" is a field of the test class; save the current injector so
    // teardown can restore it.
    old = DataNodeFaultInjector.get();
    // Fault injector to fail connection to mirror first time.
    DataNodeFaultInjector.set(new DataNodeFaultInjector() {

        private int tries = 0;

        @Override
        public void failMirrorConnection() throws IOException {
            if (tries++ == 0) {
                throw new IOException("Failing Mirror for space reservation");
            }
        }
    });
    // Write 1 byte; the injected mirror failure forces a pipeline recovery
    // before the stream is closed.
    FSDataOutputStream os = fs.create(file, replication);
    os.write(new byte[1]);
    os.close();
    // Ensure all space reserved for the replica was released on each
    // DataNode.
    cluster.triggerBlockReports();
    for (final DataNode dn : cluster.getDataNodes()) {
        try (FsDatasetSpi.FsVolumeReferences volumes = dn.getFSDataset().getFsVolumeReferences()) {
            final FsVolumeImpl volume = (FsVolumeImpl) volumes.get(0);
            GenericTestUtils.waitFor(new Supplier<Boolean>() {

                @Override
                public Boolean get() {
                    LOG.info("dn " + dn.getDisplayName() + " space : " + volume.getReservedForReplicas());
                    return (volume.getReservedForReplicas() == 0);
                }
            }, 100, Integer.MAX_VALUE); // poll every 100 ms; the @Test timeout bounds the wait
        }
    }
}
Also used: Path(org.apache.hadoop.fs.Path) DataNodeFaultInjector(org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector) FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) IOException(java.io.IOException) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
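
The example installs the injector but leaves the restore to the test's teardown (the saved instance is held in the field old). A minimal sketch of the full save/restore pattern, using only the injector API shown above:

import java.io.IOException;

import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;

public class FaultInjectorSketch {

    // Runs body with a mirror-connection failure injected, then restores
    // whatever injector was installed before.
    static void withFailingMirror(Runnable body) {
        DataNodeFaultInjector saved = DataNodeFaultInjector.get();
        DataNodeFaultInjector.set(new DataNodeFaultInjector() {

            @Override
            public void failMirrorConnection() throws IOException {
                throw new IOException("injected mirror failure");
            }
        });
        try {
            // Exercise the write pipeline here.
            body.run();
        } finally {
            DataNodeFaultInjector.set(saved);
        }
    }
}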

Example 29 with DataNode

Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

The class TestSpaceReservation, method testSpaceReleasedOnUnexpectedEof.

/**
   * Ensure that reserved space is released when the client goes away
   * unexpectedly.
   *
   * The verification is done for each replica in the write pipeline.
   *
   * @throws IOException
   */
@Test(timeout = 300000)
public void testSpaceReleasedOnUnexpectedEof() throws IOException, InterruptedException, TimeoutException {
    final short replication = 3;
    startCluster(BLOCK_SIZE, replication, -1);
    final String methodName = GenericTestUtils.getMethodName();
    final Path file = new Path("/" + methodName + ".01.dat");
    // Write 1 byte to the file and kill the writer.
    FSDataOutputStream os = fs.create(file, replication);
    os.write(new byte[1]);
    os.hsync();
    DFSTestUtil.abortStream((DFSOutputStream) os.getWrappedStream());
    // Ensure all space reserved for the replica was released on each
    // DataNode.
    for (DataNode dn : cluster.getDataNodes()) {
        try (FsDatasetSpi.FsVolumeReferences volumes = dn.getFSDataset().getFsVolumeReferences()) {
            final FsVolumeImpl volume = (FsVolumeImpl) volumes.get(0);
            GenericTestUtils.waitFor(new Supplier<Boolean>() {

                @Override
                public Boolean get() {
                    return (volume.getReservedForReplicas() == 0);
                }
            }, 500, Integer.MAX_VALUE); // poll every 500 ms; the @Test timeout bounds the wait
        }
    }
}
Also used: Path(org.apache.hadoop.fs.Path) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
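
GenericTestUtils.waitFor polls the supplied check every checkEveryMillis milliseconds and gives up after waitForMillis; passing Integer.MAX_VALUE, as above, defers the bound to the @Test timeout. On a Java 8+ classpath the anonymous Supplier collapses to a lambda; an equivalent of the polling check above, with an explicit 60-second cap instead:

// Same condition as the anonymous Supplier, written as a lambda.
GenericTestUtils.waitFor(() -> volume.getReservedForReplicas() == 0,
        500, 60_000);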

Example 30 with DataNode

Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

The class DiskBalancerTestUtil, method newImbalancedCluster.

public static MiniDFSCluster newImbalancedCluster(
        final Configuration conf, final int numDatanodes,
        final long[] storageCapacities, final int defaultBlockSize,
        final int fileLen, final StartupOption dnOption)
        throws IOException, InterruptedException, TimeoutException {
    conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize);
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, defaultBlockSize);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
    final String fileName = "/" + UUID.randomUUID().toString();
    final Path filePath = new Path(fileName);
    Preconditions.checkNotNull(storageCapacities);
    Preconditions.checkArgument(storageCapacities.length == 2, "need to specify capacities for two storages.");
    // Write a file and restart the cluster
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(numDatanodes)
            .storageCapacities(storageCapacities)
            .storageTypes(new StorageType[] { StorageType.DISK, StorageType.DISK })
            .storagesPerDatanode(2)
            .dnStartupOption(dnOption)
            .build();
    FsVolumeImpl source = null;
    FsVolumeImpl dest = null;
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem(0);
    TestBalancer.createFile(cluster, filePath, fileLen, (short) 1, 0);
    DFSTestUtil.waitReplication(fs, filePath, (short) 1);
    cluster.restartDataNodes();
    cluster.waitActive();
    // Get the data node and move all data to one disk.
    for (int i = 0; i < numDatanodes; i++) {
        DataNode dnNode = cluster.getDataNodes().get(i);
        try (FsDatasetSpi.FsVolumeReferences refs = dnNode.getFSDataset().getFsVolumeReferences()) {
            source = (FsVolumeImpl) refs.get(0);
            dest = (FsVolumeImpl) refs.get(1);
            assertTrue(DiskBalancerTestUtil.getBlockCount(source) > 0);
            DiskBalancerTestUtil.moveAllDataToDestVolume(dnNode.getFSDataset(), source, dest);
            assertTrue(DiskBalancerTestUtil.getBlockCount(source) == 0);
        }
    }
    cluster.restartDataNodes();
    cluster.waitActive();
    return cluster;
}
Also used: Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) StorageType(org.apache.hadoop.fs.StorageType) FsVolumeImpl(org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) DiskBalancerDataNode(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) FileSystem(org.apache.hadoop.fs.FileSystem) FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi)
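
A hypothetical call site, to show how the parameters line up; the capacities, block size, and file length are invented for illustration, and dnOption is left null for the default DataNode startup:

Configuration conf = new HdfsConfiguration();
// Two storages per DataNode, 300 KB each (matches storagesPerDatanode(2)).
long[] capacities = { 300 * 1024L, 300 * 1024L };
MiniDFSCluster cluster = DiskBalancerTestUtil.newImbalancedCluster(
        conf, /* numDatanodes */ 1, capacities,
        /* defaultBlockSize */ 1024, /* fileLen */ 30 * 1024,
        /* dnOption */ null);
try {
    // Every DataNode now holds all of its blocks on its second volume.
} finally {
    cluster.shutdown();
}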

Aggregations

DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 165 uses
Test (org.junit.Test): 110 uses
Path (org.apache.hadoop.fs.Path): 78 uses
Configuration (org.apache.hadoop.conf.Configuration): 60 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 47 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 37 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 37 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 35 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 29 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 28 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 27 uses
IOException (java.io.IOException): 24 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 20 uses
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 20 uses
ArrayList (java.util.ArrayList): 17 uses
DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode): 17 uses
File (java.io.File): 15 uses
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 14 uses
DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor): 13 uses
FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi): 12 uses