
Example 76 with DFSClient

Use of org.apache.hadoop.hdfs.DFSClient in project hadoop by apache.

Class TestResolveHdfsSymlink, method testLinkTargetNonExistent.

/**
   * Tests that attempting to resolve a non-existent file
   * fails with FileNotFoundException.
   */
@Test
public void testLinkTargetNonExistent() throws IOException {
    Path doesNotExist = new Path("/filethatdoesnotexist");
    DFSClient client = new DFSClient(cluster.getFileSystem().getUri(), cluster.getConfiguration(0));
    try {
        client.getLinkTarget(doesNotExist.toString());
        fail("Expected exception for resolving non-existent file");
    } catch (FileNotFoundException e) {
        GenericTestUtils.assertExceptionContains("File does not exist: " + doesNotExist.toString(), e);
    }
}
Also used: DFSClient (org.apache.hadoop.hdfs.DFSClient), FileNotFoundException (java.io.FileNotFoundException), Test (org.junit.Test)
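
For contrast with the failure case above, a positive-path check would create a symlink with DFSClient#createSymlink and then resolve it with getLinkTarget. The sketch below is not part of the Hadoop test: the file and link names are made up, it assumes the same cluster field, and it assumes symlinks are enabled in the test JVM (e.g. via FileSystem.enableSymlinks()).

@Test
public void testLinkTargetExistent() throws IOException {
    // Hypothetical companion check, not from the Hadoop source.
    Path target = new Path("/realfile");
    Path link = new Path("/linktorealfile");
    DFSClient client = new DFSClient(cluster.getFileSystem().getUri(), cluster.getConfiguration(0));
    try {
        DFSTestUtil.createFile(cluster.getFileSystem(), target, 1024L, (short) 1, 0L);
        // createSymlink(target, link, createParent)
        client.createSymlink(target.toString(), link.toString(), false);
        assertEquals(target.toString(), client.getLinkTarget(link.toString()));
    } finally {
        client.close();
    }
}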

Example 77 with DFSClient

Use of org.apache.hadoop.hdfs.DFSClient in project hadoop by apache.

Class TestDataNodeRollingUpgrade, method testDatanodePeersXceiver.

// Test that DataXceiver has correct peer-dataxceiver pairs for sending OOB messages
@Test(timeout = 600000)
public void testDatanodePeersXceiver() throws Exception {
    try {
        startCluster();
        // Create files in DFS.
        String testFile1 = "/" + GenericTestUtils.getMethodName() + ".01.dat";
        String testFile2 = "/" + GenericTestUtils.getMethodName() + ".02.dat";
        String testFile3 = "/" + GenericTestUtils.getMethodName() + ".03.dat";
        DFSClient client1 = new DFSClient(DFSUtilClient.getNNAddress(conf), conf);
        DFSClient client2 = new DFSClient(DFSUtilClient.getNNAddress(conf), conf);
        DFSClient client3 = new DFSClient(DFSUtilClient.getNNAddress(conf), conf);
        DFSOutputStream s1 = (DFSOutputStream) client1.create(testFile1, true);
        DFSOutputStream s2 = (DFSOutputStream) client2.create(testFile2, true);
        DFSOutputStream s3 = (DFSOutputStream) client3.create(testFile3, true);
        byte[] toWrite = new byte[1024 * 1024 * 8];
        Random rb = new Random(1111);
        rb.nextBytes(toWrite);
        s1.write(toWrite, 0, 1024 * 1024 * 8);
        s1.flush();
        s2.write(toWrite, 0, 1024 * 1024 * 8);
        s2.flush();
        s3.write(toWrite, 0, 1024 * 1024 * 8);
        s3.flush();
        // The TCP peer map and the peer->xceiver map should stay in sync
        assertTrue(dn0.getXferServer().getNumPeers() == dn0.getXferServer().getNumPeersXceiver());
        s1.close();
        s2.close();
        s3.close();
        // After closing the streams the two maps should still agree
        assertTrue(dn0.getXferServer().getNumPeers() == dn0.getXferServer().getNumPeersXceiver());
        client1.close();
        client2.close();
        client3.close();
    } finally {
        shutdownCluster();
    }
}
Also used: DFSClient (org.apache.hadoop.hdfs.DFSClient), Random (java.util.Random), DFSOutputStream (org.apache.hadoop.hdfs.DFSOutputStream), Test (org.junit.Test)
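
Because both DFSClient and DFSOutputStream are Closeable, the create-write-flush sequence from the test can also be written with try-with-resources. A minimal sketch for a single client/stream pair, assuming the same conf field and a started cluster:

// Sketch only: one client and one stream, closed automatically on exit from the try block.
String testFile = "/" + GenericTestUtils.getMethodName() + ".dat";
byte[] toWrite = new byte[8 * 1024 * 1024];
new Random(1111).nextBytes(toWrite);
try (DFSClient client = new DFSClient(DFSUtilClient.getNNAddress(conf), conf);
        DFSOutputStream out = (DFSOutputStream) client.create(testFile, true)) {
    out.write(toWrite, 0, toWrite.length);
    out.flush();
}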

Example 78 with DFSClient

Use of org.apache.hadoop.hdfs.DFSClient in project hadoop by apache.

Class TestBlockReplacement, method testBlockReplacement.

@Test
public void testBlockReplacement() throws Exception {
    final Configuration CONF = new HdfsConfiguration();
    final String[] INITIAL_RACKS = { "/RACK0", "/RACK1", "/RACK2" };
    final String[] NEW_RACKS = { "/RACK2" };
    final short REPLICATION_FACTOR = (short) 3;
    final int DEFAULT_BLOCK_SIZE = 1024;
    final Random r = new Random();
    CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
    CONF.setInt(HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE / 2);
    CONF.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 500);
    cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(REPLICATION_FACTOR).racks(INITIAL_RACKS).build();
    try {
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        Path fileName = new Path("/tmp.txt");
        // create a file with one block
        DFSTestUtil.createFile(fs, fileName, DEFAULT_BLOCK_SIZE, REPLICATION_FACTOR, r.nextLong());
        DFSTestUtil.waitReplication(fs, fileName, REPLICATION_FACTOR);
        // get all datanodes
        InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
        DFSClient client = new DFSClient(addr, CONF);
        List<LocatedBlock> locatedBlocks = client.getNamenode().getBlockLocations("/tmp.txt", 0, DEFAULT_BLOCK_SIZE).getLocatedBlocks();
        assertEquals(1, locatedBlocks.size());
        LocatedBlock block = locatedBlocks.get(0);
        DatanodeInfo[] oldNodes = block.getLocations();
        assertEquals(3, oldNodes.length);
        ExtendedBlock b = block.getBlock();
        // add a fourth datanode to the cluster
        cluster.startDataNodes(CONF, 1, true, null, NEW_RACKS);
        cluster.waitActive();
        DatanodeInfo[] datanodes = client.datanodeReport(DatanodeReportType.ALL);
        // find out the new node
        DatanodeInfo newNode = null;
        for (DatanodeInfo node : datanodes) {
            boolean isNewNode = true;
            for (DatanodeInfo oldNode : oldNodes) {
                if (node.equals(oldNode)) {
                    isNewNode = false;
                    break;
                }
            }
            if (isNewNode) {
                newNode = node;
                break;
            }
        }
        assertTrue(newNode != null);
        DatanodeInfo source = null;
        ArrayList<DatanodeInfo> proxies = new ArrayList<DatanodeInfo>(2);
        for (DatanodeInfo node : datanodes) {
            if (node != newNode) {
                if (node.getNetworkLocation().equals(newNode.getNetworkLocation())) {
                    source = node;
                } else {
                    proxies.add(node);
                }
            }
        }
        // current state: the newNode is on RACK2, and "source" is the other dn on RACK2.
        // the two datanodes on RACK0 and RACK1 are in "proxies".
        // "source" and both "proxies" all contain the block, while newNode doesn't yet.
        assertTrue(source != null && proxies.size() == 2);
        // start to replace the block
        // case 1: proxySource does not contain the block
        LOG.info("Testcase 1: Proxy " + newNode + " does not contain the block " + b);
        assertFalse(replaceBlock(b, source, newNode, proxies.get(0)));
        // case 2: destination already contains the block
        LOG.info("Testcase 2: Destination " + proxies.get(1) + " contains the block " + b);
        assertFalse(replaceBlock(b, source, proxies.get(0), proxies.get(1)));
        // case 3: correct case
        LOG.info("Testcase 3: Source=" + source + " Proxy=" + proxies.get(0) + " Destination=" + newNode);
        assertTrue(replaceBlock(b, source, proxies.get(0), newNode));
        // after cluster has time to resolve the over-replication,
        // block locations should contain two proxies and newNode
        // but not source
        checkBlocks(new DatanodeInfo[] { newNode, proxies.get(0), proxies.get(1) }, fileName.toString(), DEFAULT_BLOCK_SIZE, REPLICATION_FACTOR, client);
        // case 4: proxies.get(0) is not a valid del hint
        // expect either source or newNode replica to be deleted instead
        LOG.info("Testcase 4: invalid del hint " + proxies.get(0));
        assertTrue(replaceBlock(b, proxies.get(0), proxies.get(1), source));
        // after cluster has time to resolve the over-replication,
        // block locations may be any 3 of the 4 datanodes, since after the
        // deletion the number of racks is still >= 2 for sure.
        // See HDFS-9314 for details, especially the comment on 18/Nov/15 14:09.
        checkBlocks(new DatanodeInfo[] {}, fileName.toString(), DEFAULT_BLOCK_SIZE, REPLICATION_FACTOR, client);
    } finally {
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DFSClient (org.apache.hadoop.hdfs.DFSClient), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), InetSocketAddress (java.net.InetSocketAddress), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), ArrayList (java.util.ArrayList), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), Random (java.util.Random), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), Test (org.junit.Test)
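
The checkBlocks helper used above is defined elsewhere in the test class and is not shown here; presumably it re-reads the block's locations from the NameNode and compares them against the expected set. A minimal sketch of that lookup, reusing the client, fileName, and DEFAULT_BLOCK_SIZE from the test:

// Sketch: re-fetch the single block's current locations from the NameNode.
LocatedBlock refreshed = client.getNamenode()
        .getBlockLocations(fileName.toString(), 0, DEFAULT_BLOCK_SIZE)
        .getLocatedBlocks().get(0);
for (DatanodeInfo loc : refreshed.getLocations()) {
    LOG.info("Replica now on " + loc + " (rack " + loc.getNetworkLocation() + ")");
}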

Example 79 with DFSClient

Use of org.apache.hadoop.hdfs.DFSClient in project hadoop by apache.

Class TestReadOnlySharedStorage, method setup.

/**
   * Set up a {@link MiniDFSCluster}.
   * Create a block with both {@link State#NORMAL} and {@link State#READ_ONLY_SHARED} replicas.
   */
@Before
public void setup() throws IOException, InterruptedException {
    conf = new HdfsConfiguration();
    SimulatedFSDataset.setFactory(conf);
    Configuration[] overlays = new Configuration[NUM_DATANODES];
    for (int i = 0; i < overlays.length; i++) {
        overlays[i] = new Configuration();
        if (i == RO_NODE_INDEX) {
            overlays[i].setEnum(SimulatedFSDataset.CONFIG_PROPERTY_STATE, READ_ONLY_SHARED);
        }
    }
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).dataNodeConfOverlays(overlays).build();
    fs = cluster.getFileSystem();
    blockManager = cluster.getNameNode().getNamesystem().getBlockManager();
    datanodeManager = blockManager.getDatanodeManager();
    client = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), cluster.getConfiguration(0));
    for (int i = 0; i < NUM_DATANODES; i++) {
        DataNode dataNode = cluster.getDataNodes().get(i);
        validateStorageState(BlockManagerTestUtil.getStorageReportsForDatanode(datanodeManager.getDatanode(dataNode.getDatanodeId())), i == RO_NODE_INDEX ? READ_ONLY_SHARED : NORMAL);
    }
    // Create a 1 block file
    DFSTestUtil.createFile(fs, PATH, BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE, (short) 1, seed);
    LocatedBlock locatedBlock = getLocatedBlock();
    extendedBlock = locatedBlock.getBlock();
    block = extendedBlock.getLocalBlock();
    storedBlock = blockManager.getStoredBlock(block);
    assertThat(locatedBlock.getLocations().length, is(1));
    normalDataNode = locatedBlock.getLocations()[0];
    readOnlyDataNode = datanodeManager.getDatanode(cluster.getDataNodes().get(RO_NODE_INDEX).getDatanodeId());
    assertThat(normalDataNode, is(not(readOnlyDataNode)));
    validateNumberReplicas(1);
    // Inject the block into the datanode with READ_ONLY_SHARED storage 
    cluster.injectBlocks(0, RO_NODE_INDEX, Collections.singleton(block));
    // There should now be 2 *locations* for the block
    // Must wait until the NameNode has processed the block report for the injected blocks
    waitForLocations(2);
}
Also used: DFSClient (org.apache.hadoop.hdfs.DFSClient), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), InetSocketAddress (java.net.InetSocketAddress), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), Before (org.junit.Before)
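
waitForLocations and getLocatedBlock are helpers of the test class that are not shown in this excerpt. One plausible shape of the wait, sketched with GenericTestUtils.waitFor against the same client and PATH; the polling intervals are made up, and Guava's com.google.common.base.Supplier plus java.util.concurrent.TimeoutException are assumed to be imported:

// Sketch only: poll the NameNode until the block reports the expected number of locations.
private void waitForLocations(final int expected) throws IOException, InterruptedException {
    try {
        GenericTestUtils.waitFor(new Supplier<Boolean>() {
            @Override
            public Boolean get() {
                try {
                    // getLocatedBlocks(src, start) returns the located blocks for PATH
                    return client.getLocatedBlocks(PATH.toString(), 0)
                            .get(0).getLocations().length == expected;
                } catch (IOException e) {
                    return false;
                }
            }
        }, 500, 60000);
    } catch (TimeoutException e) {
        throw new IOException("Timed out waiting for " + expected + " block locations", e);
    }
}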

Example 80 with DFSClient

Use of org.apache.hadoop.hdfs.DFSClient in project hadoop by apache.

Class TestIncrementalBrVariations, method startUpCluster.

@Before
public void startUpCluster() throws IOException {
    conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
    fs = cluster.getFileSystem();
    client = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), cluster.getConfiguration(0));
    dn0 = cluster.getDataNodes().get(0);
    poolId = cluster.getNamesystem().getBlockPoolId();
    dn0Reg = dn0.getDNRegistrationForBP(poolId);
}
Also used: DFSClient (org.apache.hadoop.hdfs.DFSClient), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), InetSocketAddress (java.net.InetSocketAddress), Before (org.junit.Before)
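
Only the @Before method is shown; the matching @After teardown is not part of this excerpt. A conventional sketch for the fields initialized above (names are illustrative, not necessarily the test's own):

// Sketch of a typical JUnit teardown for the client and cluster created in startUpCluster().
@After
public void shutDownCluster() throws IOException {
    if (client != null) {
        client.close();
        client = null;
    }
    if (cluster != null) {
        cluster.shutdown();
        cluster = null;
    }
}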

Aggregations

DFSClient (org.apache.hadoop.hdfs.DFSClient): 97
Test (org.junit.Test): 53
IOException (java.io.IOException): 35
Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes): 27
FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle): 26
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 18
Path (org.apache.hadoop.fs.Path): 18
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 17
InetSocketAddress (java.net.InetSocketAddress): 13
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 13
Configuration (org.apache.hadoop.conf.Configuration): 12
NfsConfiguration (org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration): 12
FileSystem (org.apache.hadoop.fs.FileSystem): 11
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 11
HdfsDataOutputStream (org.apache.hadoop.hdfs.client.HdfsDataOutputStream): 9
WccData (org.apache.hadoop.nfs.nfs3.response.WccData): 9
ShellBasedIdMapping (org.apache.hadoop.security.ShellBasedIdMapping): 8
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 7
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 7
ArrayList (java.util.ArrayList): 6