Example 21 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.

From class TestFileAppend, method testBreakHardlinksIfNeeded.

@Test
public void testBreakHardlinksIfNeeded() throws IOException {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
    DFSClient client = new DFSClient(addr, conf);
    try {
        // create a new file, write to it and close it.
        Path file1 = new Path("/filestatus.dat");
        FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
        writeFile(stm);
        stm.close();
        // Get a handle to the datanode
        DataNode[] dn = cluster.listDataNodes();
        assertTrue("There should be only one datanode but found " + dn.length, dn.length == 1);
        LocatedBlocks locations = client.getNamenode().getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
        List<LocatedBlock> blocks = locations.getLocatedBlocks();
        final FsDatasetSpi<?> fsd = dn[0].getFSDataset();
        // Create hardlinks for every other block
        for (int i = 0; i < blocks.size(); i = i + 2) {
            ExtendedBlock b = blocks.get(i).getBlock();
            final File f = FsDatasetTestUtil.getBlockFile(fsd, b.getBlockPoolId(), b.getLocalBlock());
            File link = new File(f.toString() + ".link");
            System.out.println("Creating hardlink for File " + f + " to " + link);
            HardLink.createHardLink(f, link);
        }
        // Detach all blocks. This should remove hardlinks (if any)
        for (int i = 0; i < blocks.size(); i++) {
            ExtendedBlock b = blocks.get(i).getBlock();
            System.out.println("breakHardlinksIfNeeded detaching block " + b);
            assertTrue("breakHardlinksIfNeeded(" + b + ") should have returned true", FsDatasetTestUtil.breakHardlinksIfNeeded(fsd, b));
        }
        // Since the blocks were already detached earlier, these calls should
        // return false
        for (int i = 0; i < blocks.size(); i++) {
            ExtendedBlock b = blocks.get(i).getBlock();
            System.out.println("breakHardlinksIfNeeded re-attempting to " + "detach block " + b);
            assertTrue("breakHardlinksIfNeeded(" + b + ") should have returned false", FsDatasetTestUtil.breakHardlinksIfNeeded(fsd, b));
        }
    } finally {
        client.close();
        fs.close();
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) InetSocketAddress(java.net.InetSocketAddress) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FileSystem(org.apache.hadoop.fs.FileSystem) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) File(java.io.File) Test(org.junit.Test)
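All of these examples share one round trip: DFSClient#getNamenode() hands back the namenode's ClientProtocol, getBlockLocations(src, offset, length) returns a LocatedBlocks, and each LocatedBlock in it pairs an ExtendedBlock with the DatanodeInfo replicas that hold it. A minimal standalone sketch of just that round trip against a MiniDFSCluster, as in the test above (the file name and sizes here are arbitrary, not taken from the test):

import java.net.InetSocketAddress;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class ListBlockLocations {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
            cluster.waitActive();
            // Write a small single-replica file so there is a block to locate.
            Path file = new Path("/demo.dat");
            DFSTestUtil.createFile(cluster.getFileSystem(), file, 4096L, (short) 1, 0L);
            InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
            DFSClient client = new DFSClient(addr, conf);
            try {
                // One RPC returns every block in the range plus its replica locations.
                LocatedBlocks locations = client.getNamenode().getBlockLocations(file.toString(), 0, Long.MAX_VALUE);
                List<LocatedBlock> blocks = locations.getLocatedBlocks();
                for (LocatedBlock lb : blocks) {
                    System.out.println("block " + lb.getBlock());
                    for (DatanodeInfo dn : lb.getLocations()) {
                        System.out.println("  replica on " + dn);
                    }
                }
            } finally {
                client.close();
            }
        } finally {
            cluster.shutdown();
        }
    }
}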

Example 22 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.

From class TestGetBlocks, method testGetBlocks.

/** test getBlocks */
@Test
public void testGetBlocks() throws Exception {
    final Configuration CONF = new HdfsConfiguration();
    final short REPLICATION_FACTOR = (short) 2;
    final int DEFAULT_BLOCK_SIZE = 1024;
    CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
    CONF.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(REPLICATION_FACTOR).build();
    try {
        cluster.waitActive();
        // the third block will not be visible to getBlocks
        long fileLen = 2 * DEFAULT_BLOCK_SIZE + 1;
        DFSTestUtil.createFile(cluster.getFileSystem(), new Path("/tmp.txt"), fileLen, REPLICATION_FACTOR, 0L);
        // get blocks & data nodes
        List<LocatedBlock> locatedBlocks;
        DatanodeInfo[] dataNodes = null;
        boolean notWritten;
        do {
            final DFSClient dfsclient = new DFSClient(DFSUtilClient.getNNAddress(CONF), CONF);
            locatedBlocks = dfsclient.getNamenode().getBlockLocations("/tmp.txt", 0, fileLen).getLocatedBlocks();
            assertEquals(3, locatedBlocks.size());
            notWritten = false;
            for (int i = 0; i < 2; i++) {
                dataNodes = locatedBlocks.get(i).getLocations();
                if (dataNodes.length != REPLICATION_FACTOR) {
                    notWritten = true;
                    try {
                        Thread.sleep(10);
                    } catch (InterruptedException e) {
                        // Ignore and re-check until every replica is reported.
                    }
                    break;
                }
            }
        } while (notWritten);
        // get RPC client to namenode
        InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
        NamenodeProtocol namenode = NameNodeProxies.createProxy(CONF, DFSUtilClient.getNNUri(addr), NamenodeProtocol.class).getProxy();
        // get blocks of size fileLen from dataNodes[0]
        BlockWithLocations[] locs;
        locs = namenode.getBlocks(dataNodes[0], fileLen).getBlocks();
        assertEquals(2, locs.length);
        assertEquals(2, locs[0].getStorageIDs().length);
        assertEquals(2, locs[1].getStorageIDs().length);
        // get blocks of size BlockSize from dataNodes[0]
        locs = namenode.getBlocks(dataNodes[0], DEFAULT_BLOCK_SIZE).getBlocks();
        assertEquals(1, locs.length);
        assertEquals(2, locs[0].getStorageIDs().length);
        // get blocks of size 1 from dataNodes[0]
        locs = namenode.getBlocks(dataNodes[0], 1).getBlocks();
        assertEquals(1, locs.length);
        assertEquals(2, locs[0].getStorageIDs().length);
        // get blocks of size 0 from dataNodes[0]
        getBlocksWithException(namenode, dataNodes[0], 0);
        // get blocks of size -1 from dataNodes[0]
        getBlocksWithException(namenode, dataNodes[0], -1);
        // getBlocks from a non-existent datanode should fail regardless of the requested size
        DatanodeInfo info = DFSTestUtil.getDatanodeInfo("1.2.3.4");
        getBlocksWithException(namenode, info, 2);
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) Configuration(org.apache.hadoop.conf.Configuration) InetSocketAddress(java.net.InetSocketAddress) BlockWithLocations(org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) NamenodeProtocol(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol) Test(org.junit.Test)
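The getBlocksWithException helper is not part of this excerpt. A plausible reconstruction, assuming it only asserts that NamenodeProtocol#getBlocks rejects the given arguments; the RemoteException handling here is an assumption, not confirmed project code:

import static org.junit.Assert.assertTrue;

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.ipc.RemoteException;

class GetBlocksHelper {
    // Hypothetical reconstruction of the helper used in the test above:
    // getBlocks() should fail for size <= 0 and for unknown datanodes.
    static void getBlocksWithException(NamenodeProtocol namenode,
            DatanodeInfo datanode, long size) throws IOException {
        boolean threw = false;
        try {
            namenode.getBlocks(datanode, size);
        } catch (RemoteException e) {
            // Server-side rejections surface as RemoteExceptions over RPC.
            threw = true;
        }
        assertTrue("getBlocks(" + datanode + ", " + size + ") should fail", threw);
    }
}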

Example 23 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.

From class TestFileCreation, method testLeaseExpireHardLimit.

/**
   * Create a file, write something, hflush but not close.
   * Then change lease period and wait for lease recovery.
   * Finally, read the block directly from each Datanode and verify the content.
   */
@Test
public void testLeaseExpireHardLimit() throws Exception {
    System.out.println("testLeaseExpireHardLimit start");
    final long leasePeriod = 1000;
    final int DATANODE_NUM = 3;
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
    // create cluster
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
    DistributedFileSystem dfs = null;
    try {
        cluster.waitActive();
        dfs = cluster.getFileSystem();
        // create a new file.
        final String f = DIR + "foo";
        final Path fpath = new Path(f);
        HdfsDataOutputStream out = create(dfs, fpath, DATANODE_NUM);
        out.write("something".getBytes());
        out.hflush();
        int actualRepl = out.getCurrentBlockReplication();
        assertTrue(f + " should be replicated to " + DATANODE_NUM + " datanodes.", actualRepl == DATANODE_NUM);
        // set the soft and hard limit to be 1 second so that the
        // namenode triggers lease recovery
        cluster.setLeasePeriod(leasePeriod, leasePeriod);
        // wait for the lease to expire
        try {
            Thread.sleep(5 * leasePeriod);
        } catch (InterruptedException e) {
            // Ignore; sleeping past the hard limit is all that matters.
        }
        LocatedBlocks locations = dfs.dfs.getNamenode().getBlockLocations(f, 0, Long.MAX_VALUE);
        assertEquals(1, locations.locatedBlockCount());
        LocatedBlock locatedblock = locations.getLocatedBlocks().get(0);
        int successcount = 0;
        for (DatanodeInfo datanodeinfo : locatedblock.getLocations()) {
            DataNode datanode = cluster.getDataNode(datanodeinfo.getIpcPort());
            ExtendedBlock blk = locatedblock.getBlock();
            try (BufferedReader in = new BufferedReader(new InputStreamReader(datanode.getFSDataset().getBlockInputStream(blk, 0)))) {
                assertEquals("something", in.readLine());
                successcount++;
            }
        }
        System.out.println("successcount=" + successcount);
        assertTrue(successcount > 0);
    } finally {
        IOUtils.closeStream(dfs);
        cluster.shutdown();
    }
    System.out.println("testLeaseExpireHardLimit successful");
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) Configuration(org.apache.hadoop.conf.Configuration) InputStreamReader(java.io.InputStreamReader) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) BufferedReader(java.io.BufferedReader) HdfsDataOutputStream(org.apache.hadoop.hdfs.client.HdfsDataOutputStream) Test(org.junit.Test)
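Reading via getBlockInputStream checks each replica at the FsDatasetSpi level. As a hedged aside, once lease recovery has closed the file, the hflushed content should also be visible through the ordinary client read path; a sketch only, reusing dfs and fpath from the test above:

// Sketch: public-API counterpart of the per-replica check above.
try (BufferedReader in = new BufferedReader(new InputStreamReader(dfs.open(fpath)))) {
    assertEquals("something", in.readLine());
}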

Example 24 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.

From class TestInjectionForSimulatedStorage, method waitForBlockReplication.

// Waits for all of the blocks to have expected replication
private void waitForBlockReplication(String filename, ClientProtocol namenode, int expected, long maxWaitSec) throws IOException {
    long start = Time.monotonicNow();
    // Wait for all of the blocks to be replicated.
    LOG.info("Checking for block replication for " + filename);
    LocatedBlocks blocks = namenode.getBlockLocations(filename, 0, Long.MAX_VALUE);
    assertEquals(numBlocks, blocks.locatedBlockCount());
    for (int i = 0; i < numBlocks; ++i) {
        LOG.info("Checking for block:" + (i + 1));
        while (true) {
            // Loop to check block i (usually once block 0 is done, all of them are done).
            blocks = namenode.getBlockLocations(filename, 0, Long.MAX_VALUE);
            assertEquals(numBlocks, blocks.locatedBlockCount());
            LocatedBlock block = blocks.get(i);
            int actual = block.getLocations().length;
            if (actual == expected) {
                LOG.info("Got enough replicas for " + (i + 1) + "th block " + block.getBlock() + ", got " + actual + ".");
                break;
            }
            LOG.info("Not enough replicas for " + (i + 1) + "th block " + block.getBlock() + " yet. Expecting " + expected + ", got " + actual + ".");
            if (maxWaitSec > 0 && (Time.monotonicNow() - start) > (maxWaitSec * 1000)) {
                throw new IOException("Timedout while waiting for all blocks to " + " be replicated for " + filename);
            }
            try {
                Thread.sleep(500);
            } catch (InterruptedException ignored) {
            }
        }
    }
}
Also used : LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) IOException(java.io.IOException)
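A hedged usage sketch for this helper (the path, replication factor, and timeout are arbitrary; client stands for a DFSClient as in the earlier examples, whose getNamenode() supplies the ClientProtocol argument):

// Hypothetical call site: wait up to 60 seconds for every block of
// /tmp/test.dat to reach 3 replicas.
waitForBlockReplication("/tmp/test.dat", client.getNamenode(), 3, 60);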

Example 25 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.

From class TestBlockToken, method testBlockTokenRpcLeak.

/**
   * Test that fast repeated invocations of createClientDatanodeProtocolProxy
   * will not end up using up thousands of sockets. This is a regression test
   * for HDFS-1965.
   */
private void testBlockTokenRpcLeak(boolean enableProtobuf) throws Exception {
    Configuration conf = new Configuration();
    conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(conf);
    Assume.assumeTrue(FD_DIR.exists());
    BlockTokenSecretManager sm = new BlockTokenSecretManager(blockKeyUpdateInterval, blockTokenLifetime, 0, 1, "fake-pool", null, enableProtobuf);
    Token<BlockTokenIdentifier> token = sm.generateToken(block3, EnumSet.allOf(BlockTokenIdentifier.AccessMode.class));
    final Server server = createMockDatanode(sm, token, conf);
    server.start();
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
    ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
    LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
    fakeBlock.setBlockToken(token);
    // Create another RPC proxy with the same configuration - this will never
    // attempt to connect anywhere -- but it causes the refcount on the
    // RPC "Client" object to stay above 0 such that RPC.stopProxy doesn't
    // actually close the TCP connections to the real target DN.
    ClientDatanodeProtocol proxyToNoWhere = RPC.getProxy(ClientDatanodeProtocol.class, ClientDatanodeProtocol.versionID, new InetSocketAddress("1.1.1.1", 1), UserGroupInformation.createRemoteUser("junk"), conf, NetUtils.getDefaultSocketFactory(conf));
    ClientDatanodeProtocol proxy = null;
    int fdsAtStart = countOpenFileDescriptors();
    try {
        long endTime = Time.now() + 3000;
        while (Time.now() < endTime) {
            proxy = DFSUtilClient.createClientDatanodeProtocolProxy(fakeDnId, conf, 1000, false, fakeBlock);
            assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
            if (proxy != null) {
                RPC.stopProxy(proxy);
            }
            LOG.info("Num open fds:" + countOpenFileDescriptors());
        }
        int fdsAtEnd = countOpenFileDescriptors();
        if (fdsAtEnd - fdsAtStart > 50) {
            fail("Leaked " + (fdsAtEnd - fdsAtStart) + " fds!");
        }
    } finally {
        server.stop();
    }
    RPC.stopProxy(proxyToNoWhere);
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) SaslRpcServer(org.apache.hadoop.security.SaslRpcServer) Server(org.apache.hadoop.ipc.Server) InetSocketAddress(java.net.InetSocketAddress) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) ClientDatanodeProtocol(org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) Block(org.apache.hadoop.hdfs.protocol.Block)
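FD_DIR and countOpenFileDescriptors() are also outside this excerpt. A plausible reconstruction, assuming they count entries in Linux's per-process descriptor table (which would explain the Assume.assumeTrue(FD_DIR.exists()) guard); an assumption, not confirmed project code:

import java.io.File;

class FdCounter {
    // Hypothetical reconstruction: on Linux, /proc/self/fd holds one symlink
    // per file descriptor currently open in this process.
    static final File FD_DIR = new File("/proc/self/fd");

    static int countOpenFileDescriptors() {
        String[] entries = FD_DIR.list();
        return entries == null ? -1 : entries.length;
    }
}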

Aggregations

LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 196 uses
Test (org.junit.Test): 92 uses
Path (org.apache.hadoop.fs.Path): 86 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 72 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 49 uses
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 49 uses
Configuration (org.apache.hadoop.conf.Configuration): 40 uses
IOException (java.io.IOException): 34 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 34 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 33 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 33 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 25 uses
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 25 uses
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 24 uses
StorageType (org.apache.hadoop.fs.StorageType): 23 uses
ArrayList (java.util.ArrayList): 22 uses
Block (org.apache.hadoop.hdfs.protocol.Block): 16 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 15 uses
InetSocketAddress (java.net.InetSocketAddress): 11 uses
File (java.io.File): 9 uses