
Example 81 with LocatedBlocks

use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

From class TestFileAppend3, method testTC7.

/**
   * TC7: Corrupted replicas are present.
   * @throws Exception if the append or the verification fails
   */
private void testTC7(boolean appendToNewBlock) throws Exception {
    final short repl = 2;
    final Path p = new Path("/TC7/foo" + (appendToNewBlock ? "0" : "1"));
    System.out.println("p=" + p);
    //a. Create file with replication factor of 2. Write half block of data. Close file.
    final int len1 = (int) (BLOCK_SIZE / 2);
    {
        FSDataOutputStream out = fs.create(p, false, buffersize, repl, BLOCK_SIZE);
        AppendTestUtil.write(out, 0, len1);
        out.close();
    }
    DFSTestUtil.waitReplication(fs, p, repl);
    //b. Log into one datanode that has one replica of this block.
    //   Find the block file on this datanode and truncate it to zero size.
    final LocatedBlocks locatedblocks = fs.dfs.getNamenode().getBlockLocations(p.toString(), 0L, len1);
    assertEquals(1, locatedblocks.locatedBlockCount());
    final LocatedBlock lb = locatedblocks.get(0);
    final ExtendedBlock blk = lb.getBlock();
    assertEquals(len1, lb.getBlockSize());
    DatanodeInfo[] datanodeinfos = lb.getLocations();
    assertEquals(repl, datanodeinfos.length);
    final DataNode dn = cluster.getDataNode(datanodeinfos[0].getIpcPort());
    cluster.getMaterializedReplica(dn, blk).truncateData(0);
    //c. Open file in "append mode".  Append a new block worth of data. Close file.
    final int len2 = (int) BLOCK_SIZE;
    {
        FSDataOutputStream out = appendToNewBlock ? fs.append(p, EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null) : fs.append(p);
        AppendTestUtil.write(out, len1, len2);
        out.close();
    }
    //d. Reopen file and read two blocks worth of data.
    AppendTestUtil.check(fs, p, len1 + len2);
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream)
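
The lookup in step (b) above goes straight to the namenode through fs.dfs.getNamenode().getBlockLocations(). A minimal sketch of the same lookup written with the DFSTestUtil helper from the HDFS test module, assuming DFSTestUtil.getAllBlocks(FileSystem, Path) is available in the Hadoop version under test and that the usual JUnit asserts are already imported in the test class (the firstBlockOf name is hypothetical, not part of the Hadoop source):

private LocatedBlock firstBlockOf(FileSystem fs, Path p) throws IOException {
    // getAllBlocks() opens the file and returns one LocatedBlock per block,
    // each carrying the ExtendedBlock plus the DatanodeInfo[] replica locations.
    List<LocatedBlock> blocks = DFSTestUtil.getAllBlocks(fs, p);
    // The test writes only half a block, so exactly one block is expected here.
    assertEquals(1, blocks.size());
    return blocks.get(0);
}

Either route yields the same LocatedBlock, so the block-size and replica-count assertions in the test would be unchanged.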

Example 82 with LocatedBlocks

use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

From class TestFileAppend3, method testTC11.

/**
   * TC11: Racing rename
   */
private void testTC11(boolean appendToNewBlock) throws Exception {
    final Path p = new Path("/TC11/foo" + (appendToNewBlock ? "0" : "1"));
    System.out.println("p=" + p);
    //a. Create file and write one block of data. Close file.
    final int len1 = (int) BLOCK_SIZE;
    {
        FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE);
        AppendTestUtil.write(out, 0, len1);
        out.close();
    }
    //b. Reopen file in "append" mode. Append half block of data.
    FSDataOutputStream out = appendToNewBlock ? fs.append(p, EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null) : fs.append(p);
    final int len2 = (int) BLOCK_SIZE / 2;
    AppendTestUtil.write(out, len1, len2);
    out.hflush();
    //c. Rename file to file.new.
    final Path pnew = new Path(p + ".new");
    assertTrue(fs.rename(p, pnew));
    //d. Close file handle that was opened in (b). 
    out.close();
    //check block sizes
    final long len = fs.getFileStatus(pnew).getLen();
    final LocatedBlocks locatedblocks = fs.dfs.getNamenode().getBlockLocations(pnew.toString(), 0L, len);
    final int numblock = locatedblocks.locatedBlockCount();
    for (int i = 0; i < numblock; i++) {
        final LocatedBlock lb = locatedblocks.get(i);
        final ExtendedBlock blk = lb.getBlock();
        final long size = lb.getBlockSize();
        if (i < numblock - 1) {
            assertEquals(BLOCK_SIZE, size);
        }
        for (DatanodeInfo datanodeinfo : lb.getLocations()) {
            final DataNode dn = cluster.getDataNode(datanodeinfo.getIpcPort());
            final Block metainfo = DataNodeTestUtils.getFSDataset(dn).getStoredBlock(blk.getBlockPoolId(), blk.getBlockId());
            assertEquals(size, metainfo.getNumBytes());
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream)
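
Because the file is closed before getBlockLocations() is called, the block sizes the namenode returns must also add up to the file length it reports. A small consistency check one could append to the test above, sketch only, reusing the locatedblocks, len1 and len2 variables from the method:

// Sketch: for a closed file, the per-block sizes should sum to the reported file length.
long total = 0;
for (LocatedBlock b : locatedblocks.getLocatedBlocks()) {
    total += b.getBlockSize();
}
assertEquals(locatedblocks.getFileLength(), total);
assertEquals(len1 + len2, total);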

Example 83 with LocatedBlocks

use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

From class TestFileAppend, method testMultiAppend2.

/**
   * Old replica of the block should not be accepted as valid for append/read
   */
@Test
public void testMultiAppend2() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.set("dfs.client.block.write.replace-datanode-on-failure.enable", "false");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    DistributedFileSystem fs = null;
    final String hello = "hello\n";
    try {
        fs = cluster.getFileSystem();
        Path path = new Path("/test");
        FSDataOutputStream out = fs.create(path);
        out.writeBytes(hello);
        out.close();
        // stop one datanode
        DataNodeProperties dnProp = cluster.stopDataNode(0);
        String dnAddress = dnProp.datanode.getXferAddress().toString();
        if (dnAddress.startsWith("/")) {
            dnAddress = dnAddress.substring(1);
        }
        // append again to bump genstamps
        for (int i = 0; i < 2; i++) {
            out = fs.append(path, EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
            out.writeBytes(hello);
            out.close();
        }
        // re-open the file so that its last block is in the under-construction state
        out = fs.append(path, EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
        cluster.restartDataNode(dnProp, true);
        // wait till the block report comes
        Thread.sleep(2000);
        out.writeBytes(hello);
        out.close();
        // check the block locations
        LocatedBlocks blocks = fs.getClient().getLocatedBlocks(path.toString(), 0L);
        // since we appended the file 3 times, there should be 4 blocks
        assertEquals(4, blocks.getLocatedBlocks().size());
        for (LocatedBlock block : blocks.getLocatedBlocks()) {
            assertEquals(hello.length(), block.getBlockSize());
        }
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < 4; i++) {
            sb.append(hello);
        }
        final byte[] content = sb.toString().getBytes();
        AppendTestUtil.checkFullFile(fs, path, content.length, content, "Read /test");
        // restart namenode to make sure the editlog can be properly applied
        cluster.restartNameNode(true);
        cluster.waitActive();
        AppendTestUtil.checkFullFile(fs, path, content.length, content, "Read /test");
        blocks = fs.getClient().getLocatedBlocks(path.toString(), 0L);
        // since we appended the file 3 times, there should be 4 blocks
        assertEquals(4, blocks.getLocatedBlocks().size());
        for (LocatedBlock block : blocks.getLocatedBlocks()) {
            assertEquals(hello.length(), block.getBlockSize());
        }
    } finally {
        IOUtils.closeStream(fs);
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) DataNodeProperties(org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
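
The Thread.sleep(2000) above is only there to let the restarted datanode's block report reach the namenode. A hedged alternative, assuming the MiniDFSCluster trigger helpers are available in the Hadoop version under test, makes that wait explicit instead of purely time-based:

// Sketch only: after the restart, ask every datanode to send a block report
// right away rather than relying on a fixed sleep interval.
cluster.restartDataNode(dnProp, true);
cluster.waitActive();
cluster.triggerBlockReports();

A short wait may still be needed for the namenode to process the reports, but the explicit trigger removes most of the timing sensitivity.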

Example 84 with LocatedBlocks

use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

From class TestFileChecksum, method getDataNodeToKill.

/**
   * Determine the datanode that hosts the first block of the file. For
   * simplicity this just returns the index of the first replica location,
   * since that is the datanode tried first.
   */
int getDataNodeToKill(String filePath) throws IOException {
    LocatedBlocks locatedBlocks = client.getLocatedBlocks(filePath, 0);
    LocatedBlock locatedBlock = locatedBlocks.get(0);
    DatanodeInfo[] datanodes = locatedBlock.getLocations();
    DatanodeInfo chosenDn = datanodes[0];
    int idx = 0;
    for (DataNode dn : cluster.getDataNodes()) {
        if (dn.getInfoPort() == chosenDn.getInfoPort()) {
            return idx;
        }
        idx++;
    }
    return -1;
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock)
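
Matching on the HTTP info port works because every datanode in a MiniDFSCluster gets its own ephemeral ports; any per-datanode port would serve. A sketch of the same helper keyed on the data-transfer port instead (the getFirstBlockDataNodeIndex name is hypothetical, and the client and cluster fields are assumed to be the same as in the test class):

// Sketch only: index of the datanode hosting the first replica of the first block,
// matched on the data-transfer (xfer) port rather than the info port.
int getFirstBlockDataNodeIndex(String filePath) throws IOException {
    DatanodeInfo chosen = client.getLocatedBlocks(filePath, 0).get(0).getLocations()[0];
    List<DataNode> dns = cluster.getDataNodes();
    for (int i = 0; i < dns.size(); i++) {
        if (dns.get(i).getXferPort() == chosen.getXferPort()) {
            return i;
        }
    }
    // not found; should not happen for a healthy, fully replicated file
    return -1;
}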

Example 85 with LocatedBlocks

use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

From class TestHFlush, method hSyncEndBlock_00.

/**
   * Test hsync with END_BLOCK flag.
   */
@Test
public void hSyncEndBlock_00() throws IOException {
    final int preferredBlockSize = 1024;
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, preferredBlockSize);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    DistributedFileSystem fileSystem = cluster.getFileSystem();
    FSDataOutputStream stm = null;
    try {
        Path path = new Path("/" + fName);
        stm = fileSystem.create(path, true, 4096, (short) 2, AppendTestUtil.BLOCK_SIZE);
        System.out.println("Created file " + path.toString());
        ((DFSOutputStream) stm.getWrappedStream()).hsync(EnumSet.of(SyncFlag.END_BLOCK));
        long currentFileLength = fileSystem.getFileStatus(path).getLen();
        assertEquals(0L, currentFileLength);
        LocatedBlocks blocks = fileSystem.dfs.getLocatedBlocks(path.toString(), 0);
        assertEquals(0, blocks.getLocatedBlocks().size());
        // write a block and call hsync(end_block) at the block boundary
        stm.write(new byte[preferredBlockSize]);
        ((DFSOutputStream) stm.getWrappedStream()).hsync(EnumSet.of(SyncFlag.END_BLOCK));
        currentFileLength = fileSystem.getFileStatus(path).getLen();
        assertEquals(preferredBlockSize, currentFileLength);
        blocks = fileSystem.dfs.getLocatedBlocks(path.toString(), 0);
        assertEquals(1, blocks.getLocatedBlocks().size());
        // call hsync then call hsync(end_block) immediately
        stm.write(new byte[preferredBlockSize / 2]);
        stm.hsync();
        ((DFSOutputStream) stm.getWrappedStream()).hsync(EnumSet.of(SyncFlag.END_BLOCK));
        currentFileLength = fileSystem.getFileStatus(path).getLen();
        assertEquals(preferredBlockSize + preferredBlockSize / 2, currentFileLength);
        blocks = fileSystem.dfs.getLocatedBlocks(path.toString(), 0);
        assertEquals(2, blocks.getLocatedBlocks().size());
        stm.write(new byte[preferredBlockSize / 4]);
        stm.hsync();
        currentFileLength = fileSystem.getFileStatus(path).getLen();
        assertEquals(preferredBlockSize + preferredBlockSize / 2 + preferredBlockSize / 4, currentFileLength);
        blocks = fileSystem.dfs.getLocatedBlocks(path.toString(), 0);
        assertEquals(3, blocks.getLocatedBlocks().size());
    } finally {
        IOUtils.cleanup(null, stm, fileSystem);
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
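
All five examples boil down to the same pattern: ask the namenode for a LocatedBlocks, then walk its LocatedBlock entries. A minimal standalone sketch of that pattern, assuming dfs is a DistributedFileSystem connected to a running cluster and p names an existing file (the printBlockLayout name is hypothetical):

// Sketch only: print every block of a file and the datanodes holding each replica.
void printBlockLayout(DistributedFileSystem dfs, Path p) throws IOException {
    long len = dfs.getFileStatus(p).getLen();
    LocatedBlocks blocks = dfs.getClient().getLocatedBlocks(p.toString(), 0L, len);
    for (LocatedBlock lb : blocks.getLocatedBlocks()) {
        System.out.println(lb.getBlock() + " size=" + lb.getBlockSize());
        for (DatanodeInfo dn : lb.getLocations()) {
            System.out.println("  replica on " + dn.getXferAddr());
        }
    }
}

For erasure-coded files the entries come back as LocatedStripedBlock instances (see the Aggregations list below), but the loop itself is unchanged.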

Aggregations

LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 118
Test (org.junit.Test): 67
Path (org.apache.hadoop.fs.Path): 65
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 52
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 33
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 32
Configuration (org.apache.hadoop.conf.Configuration): 29
IOException (java.io.IOException): 20
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 20
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 20
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 18
FileSystem (org.apache.hadoop.fs.FileSystem): 17
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 17
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 13
Block (org.apache.hadoop.hdfs.protocol.Block): 11
InetSocketAddress (java.net.InetSocketAddress): 10
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 10
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 9
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 7
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 7