Example 26 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class FSNamesystem method reportBadBlocks.

/**
   * Client is reporting some bad block locations.
   */
void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
        checkOperation(OperationCategory.WRITE);
        for (int i = 0; i < blocks.length; i++) {
            ExtendedBlock blk = blocks[i].getBlock();
            DatanodeInfo[] nodes = blocks[i].getLocations();
            String[] storageIDs = blocks[i].getStorageIDs();
            for (int j = 0; j < nodes.length; j++) {
                NameNode.stateChangeLog.info("*DIR* reportBadBlocks for block: {} on" + " datanode: {}", blk, nodes[j].getXferAddr());
                blockManager.findAndMarkBlockAsCorrupt(blk, nodes[j], storageIDs == null ? null : storageIDs[j], "client machine reported it");
            }
        }
    } finally {
        writeUnlock("reportBadBlocks");
    }
}
Also used: DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock)
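
For context, here is a minimal client-side sketch (not one of the indexed examples) of the call path that ends up in FSNamesystem#reportBadBlocks. It assumes an already-open DFSClient against a running cluster; the class and variable names are illustrative only.

import java.io.IOException;

import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class ReportBadBlockSketch {

    /** Report every replica of the first block of the given file as corrupt. */
    static void reportFirstBlockAsBad(DFSClient client, String path) throws IOException {
        LocatedBlocks lbs = client.getNamenode().getBlockLocations(path, 0, Long.MAX_VALUE);
        LocatedBlock first = lbs.get(0);
        // The RPC lands in FSNamesystem#reportBadBlocks, which marks each reported
        // (block, datanode, storage) triple as corrupt via findAndMarkBlockAsCorrupt.
        client.reportBadBlocks(new LocatedBlock[] { first });
    }
}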

Example 27 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class TestFileAppend method testConcurrentAppendRead.

@Test(timeout = 10000)
public void testConcurrentAppendRead() throws IOException, TimeoutException, InterruptedException {
    // Create a finalized replica and append to it
    // Read block data and checksum. Verify checksum.
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
    conf.setInt("dfs.min.replication", 1);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        cluster.waitActive();
        DataNode dn = cluster.getDataNodes().get(0);
        FsDatasetSpi<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
        // create a file with 1 byte of data.
        long initialFileLength = 1;
        DistributedFileSystem fs = cluster.getFileSystem();
        Path fileName = new Path("/appendCorruptBlock");
        DFSTestUtil.createFile(fs, fileName, initialFileLength, (short) 1, 0);
        DFSTestUtil.waitReplication(fs, fileName, (short) 1);
        Assert.assertTrue("File not created", fs.exists(fileName));
        // Call FsDatasetImpl#append to append the block file,
        // which converts it to a rbw replica.
        ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
        long newGS = block.getGenerationStamp() + 1;
        ReplicaHandler replicaHandler = dataSet.append(block, newGS, initialFileLength);
        // write data to block file
        ReplicaBeingWritten rbw = (ReplicaBeingWritten) replicaHandler.getReplica();
        ReplicaOutputStreams outputStreams = rbw.createStreams(false, DEFAULT_CHECKSUM);
        OutputStream dataOutput = outputStreams.getDataOut();
        byte[] appendBytes = new byte[1];
        dataOutput.write(appendBytes, 0, 1);
        dataOutput.flush();
        dataOutput.close();
        // update checksum file
        final int smallBufferSize = DFSUtilClient.getSmallBufferSize(conf);
        FsDatasetUtil.computeChecksum(rbw.getMetaFile(), rbw.getMetaFile(), rbw.getBlockFile(), smallBufferSize, conf);
        // read the block
        // the DataNode BlockSender should read from the rbw replica's in-memory
        // checksum, rather than on-disk checksum. Otherwise it will see a
        // checksum mismatch error.
        final byte[] readBlock = DFSTestUtil.readFileBuffer(fs, fileName);
        assertEquals("should have read only one byte!", 1, readBlock.length);
    } finally {
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), ReplicaBeingWritten (org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten), Configuration (org.apache.hadoop.conf.Configuration), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), OutputStream (java.io.OutputStream), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), ReplicaHandler (org.apache.hadoop.hdfs.server.datanode.ReplicaHandler), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), ReplicaOutputStreams (org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams), Test (org.junit.Test)
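
The test above drives FsDatasetSpi#append directly on the datanode. As a point of comparison, a short sketch of the ordinary client-side append path, assuming an existing file on a running cluster (class and method names here are illustrative only):

import java.io.IOException;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClientAppendSketch {

    /** Append a single byte through the public FileSystem API. */
    static void appendOneByte(FileSystem fs, Path file) throws IOException {
        // FileSystem#append has the write pipeline bump the block's generation
        // stamp and reopen the last replica as RBW, which the test does by hand.
        try (FSDataOutputStream out = fs.append(file)) {
            out.write(new byte[] { 1 });
            out.hflush();
        }
    }
}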

Example 28 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class TestFileAppend method testBreakHardlinksIfNeeded.

@Test
public void testBreakHardlinksIfNeeded() throws IOException {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
    DFSClient client = new DFSClient(addr, conf);
    try {
        // create a new file, write to it and close it.
        Path file1 = new Path("/filestatus.dat");
        FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
        writeFile(stm);
        stm.close();
        // Get a handle to the datanode
        DataNode[] dn = cluster.listDataNodes();
        assertTrue("There should be only one datanode but found " + dn.length, dn.length == 1);
        LocatedBlocks locations = client.getNamenode().getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
        List<LocatedBlock> blocks = locations.getLocatedBlocks();
        final FsDatasetSpi<?> fsd = dn[0].getFSDataset();
            // Create hardlinks for a few of the blocks
        for (int i = 0; i < blocks.size(); i = i + 2) {
            ExtendedBlock b = blocks.get(i).getBlock();
            final File f = FsDatasetTestUtil.getBlockFile(fsd, b.getBlockPoolId(), b.getLocalBlock());
            File link = new File(f.toString() + ".link");
            System.out.println("Creating hardlink for File " + f + " to " + link);
            HardLink.createHardLink(f, link);
        }
        // Detach all blocks. This should remove hardlinks (if any)
        for (int i = 0; i < blocks.size(); i++) {
            ExtendedBlock b = blocks.get(i).getBlock();
            System.out.println("breakHardlinksIfNeeded detaching block " + b);
            assertTrue("breakHardlinksIfNeeded(" + b + ") should have returned true", FsDatasetTestUtil.breakHardlinksIfNeeded(fsd, b));
        }
        // Since the blocks were already detached earlier, these calls should return false
        for (int i = 0; i < blocks.size(); i++) {
            ExtendedBlock b = blocks.get(i).getBlock();
            System.out.println("breakHardlinksIfNeeded re-attempting to " + "detach block " + b);
            assertTrue("breakHardlinksIfNeeded(" + b + ") should have returned false", FsDatasetTestUtil.breakHardlinksIfNeeded(fsd, b));
        }
    } finally {
        client.close();
        fs.close();
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), InetSocketAddress (java.net.InetSocketAddress), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), FileSystem (org.apache.hadoop.fs.FileSystem), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), File (java.io.File), Test (org.junit.Test)
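
A small companion sketch for the hardlink checks in this test: given a block file obtained with FsDatasetTestUtil.getBlockFile as above, HardLink.getLinkCount can be used to verify whether the replica still shares its inode with another path. The helper below is hypothetical and not part of the Hadoop tests.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.fs.HardLink;

public class HardlinkCheckSketch {

    /** True if the block file still shares its inode with another path. */
    static boolean isStillHardlinked(File blockFile) throws IOException {
        // If this is still true after breakHardlinksIfNeeded, copy-on-write did not
        // happen and an append would also modify the linked copy.
        return HardLink.getLinkCount(blockFile) > 1;
    }
}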

Example 29 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class TestFileCreation method testLeaseExpireHardLimit.

/**
   * Create a file, write something, hflush but not close.
   * Then change lease period and wait for lease recovery.
   * Finally, read the block directly from each Datanode and verify the content.
   */
@Test
public void testLeaseExpireHardLimit() throws Exception {
    System.out.println("testLeaseExpireHardLimit start");
    final long leasePeriod = 1000;
    final int DATANODE_NUM = 3;
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
    // create cluster
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
    DistributedFileSystem dfs = null;
    try {
        cluster.waitActive();
        dfs = cluster.getFileSystem();
        // create a new file.
        final String f = DIR + "foo";
        final Path fpath = new Path(f);
        HdfsDataOutputStream out = create(dfs, fpath, DATANODE_NUM);
        out.write("something".getBytes());
        out.hflush();
        int actualRepl = out.getCurrentBlockReplication();
        assertTrue(f + " should be replicated to " + DATANODE_NUM + " datanodes.", actualRepl == DATANODE_NUM);
        // set the soft and hard limit to be 1 second so that the
        // namenode triggers lease recovery
        cluster.setLeasePeriod(leasePeriod, leasePeriod);
        // wait for the lease to expire
        try {
            Thread.sleep(5 * leasePeriod);
        } catch (InterruptedException e) {
        }
        LocatedBlocks locations = dfs.dfs.getNamenode().getBlockLocations(f, 0, Long.MAX_VALUE);
        assertEquals(1, locations.locatedBlockCount());
        LocatedBlock locatedblock = locations.getLocatedBlocks().get(0);
        int successcount = 0;
        for (DatanodeInfo datanodeinfo : locatedblock.getLocations()) {
            DataNode datanode = cluster.getDataNode(datanodeinfo.getIpcPort());
            ExtendedBlock blk = locatedblock.getBlock();
            try (BufferedReader in = new BufferedReader(new InputStreamReader(datanode.getFSDataset().getBlockInputStream(blk, 0)))) {
                assertEquals("something", in.readLine());
                successcount++;
            }
        }
        System.out.println("successcount=" + successcount);
        assertTrue(successcount > 0);
    } finally {
        IOUtils.closeStream(dfs);
        cluster.shutdown();
    }
    System.out.println("testLeaseExpireHardLimit successful");
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), Configuration (org.apache.hadoop.conf.Configuration), InputStreamReader (java.io.InputStreamReader), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), BufferedReader (java.io.BufferedReader), HdfsDataOutputStream (org.apache.hadoop.hdfs.client.HdfsDataOutputStream), Test (org.junit.Test)
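
The test forces recovery by shrinking the lease periods and sleeping. An alternative, sketched below under the assumption of a DistributedFileSystem handle and a file left open by another client (the helper name is illustrative), is to trigger lease recovery explicitly:

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {

    /** Ask the NameNode to recover the lease and wait until the file is closed. */
    static void recoverAndWait(DistributedFileSystem dfs, Path openFile)
            throws IOException, InterruptedException {
        // recoverLease returns true once block recovery has finished and the
        // file has been closed on the NameNode.
        while (!dfs.recoverLease(openFile)) {
            Thread.sleep(1000);
        }
    }
}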

Example 30 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class TestPBHelper method testConvertExtendedBlock.

@Test
public void testConvertExtendedBlock() {
    ExtendedBlock b = getExtendedBlock();
    ExtendedBlockProto bProto = PBHelperClient.convert(b);
    ExtendedBlock b1 = PBHelperClient.convert(bProto);
    assertEquals(b, b1);
    b.setBlockId(-1);
    bProto = PBHelperClient.convert(b);
    b1 = PBHelperClient.convert(bProto);
    assertEquals(b, b1);
}
Also used: ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), ExtendedBlockProto (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto), Test (org.junit.Test)
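
The getExtendedBlock() helper used by this test is not shown on this page. A self-contained sketch of the same protobuf round trip, with made-up block values, could look like this:

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;

public class ExtendedBlockRoundTripSketch {

    public static void main(String[] args) {
        // Arguments: block pool id, block id, length, generation stamp (values made up).
        ExtendedBlock b = new ExtendedBlock(
                "BP-12345-127.0.0.1-1400000000000", 1073741825L, 512L, 1001L);
        ExtendedBlockProto proto = PBHelperClient.convert(b);
        ExtendedBlock back = PBHelperClient.convert(proto);
        // All four fields survive the conversion.
        System.out.println(b.equals(back)
                && b.getNumBytes() == back.getNumBytes()
                && b.getGenerationStamp() == back.getGenerationStamp()); // prints true
    }
}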

Aggregations

ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 208 usages
Test (org.junit.Test): 124 usages
Path (org.apache.hadoop.fs.Path): 91 usages
Configuration (org.apache.hadoop.conf.Configuration): 71 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 63 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 62 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 55 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 53 usages
IOException (java.io.IOException): 41 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 41 usages
Block (org.apache.hadoop.hdfs.protocol.Block): 38 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 34 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 32 usages
File (java.io.File): 22 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 20 usages
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 20 usages
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 18 usages
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 18 usages
InetSocketAddress (java.net.InetSocketAddress): 17 usages
ArrayList (java.util.ArrayList): 17 usages