Example 6 with Block

Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

From class TestFsDatasetImpl, method testDeletingBlocks:

@Test
public void testDeletingBlocks() throws IOException {
    HdfsConfiguration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        cluster.waitActive();
        DataNode dn = cluster.getDataNodes().get(0);
        FsDatasetSpi<?> ds = DataNodeTestUtils.getFSDataset(dn);
        ds.addBlockPool(BLOCKPOOL, conf);
        FsVolumeImpl vol;
        try (FsDatasetSpi.FsVolumeReferences volumes = ds.getFsVolumeReferences()) {
            vol = (FsVolumeImpl) volumes.get(0);
        }
        ExtendedBlock eb;
        List<Block> blockList = new ArrayList<>();
        // Create finalized replicas for 63 blocks and remember their
        // local Block objects so they can be invalidated in one call.
        for (int i = 1; i <= 63; i++) {
            eb = new ExtendedBlock(BLOCKPOOL, i, 1, 1000 + i);
            cluster.getFsDatasetTestUtils(0).createFinalizedReplica(eb);
            blockList.add(eb.getLocalBlock());
        }
        // invalidate() only schedules deletion; the async disk service
        // removes the replicas in the background.
        ds.invalidate(BLOCKPOOL, blockList.toArray(new Block[0]));
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
        // Nothing to do
        }
        assertTrue(ds.isDeletingBlock(BLOCKPOOL, blockList.get(0).getBlockId()));
        blockList.clear();
        eb = new ExtendedBlock(BLOCKPOOL, 64, 1, 1064);
        cluster.getFsDatasetTestUtils(0).createFinalizedReplica(eb);
        blockList.add(eb.getLocalBlock());
        ds.invalidate(BLOCKPOOL, blockList.toArray(new Block[0]));
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
        // Nothing to do
        }
        assertFalse(ds.isDeletingBlock(BLOCKPOOL, blockList.get(0).getBlockId()));
    } finally {
        cluster.shutdown();
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), ArrayList (java.util.ArrayList), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), FsVolumeReferences (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), Block (org.apache.hadoop.hdfs.protocol.Block), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), Test (org.junit.Test)
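
The two fixed Thread.sleep(1000) waits make this test timing-sensitive on slow machines. As a hedged alternative, a polling wait can be used instead; this sketch assumes the test can use org.apache.hadoop.test.GenericTestUtils, whose waitFor signature has varied slightly across Hadoop versions:

import java.util.concurrent.TimeoutException;
import org.apache.hadoop.test.GenericTestUtils;

// Poll every 100 ms, for up to 10 s, until the first invalidated block
// shows up in the deleting set; waitFor throws TimeoutException otherwise.
GenericTestUtils.waitFor(
    () -> ds.isDeletingBlock(BLOCKPOOL, blockList.get(0).getBlockId()),
    100, 10000);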

Example 7 with Block

Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

From class FsDatasetImplTestUtils, method createRBW:

@Override
public Replica createRBW(FsVolumeSpi volume, ExtendedBlock eb) throws IOException {
    FsVolumeImpl vol = (FsVolumeImpl) volume;
    final String bpid = eb.getBlockPoolId();
    final Block block = eb.getLocalBlock();
    ReplicaBeingWritten rbw = new ReplicaBeingWritten(
        eb.getLocalBlock(), volume,
        vol.createRbwFile(bpid, block).getParentFile(), null);
    // Materialize empty block and meta files on disk for the new replica,
    // then register it in the dataset's replica map.
    rbw.getBlockFile().createNewFile();
    rbw.getMetaFile().createNewFile();
    dataset.volumeMap.add(bpid, rbw);
    return rbw;
}
Also used: ReplicaBeingWritten (org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Block (org.apache.hadoop.hdfs.protocol.Block)
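
For context, a hedged sketch of how a test might drive this helper through MiniDFSCluster; the cluster, bpid, and block ids below are illustrative assumptions, not taken from the example above:

// Illustrative usage: create an RBW replica on the first volume and
// check its state (Replica#getState should report RBW).
FsDatasetSpi<?> ds = DataNodeTestUtils.getFSDataset(cluster.getDataNodes().get(0));
ExtendedBlock eb = new ExtendedBlock(bpid, 100, 0, 1001);
try (FsDatasetSpi.FsVolumeReferences volumes = ds.getFsVolumeReferences()) {
    Replica rbw = cluster.getFsDatasetTestUtils(0).createRBW(volumes.get(0), eb);
    assertEquals(HdfsServerConstants.ReplicaState.RBW, rbw.getState());
}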

Example 8 with Block

Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

From class TestSimulatedFSDataset, method testInjectionEmpty:

@Test
public void testInjectionEmpty() throws IOException {
    SimulatedFSDataset fsdataset = getSimulatedFSDataset();
    BlockListAsLongs blockReport = fsdataset.getBlockReport(bpid);
    assertEquals(0, blockReport.getNumberOfBlocks());
    int bytesAdded = addSomeBlocks(fsdataset);
    blockReport = fsdataset.getBlockReport(bpid);
    assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
    for (Block b : blockReport) {
        assertNotNull(b);
        assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
    }
    // Inject blocks into an empty fsdataset
    //  - injecting the blocks we got above.
    SimulatedFSDataset sfsdataset = getSimulatedFSDataset();
    sfsdataset.injectBlocks(bpid, blockReport);
    blockReport = sfsdataset.getBlockReport(bpid);
    assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
    for (Block b : blockReport) {
        assertNotNull(b);
        assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
        assertEquals(blockIdToLen(b.getBlockId()), sfsdataset.getLength(new ExtendedBlock(bpid, b)));
    }
    assertEquals(bytesAdded, sfsdataset.getDfsUsed());
    assertEquals(sfsdataset.getCapacity() - bytesAdded, sfsdataset.getRemaining());
}
Also used: BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Block (org.apache.hadoop.hdfs.protocol.Block), Test (org.junit.Test)
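
The per-block assertions above rely on Block carrying exactly three fields (block id, length, generation stamp), while the block pool id lives in ExtendedBlock. A minimal standalone sketch of that round trip, with illustrative values:

// Block holds (blockId, numBytes, generationStamp) only; ExtendedBlock
// pairs a Block with its block pool id.
Block b = new Block(42L, 1024L, 1001L);
ExtendedBlock eb = new ExtendedBlock("BP-test", b);
assertEquals(42L, eb.getLocalBlock().getBlockId());
assertEquals(1024L, eb.getNumBytes());
assertEquals(1001L, eb.getGenerationStamp());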

Example 9 with Block

Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

From class TestSimulatedFSDataset, method testInvalidate:

@Test
public void testInvalidate() throws IOException {
    final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
    int bytesAdded = addSomeBlocks(fsdataset);
    Block[] deleteBlocks = new Block[2];
    deleteBlocks[0] = new Block(1, 0, 0);
    deleteBlocks[1] = new Block(2, 0, 0);
    fsdataset.invalidate(bpid, deleteBlocks);
    checkInvalidBlock(new ExtendedBlock(bpid, deleteBlocks[0]));
    checkInvalidBlock(new ExtendedBlock(bpid, deleteBlocks[1]));
    long sizeDeleted = blockIdToLen(1) + blockIdToLen(2);
    assertEquals(bytesAdded - sizeDeleted, fsdataset.getDfsUsed());
    assertEquals(fsdataset.getCapacity() - bytesAdded + sizeDeleted, fsdataset.getRemaining());
    // Now make sure the rest of the blocks are valid
    for (int i = 3; i <= NUMBLOCKS; ++i) {
        Block b = new Block(i, 0, 0);
        assertTrue(fsdataset.isValidBlock(new ExtendedBlock(bpid, b)));
    }
}
Also used: ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Block (org.apache.hadoop.hdfs.protocol.Block), Test (org.junit.Test)
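
checkInvalidBlock is a private helper of this test whose body is not shown here. A hedged sketch of the kind of assertions such a helper can make against the FsDatasetSpi API; this is a hypothetical reconstruction, and the real helper may check more (for example, that reads of the block fail):

// Hypothetical reconstruction, not the test's actual helper.
private static void checkInvalidBlock(FsDatasetSpi<?> fsdataset, ExtendedBlock b) {
    // After invalidate(), the block is neither a valid finalized
    // replica nor a valid replica-being-written.
    assertFalse(fsdataset.isValidBlock(b));
    assertFalse(fsdataset.isValidRbw(b));
}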

Example 10 with Block

Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

From class TestSimulatedFSDataset, method testInjectionNonEmpty:

@Test
public void testInjectionNonEmpty() throws IOException {
    SimulatedFSDataset fsdataset = getSimulatedFSDataset();
    BlockListAsLongs blockReport = fsdataset.getBlockReport(bpid);
    assertEquals(0, blockReport.getNumberOfBlocks());
    int bytesAdded = addSomeBlocks(fsdataset);
    blockReport = fsdataset.getBlockReport(bpid);
    assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
    for (Block b : blockReport) {
        assertNotNull(b);
        assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
    }
    fsdataset = null;
    // Inject blocks into a non-empty fsdataset
    //  - injecting the blocks we got above.
    SimulatedFSDataset sfsdataset = getSimulatedFSDataset();
    // Add some blocks whose block ids do not conflict with
    // the ones we are going to inject.
    bytesAdded += addSomeBlocks(sfsdataset, NUMBLOCKS + 1, false);
    // The new dataset reports its own NUMBLOCKS blocks; blockReport (from
    // the first dataset) is left untouched so it can be injected below.
    assertEquals(NUMBLOCKS, sfsdataset.getBlockReport(bpid).getNumberOfBlocks());
    sfsdataset.injectBlocks(bpid, blockReport);
    blockReport = sfsdataset.getBlockReport(bpid);
    assertEquals(NUMBLOCKS * 2, blockReport.getNumberOfBlocks());
    for (Block b : blockReport) {
        assertNotNull(b);
        assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
        assertEquals(blockIdToLen(b.getBlockId()), sfsdataset.getLength(new ExtendedBlock(bpid, b)));
    }
    assertEquals(bytesAdded, sfsdataset.getDfsUsed());
    assertEquals(sfsdataset.getCapacity() - bytesAdded, sfsdataset.getRemaining());
    // Now verify that blocks cannot be injected if the dataset does not
    // have sufficient capacity.
    conf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY, 10);
    try {
        sfsdataset = getSimulatedFSDataset();
        sfsdataset.addBlockPool(bpid, conf);
        sfsdataset.injectBlocks(bpid, blockReport);
        assertTrue("Expected an IO exception", false);
    } catch (IOException e) {
    // ok - as expected
    }
}
Also used: BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Block (org.apache.hadoop.hdfs.protocol.Block), IOException (java.io.IOException), Test (org.junit.Test)
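
The trailing try/catch asserts that injecting into an undersized dataset throws IOException. A sketch of the same expected-failure check written with org.apache.hadoop.test.LambdaTestUtils, assuming that utility is available on the test classpath:

// Capacity deliberately too small; injectBlocks should throw IOException.
conf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY, 10);
SimulatedFSDataset small = getSimulatedFSDataset();
small.addBlockPool(bpid, conf);
final BlockListAsLongs report = blockReport; // effectively-final copy for the lambda
LambdaTestUtils.intercept(IOException.class,
    () -> small.injectBlocks(bpid, report));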

Aggregations

Usage counts across the indexed sources:

Block (org.apache.hadoop.hdfs.protocol.Block): 155
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 79
Test (org.junit.Test): 77
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 74
Path (org.apache.hadoop.fs.Path): 28
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 26
IOException (java.io.IOException): 24
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 22
Configuration (org.apache.hadoop.conf.Configuration): 20
ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo): 18
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 17
BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous): 17
CachedBlock (org.apache.hadoop.hdfs.server.namenode.CachedBlock): 17
BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped): 15
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 14
ArrayList (java.util.ArrayList): 12
RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock): 11
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 11
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 10
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 10