
Example 1 with ReplicaBeingWritten

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten in project hadoop by apache.

From the class FsDatasetImplTestUtils, method createRBW:

@Override
public Replica createRBW(FsVolumeSpi volume, ExtendedBlock eb) throws IOException {
    FsVolumeImpl vol = (FsVolumeImpl) volume;
    final String bpid = eb.getBlockPoolId();
    final Block block = eb.getLocalBlock();
    // Create the rbw file on the volume and wrap it in a replica object;
    // the writer thread argument is null since no client pipeline is attached.
    ReplicaBeingWritten rbw = new ReplicaBeingWritten(
        eb.getLocalBlock(), volume, vol.createRbwFile(bpid, block).getParentFile(), null);
    // Materialize empty block and meta files, then register the replica
    // in the dataset's volume map so lookups can find it.
    rbw.getBlockFile().createNewFile();
    rbw.getMetaFile().createNewFile();
    dataset.volumeMap.add(bpid, rbw);
    return rbw;
}
Also used: ReplicaBeingWritten (org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Block (org.apache.hadoop.hdfs.protocol.Block)
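
In DataNode tests this helper is normally reached through the FsDatasetTestUtils abstraction rather than called directly. A minimal usage sketch, assuming a running MiniDFSCluster; the DataNodeTestUtils.getFsDatasetTestUtils accessor lives in the same test tree, and the block values here are arbitrary illustrations:

    // Illustrative only: create an RBW replica on the DataNode's first volume.
    DataNode dn = cluster.getDataNodes().get(0);
    FsDatasetTestUtils utils = DataNodeTestUtils.getFsDatasetTestUtils(dn);
    String bpid = cluster.getNamesystem().getBlockPoolId();
    // blockId=1, numBytes=0, genStamp=1001 are arbitrary values for the sketch.
    ExtendedBlock eb = new ExtendedBlock(bpid, 1, 0, 1001);
    try (FsDatasetSpi.FsVolumeReferences volumes =
             dn.getFSDataset().getFsVolumeReferences()) {
        Replica rbw = utils.createRBW(volumes.get(0), eb);
        assertEquals(ReplicaState.RBW, rbw.getState());
    }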

Example 2 with ReplicaBeingWritten

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten in project hadoop by apache.

From the class TestFileAppend, method testConcurrentAppendRead:

@Test(timeout = 10000)
public void testConcurrentAppendRead() throws IOException, TimeoutException, InterruptedException {
    // Create a finalized replica and append to it
    // Read block data and checksum. Verify checksum.
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
    conf.setInt("dfs.min.replication", 1);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        cluster.waitActive();
        DataNode dn = cluster.getDataNodes().get(0);
        FsDatasetSpi<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
        // create a file with 1 byte of data.
        long initialFileLength = 1;
        DistributedFileSystem fs = cluster.getFileSystem();
        Path fileName = new Path("/appendCorruptBlock");
        DFSTestUtil.createFile(fs, fileName, initialFileLength, (short) 1, 0);
        DFSTestUtil.waitReplication(fs, fileName, (short) 1);
        Assert.assertTrue("File not created", fs.exists(fileName));
        // Call FsDatasetImpl#append to append the block file,
        // which converts it to a rbw replica.
        ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
        long newGS = block.getGenerationStamp() + 1;
        ReplicaHandler replicaHandler = dataSet.append(block, newGS, initialFileLength);
        // write data to block file
        ReplicaBeingWritten rbw = (ReplicaBeingWritten) replicaHandler.getReplica();
        ReplicaOutputStreams outputStreams = rbw.createStreams(false, DEFAULT_CHECKSUM);
        OutputStream dataOutput = outputStreams.getDataOut();
        byte[] appendBytes = new byte[1];
        dataOutput.write(appendBytes, 0, 1);
        dataOutput.flush();
        dataOutput.close();
        // update checksum file
        final int smallBufferSize = DFSUtilClient.getSmallBufferSize(conf);
        FsDatasetUtil.computeChecksum(rbw.getMetaFile(), rbw.getMetaFile(), rbw.getBlockFile(), smallBufferSize, conf);
        // read the block
        // the DataNode BlockSender should read from the rbw replica's in-memory
        // checksum, rather than on-disk checksum. Otherwise it will see a
        // checksum mismatch error.
        final byte[] readBlock = DFSTestUtil.readFileBuffer(fs, fileName);
        assertEquals("should have read only one byte!", 1, readBlock.length);
    } finally {
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), ReplicaBeingWritten (org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten), Configuration (org.apache.hadoop.conf.Configuration), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), OutputStream (java.io.OutputStream), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), ReplicaHandler (org.apache.hadoop.hdfs.server.datanode.ReplicaHandler), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), ReplicaOutputStreams (org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams), Test (org.junit.Test)
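
DEFAULT_CHECKSUM comes from the enclosing TestFileAppend class (or a static import) and is not shown above. A plausible definition, stated here as an assumption rather than the exact upstream value:

    // Assumed definition: CRC32C with the default 512-byte chunk size.
    private static final DataChecksum DEFAULT_CHECKSUM =
            DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512);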

Example 3 with ReplicaBeingWritten

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten in project hadoop by apache.

From the class TestBlockListAsLongs, method testUc:

@Test
public void testUc() {
    BlockListAsLongs blocks = checkReport(new ReplicaBeingWritten(b1, null, null, null));
    assertArrayEquals(new long[] { 0, 1, -1, -1, -1, 1, 11, 111, ReplicaState.RBW.getValue() }, blocks.getBlockListAsLongs());
}
Also used: ReplicaBeingWritten (org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten), Test (org.junit.Test)
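
The expected long[] follows the legacy block-report layout: two header longs (the finalized and under-construction counts), the finalized (blockId, numBytes, genStamp) triplets, a -1, -1, -1 delimiter, and then one (blockId, numBytes, genStamp, replicaState) 4-tuple per under-construction replica. Here that decodes to zero finalized replicas and one RBW replica with id 1, length 11, and generation stamp 111, so b1 is evidently Block(1, 11, 111). A decoding sketch; the helper name is hypothetical and the layout is inferred from the assertion above:

    // Hypothetical helper: walk the long[] layout asserted in testUc.
    static void printUcReplicas(long[] longs) {
        int numFinalized = (int) longs[0]; // 0 in testUc
        int numUc = (int) longs[1];        // 1 in testUc
        // Skip the two header longs, the finalized triplets, and the
        // -1,-1,-1 delimiter to reach the under-construction entries.
        int ucStart = 2 + numFinalized * 3 + 3;
        for (int i = 0; i < numUc; i++) {
            int off = ucStart + i * 4;
            System.out.printf("id=%d len=%d gs=%d state=%d%n",
                longs[off], longs[off + 1], longs[off + 2], longs[off + 3]);
        }
    }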

Example 4 with ReplicaBeingWritten

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten in project hadoop by apache.

From the class TestBlockListAsLongs, method testFuzz:

@Test
public void testFuzz() throws InterruptedException {
    Replica[] replicas = new Replica[100000];
    Random rand = new Random(0);
    for (int i = 0; i < replicas.length; i++) {
        Block b = new Block(rand.nextLong(), i, i << 4);
        // nextInt(3), so all three replica states below are reachable.
        switch (rand.nextInt(3)) {
            case 0:
                replicas[i] = new FinalizedReplica(b, null, null);
                break;
            case 1:
                replicas[i] = new ReplicaBeingWritten(b, null, null, null);
                break;
            case 2:
                replicas[i] = new ReplicaWaitingToBeRecovered(b, null, null);
                break;
        }
    }
    checkReport(replicas);
}
Also used: ReplicaWaitingToBeRecovered (org.apache.hadoop.hdfs.server.datanode.ReplicaWaitingToBeRecovered), ReplicaBeingWritten (org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten), Random (java.util.Random), FinalizedReplica (org.apache.hadoop.hdfs.server.datanode.FinalizedReplica), BlockReportReplica (org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica), Replica (org.apache.hadoop.hdfs.server.datanode.Replica), Test (org.junit.Test)
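
checkReport is defined elsewhere in TestBlockListAsLongs; its essence is an encode/decode round trip over the report. A simplified sketch of that idea using the public BlockListAsLongs.encode factory (the comparison loop is an illustration, assuming the usual java.util imports, not the test's exact verification):

    // Simplified round-trip check: encode, iterate the decoded report, compare.
    static void roundTripCheck(Replica[] replicas) {
        Map<Long, Replica> expected = new HashMap<>();
        for (Replica r : replicas) {
            expected.put(r.getBlockId(), r);
        }
        BlockListAsLongs report = BlockListAsLongs.encode(Arrays.asList(replicas));
        for (BlockReportReplica decoded : report) {
            Replica original = expected.get(decoded.getBlockId());
            assertEquals(original.getNumBytes(), decoded.getNumBytes());
            assertEquals(original.getState(), decoded.getState());
        }
    }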

Example 5 with ReplicaBeingWritten

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten in project hadoop by apache.

From the class TestBlockManager, method testSafeModeIBRBeforeFirstFullBR:

/**
   * Test that when the NN starts in safe mode and receives an incremental
   * block report first, followed by the first full block report, the
   * reported blocks are tracked correctly.
   */
@Test
public void testSafeModeIBRBeforeFirstFullBR() throws Exception {
    // pretend to be in safemode
    doReturn(true).when(fsn).isInStartupSafeMode();
    DatanodeDescriptor node = nodes.get(0);
    DatanodeStorageInfo ds = node.getStorageInfos()[0];
    node.setAlive(true);
    DatanodeRegistration nodeReg = new DatanodeRegistration(node, null, null, "");
    // register new node
    bm.getDatanodeManager().registerDatanode(nodeReg);
    bm.getDatanodeManager().addDatanode(node);
    assertEquals(node, bm.getDatanodeManager().getDatanode(node));
    assertEquals(0, ds.getBlockReportCount());
    // Build an incremental report
    List<ReceivedDeletedBlockInfo> rdbiList = new ArrayList<>();
    // Build a full report
    BlockListAsLongs.Builder builder = BlockListAsLongs.builder();
    // blk_42 is finalized; the id is arbitrary.
    long receivedBlockId = 42;
    BlockInfo receivedBlock = addBlockToBM(receivedBlockId);
    rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivedBlock), ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
    builder.add(new FinalizedReplica(receivedBlock, null, null));
    // blk_43 is under construction.
    long receivingBlockId = 43;
    BlockInfo receivingBlock = addUcBlockToBM(receivingBlockId);
    rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingBlock), ReceivedDeletedBlockInfo.BlockStatus.RECEIVING_BLOCK, null));
    builder.add(new ReplicaBeingWritten(receivingBlock, null, null, null));
    // blk_44 has 2 records in IBR. It's finalized. So full BR has 1 record.
    long receivingReceivedBlockId = 44;
    BlockInfo receivingReceivedBlock = addBlockToBM(receivingReceivedBlockId);
    rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingReceivedBlock), ReceivedDeletedBlockInfo.BlockStatus.RECEIVING_BLOCK, null));
    rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingReceivedBlock), ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
    builder.add(new FinalizedReplica(receivingReceivedBlock, null, null));
    // blk_45 is not in full BR, because it's deleted.
    long receivedDeletedBlockId = 45;
    rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivedDeletedBlockId), ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
    rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivedDeletedBlockId), ReceivedDeletedBlockInfo.BlockStatus.DELETED_BLOCK, null));
    // blk_46 exists in DN for a long time, so it's in full BR, but not in IBR.
    long existedBlockId = 46;
    BlockInfo existedBlock = addBlockToBM(existedBlockId);
    builder.add(new FinalizedReplica(existedBlock, null, null));
    // process IBR and full BR
    StorageReceivedDeletedBlocks srdb = new StorageReceivedDeletedBlocks(new DatanodeStorage(ds.getStorageID()), rdbiList.toArray(new ReceivedDeletedBlockInfo[rdbiList.size()]));
    bm.processIncrementalBlockReport(node, srdb);
    // Make sure it's the first full report
    assertEquals(0, ds.getBlockReportCount());
    bm.processReport(node, new DatanodeStorage(ds.getStorageID()), builder.build(), new BlockReportContext(1, 0, System.nanoTime(), 0, true));
    assertEquals(1, ds.getBlockReportCount());
    // verify the storage info is correct
    assertTrue(bm.getStoredBlock(new Block(receivedBlockId)).findStorageInfo(ds) >= 0);
    assertTrue(bm.getStoredBlock(new Block(receivingBlockId)).getUnderConstructionFeature().getNumExpectedLocations() > 0);
    assertTrue(bm.getStoredBlock(new Block(receivingReceivedBlockId)).findStorageInfo(ds) >= 0);
    assertNull(bm.getStoredBlock(new Block(receivedDeletedBlockId)));
    assertTrue(bm.getStoredBlock(new Block(existedBlockId)).findStorageInfo(ds) >= 0);
}
Also used: ReplicaBeingWritten (org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten), ArrayList (java.util.ArrayList), FinalizedReplica (org.apache.hadoop.hdfs.server.datanode.FinalizedReplica), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo), BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext), DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs), Block (org.apache.hadoop.hdfs.protocol.Block), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), StorageReceivedDeletedBlocks (org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks), Test (org.junit.Test)

Aggregations

ReplicaBeingWritten (org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten): 7
Test (org.junit.Test): 6
Block (org.apache.hadoop.hdfs.protocol.Block): 3
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 3
FinalizedReplica (org.apache.hadoop.hdfs.server.datanode.FinalizedReplica): 3
ArrayList (java.util.ArrayList): 2
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 2
Path (org.apache.hadoop.fs.Path): 2
BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs): 2
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 2
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 2
ReplicaWaitingToBeRecovered (org.apache.hadoop.hdfs.server.datanode.ReplicaWaitingToBeRecovered): 2
BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext): 2
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 2
ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo): 2
StorageReceivedDeletedBlocks (org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks): 2
OutputStream (java.io.OutputStream): 1
Random (java.util.Random): 1
Configuration (org.apache.hadoop.conf.Configuration): 1
BlockReportReplica (org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica): 1