
Example 6 with Replica

Use of org.apache.hadoop.hdfs.server.datanode.Replica in project hadoop by apache.

From the class TestBlockListAsLongs, method testFuzz.

@Test
public void testFuzz() throws InterruptedException {
    // build a large array of replicas with pseudo-random block ids
    Replica[] replicas = new Replica[100000];
    Random rand = new Random(0);
    for (int i = 0; i < replicas.length; i++) {
        Block b = new Block(rand.nextLong(), i, i << 4);
        // Note: nextInt(2) only returns 0 or 1, so the case 2 branch
        // (ReplicaWaitingToBeRecovered) below is never exercised.
        switch (rand.nextInt(2)) {
            case 0:
                replicas[i] = new FinalizedReplica(b, null, null);
                break;
            case 1:
                replicas[i] = new ReplicaBeingWritten(b, null, null, null);
                break;
            case 2:
                replicas[i] = new ReplicaWaitingToBeRecovered(b, null, null);
                break;
        }
    }
    checkReport(replicas);
}
Also used : ReplicaWaitingToBeRecovered (org.apache.hadoop.hdfs.server.datanode.ReplicaWaitingToBeRecovered), ReplicaBeingWritten (org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten), Random (java.util.Random), FinalizedReplica (org.apache.hadoop.hdfs.server.datanode.FinalizedReplica), BlockReportReplica (org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica), Replica (org.apache.hadoop.hdfs.server.datanode.Replica), Test (org.junit.Test)

Example 7 with Replica

Use of org.apache.hadoop.hdfs.server.datanode.Replica in project hadoop by apache.

From the class TestBlockListAsLongs, method checkReport.

private BlockListAsLongs checkReport(Replica... replicas) {
    Map<Long, Replica> expectedReplicas = new HashMap<>();
    for (Replica replica : replicas) {
        expectedReplicas.put(replica.getBlockId(), replica);
    }
    expectedReplicas = Collections.unmodifiableMap(expectedReplicas);
    // encode the blocks and extract the buffers
    BlockListAsLongs blocks = BlockListAsLongs.encode(expectedReplicas.values());
    List<ByteString> buffers = blocks.getBlocksBuffers();
    // convert to old-style list of longs
    List<Long> longs = new ArrayList<Long>();
    for (long value : blocks.getBlockListAsLongs()) {
        longs.add(value);
    }
    // decode the buffers and verify their contents
    BlockListAsLongs decodedBlocks = BlockListAsLongs.decodeBuffers(expectedReplicas.size(), buffers);
    checkReplicas(expectedReplicas, decodedBlocks);
    // decode the longs and verify their contents
    BlockListAsLongs decodedList = BlockListAsLongs.decodeLongs(longs);
    checkReplicas(expectedReplicas, decodedList);
    return blocks;
}
Also used : HashMap (java.util.HashMap), ByteString (com.google.protobuf.ByteString), ArrayList (java.util.ArrayList), FinalizedReplica (org.apache.hadoop.hdfs.server.datanode.FinalizedReplica), BlockReportReplica (org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica), Replica (org.apache.hadoop.hdfs.server.datanode.Replica)
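
Both checkReport calls above delegate verification to a checkReplicas helper that is not reproduced in this listing. The following is only a sketch of what such a helper might look like, assuming BlockListAsLongs is iterated as BlockReportReplica entries (it implements Iterable) and each reported replica is compared field by field against the expected map; the exact assertions are an assumption, not the verbatim Hadoop test code.

private void checkReplicas(Map<Long, Replica> expectedReplicas, BlockListAsLongs decodedBlocks) {
    // the decoded report should contain exactly the expected number of blocks
    assertEquals(expectedReplicas.size(), decodedBlocks.getNumberOfBlocks());
    // copy the expected map so entries can be checked off as they are found
    Map<Long, Replica> remaining = new HashMap<>(expectedReplicas);
    for (BlockReportReplica reported : decodedBlocks) {
        Replica expected = remaining.remove(reported.getBlockId());
        assertNotNull("unexpected block id " + reported.getBlockId(), expected);
        assertEquals("wrong length", expected.getNumBytes(), reported.getNumBytes());
        assertEquals("wrong genstamp", expected.getGenerationStamp(), reported.getGenerationStamp());
        assertEquals("wrong state", expected.getState(), reported.getState());
    }
    // every expected replica must have appeared in the decoded report
    assertTrue("missing replicas: " + remaining.keySet(), remaining.isEmpty());
}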

Example 8 with Replica

Use of org.apache.hadoop.hdfs.server.datanode.Replica in project hadoop by apache.

From the class TestInterDatanodeProtocol, method testUpdateReplicaUnderRecovery.

/**
   * Test for
   * {@link FsDatasetImpl#updateReplicaUnderRecovery(ExtendedBlock, long, long)}.
   */
@Test
public void testUpdateReplicaUnderRecovery() throws IOException {
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        cluster.waitActive();
        //create a file
        DistributedFileSystem dfs = cluster.getFileSystem();
        String filestr = "/foo";
        Path filepath = new Path(filestr);
        DFSTestUtil.createFile(dfs, filepath, 1024L, (short) 3, 0L);
        //get block info
        final LocatedBlock locatedblock = getLastLocatedBlock(DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
        final DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
        Assert.assertTrue(datanodeinfo.length > 0);
        //get DataNode and FSDataset objects
        final DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
        Assert.assertTrue(datanode != null);
        //initReplicaRecovery
        final ExtendedBlock b = locatedblock.getBlock();
        final long recoveryid = b.getGenerationStamp() + 1;
        final long newlength = b.getNumBytes() - 1;
        final FsDatasetSpi<?> fsdataset = DataNodeTestUtils.getFSDataset(datanode);
        final ReplicaRecoveryInfo rri = fsdataset.initReplicaRecovery(new RecoveringBlock(b, null, recoveryid));
        //check replica
        final Replica replica = cluster.getFsDatasetTestUtils(datanode).fetchReplica(b);
        Assert.assertEquals(ReplicaState.RUR, replica.getState());
        //check meta data before update
        cluster.getFsDatasetTestUtils(datanode).checkStoredReplica(replica);
        //case "THIS IS NOT SUPPOSED TO HAPPEN"
        //with (block length) != (stored replica's on disk length). 
        {
            //create a block with same id and gs but different length.
            final ExtendedBlock tmp = new ExtendedBlock(b.getBlockPoolId(), rri.getBlockId(), rri.getNumBytes() - 1, rri.getGenerationStamp());
            try {
                //update should fail
                fsdataset.updateReplicaUnderRecovery(tmp, recoveryid, tmp.getBlockId(), newlength);
                Assert.fail();
            } catch (IOException ioe) {
                System.out.println("GOOD: getting " + ioe);
            }
        }
        //update
        final Replica r = fsdataset.updateReplicaUnderRecovery(new ExtendedBlock(b.getBlockPoolId(), rri), recoveryid, rri.getBlockId(), newlength);
        assertTrue(r != null);
        assertTrue(r.getStorageUuid() != null);
    } finally {
        if (cluster != null)
            cluster.shutdown();
    }
}
Also used : Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), IOException (java.io.IOException), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), FinalizedReplica (org.apache.hadoop.hdfs.server.datanode.FinalizedReplica), Replica (org.apache.hadoop.hdfs.server.datanode.Replica), ReplicaRecoveryInfo (org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock), Test (org.junit.Test)
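
The test above calls a getLastLocatedBlock helper that is not shown in this listing. A minimal sketch of how such a helper might be written follows, assuming it asks the NameNode (a ClientProtocol proxy, which is what DFSClientAdapter.getDFSClient(dfs).getNamenode() returns) for all block locations of the file and hands back the last one; the signature is inferred from the call site, not copied from the Hadoop source.

static LocatedBlock getLastLocatedBlock(ClientProtocol namenode, String src) throws IOException {
    // fetch the locations of every block in the file
    List<LocatedBlock> blocks =
        namenode.getBlockLocations(src, 0, Long.MAX_VALUE).getLocatedBlocks();
    Assert.assertTrue("file " + src + " has no blocks", blocks.size() > 0);
    // the replica being recovered in the test belongs to the file's last block
    return blocks.get(blocks.size() - 1);
}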

Example 9 with Replica

Use of org.apache.hadoop.hdfs.server.datanode.Replica in project hadoop by apache.

From the class TestPipelines, method pipeline_01.

/**
   * Creates and closes a file of a certain length.
   * Calls append() so that the next write() operation adds to the end of the file.
   * After the write() invocation, calls hflush() to make sure the data has made it
   * through the pipeline, and then checks the state of the last block's replica,
   * which is expected to be in the RBW state.
   *
   * @throws IOException in case of an error
   */
@Test
public void pipeline_01() throws IOException {
    final String METHOD_NAME = GenericTestUtils.getMethodName();
    if (LOG.isDebugEnabled()) {
        LOG.debug("Running " + METHOD_NAME);
    }
    Path filePath = new Path("/" + METHOD_NAME + ".dat");
    DFSTestUtil.createFile(fs, filePath, FILE_SIZE, REPL_FACTOR, rand.nextLong());
    if (LOG.isDebugEnabled()) {
        LOG.debug("Invoking append but doing nothing otherwise...");
    }
    FSDataOutputStream ofs = fs.append(filePath);
    ofs.writeBytes("Some more stuff to write");
    ((DFSOutputStream) ofs.getWrappedStream()).hflush();
    List<LocatedBlock> lb = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), FILE_SIZE - 1, FILE_SIZE).getLocatedBlocks();
    for (DataNode dn : cluster.getDataNodes()) {
        Replica r = cluster.getFsDatasetTestUtils(dn).fetchReplica(lb.get(0).getBlock());
        assertTrue("Replica on DN " + dn + " shouldn't be null", r != null);
        assertEquals("Should be RBW replica on " + dn + " after sequence of calls append()/write()/hflush()", HdfsServerConstants.ReplicaState.RBW, r.getState());
    }
    ofs.close();
}
Also used : Path (org.apache.hadoop.fs.Path), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Replica (org.apache.hadoop.hdfs.server.datanode.Replica), Test (org.junit.Test)
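
pipeline_01 relies on class-level fixtures (cluster, fs, FILE_SIZE, REPL_FACTOR, rand) that are defined elsewhere in TestPipelines. A rough sketch of how such a fixture could be set up with a MiniDFSCluster is shown below; the field values and method names here are illustrative assumptions, not the actual constants used by the Hadoop test.

// Illustrative values; the real TestPipelines constants may differ.
private static final short REPL_FACTOR = 3;
private static final int FILE_SIZE = 16 * 1024;
private static final Random rand = new Random();

private MiniDFSCluster cluster;
private DistributedFileSystem fs;

@Before
public void startUpCluster() throws IOException {
    // bring up a small cluster with one datanode per replica
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL_FACTOR).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
}

@After
public void shutDownCluster() {
    if (cluster != null) {
        cluster.shutdown();
    }
}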

Aggregations

Replica (org.apache.hadoop.hdfs.server.datanode.Replica): 9 uses
FinalizedReplica (org.apache.hadoop.hdfs.server.datanode.FinalizedReplica): 7 uses
BlockReportReplica (org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica): 5 uses
Test (org.junit.Test): 4 uses
ArrayList (java.util.ArrayList): 3 uses
IOException (java.io.IOException): 2 uses
HashMap (java.util.HashMap): 2 uses
Path (org.apache.hadoop.fs.Path): 2 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 2 uses
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 2 uses
ByteString (com.google.protobuf.ByteString): 1 use
RpcController (com.google.protobuf.RpcController): 1 use
File (java.io.File): 1 use
Random (java.util.Random): 1 use
AtomicReference (java.util.concurrent.atomic.AtomicReference): 1 use
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 1 use
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 1 use
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 1 use
BlockLocalPathInfo (org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo): 1 use
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 1 use