
Example 96 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class TestDataTransferProtocol method testDataTransferProtocol.

@Test
public void testDataTransferProtocol() throws IOException {
    Random random = new Random();
    int oneMil = 1024 * 1024;
    Path file = new Path("dataprotocol.dat");
    int numDataNodes = 1;
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, numDataNodes);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
    try {
        cluster.waitActive();
        datanode = cluster.getFileSystem().getDataNodeStats(DatanodeReportType.LIVE)[0];
        dnAddr = NetUtils.createSocketAddr(datanode.getXferAddr());
        FileSystem fileSys = cluster.getFileSystem();
        int fileLen = Math.min(conf.getInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096), 4096);
        DFSTestUtil.createFile(fileSys, file, fileLen, fileLen, fileSys.getDefaultBlockSize(file), fileSys.getDefaultReplication(file), 0L);
        // get the first blockid for the file
        final ExtendedBlock firstBlock = DFSTestUtil.getFirstBlock(fileSys, file);
        final String poolId = firstBlock.getBlockPoolId();
        long newBlockId = firstBlock.getBlockId() + 1;
        recvBuf.reset();
        sendBuf.reset();
        // bad version
        recvOut.writeShort((short) (DataTransferProtocol.DATA_TRANSFER_VERSION - 1));
        sendOut.writeShort((short) (DataTransferProtocol.DATA_TRANSFER_VERSION - 1));
        sendRecvData("Wrong Version", true);
        // bad ops
        sendBuf.reset();
        sendOut.writeShort((short) DataTransferProtocol.DATA_TRANSFER_VERSION);
        sendOut.writeByte(Op.WRITE_BLOCK.code - 1);
        sendRecvData("Wrong Op Code", true);
        /* Test OP_WRITE_BLOCK */
        sendBuf.reset();
        DataChecksum badChecksum = Mockito.spy(DEFAULT_CHECKSUM);
        Mockito.doReturn(-1).when(badChecksum).getBytesPerChecksum();
        writeBlock(poolId, newBlockId, badChecksum);
        recvBuf.reset();
        sendResponse(Status.ERROR, null, null, recvOut);
        sendRecvData("wrong bytesPerChecksum while writing", true);
        sendBuf.reset();
        recvBuf.reset();
        writeBlock(poolId, ++newBlockId, DEFAULT_CHECKSUM);
        PacketHeader hdr = new PacketHeader(
            4,                            // size of packet
            0,                            // offset in block
            100,                          // seqno
            false,                        // last packet in block
            -1 - random.nextInt(oneMil),  // bad (negative) datalen
            false);                       // sync block
        hdr.write(sendOut);
        sendResponse(Status.SUCCESS, "", null, recvOut);
        new PipelineAck(100, new int[] { PipelineAck.combineHeader(PipelineAck.ECN.DISABLED, Status.ERROR) }).write(recvOut);
        sendRecvData("negative DATA_CHUNK len while writing block " + newBlockId, true);
        // Test writing a valid zero-size block
        sendBuf.reset();
        recvBuf.reset();
        writeBlock(poolId, ++newBlockId, DEFAULT_CHECKSUM);
        hdr = new PacketHeader(
            8,      // size of packet
            0,      // offset in block
            100,    // seqno
            true,   // last packet in block
            0,      // chunk length (zero-length block)
            false); // sync block
        hdr.write(sendOut);
        // zero checksum
        sendOut.writeInt(0);
        sendOut.flush();
        // OK, finally write a block with zero length
        sendResponse(Status.SUCCESS, "", null, recvOut);
        new PipelineAck(100, new int[] { PipelineAck.combineHeader(PipelineAck.ECN.DISABLED, Status.SUCCESS) }).write(recvOut);
        sendRecvData("Writing a zero len block blockid " + newBlockId, false);
        /* Test OP_READ_BLOCK */
        String bpid = cluster.getNamesystem().getBlockPoolId();
        ExtendedBlock blk = new ExtendedBlock(bpid, firstBlock.getLocalBlock());
        long blkid = blk.getBlockId();
        // bad block id
        sendBuf.reset();
        recvBuf.reset();
        blk.setBlockId(blkid - 1);
        sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl", 0L, fileLen, true, CachingStrategy.newDefaultStrategy());
        sendRecvData("Wrong block ID " + newBlockId + " for read", false);
        // negative block start offset -1L
        sendBuf.reset();
        blk.setBlockId(blkid);
        sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl", -1L, fileLen, true, CachingStrategy.newDefaultStrategy());
        sendRecvData("Negative start-offset for read for block " + firstBlock.getBlockId(), false);
        // bad block start offset
        sendBuf.reset();
        sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl", fileLen, fileLen, true, CachingStrategy.newDefaultStrategy());
        sendRecvData("Wrong start-offset for reading block " + firstBlock.getBlockId(), false);
        // Negative length is OK: the datanode assumes we want to read the whole block.
        recvBuf.reset();
        BlockOpResponseProto.newBuilder().setStatus(Status.SUCCESS).setReadOpChecksumInfo(ReadOpChecksumInfoProto.newBuilder().setChecksum(DataTransferProtoUtil.toProto(DEFAULT_CHECKSUM)).setChunkOffset(0L)).build().writeDelimitedTo(recvOut);
        sendBuf.reset();
        sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl", 0L, -1L - random.nextInt(oneMil), true, CachingStrategy.newDefaultStrategy());
        sendRecvData("Negative length for reading block " + firstBlock.getBlockId(), false);
        // length is more than size of block.
        recvBuf.reset();
        sendResponse(Status.ERROR, null, "opReadBlock " + firstBlock + " received exception java.io.IOException:  " + "Offset 0 and length 4097 don't match block " + firstBlock + " ( blockLen 4096 )", recvOut);
        sendBuf.reset();
        sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl", 0L, fileLen + 1, true, CachingStrategy.newDefaultStrategy());
        sendRecvData("Wrong length for reading block " + firstBlock.getBlockId(), false);
        // At the end of all this, read the file back to make sure reads still succeed.
        sendBuf.reset();
        sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl", 0L, fileLen, true, CachingStrategy.newDefaultStrategy());
        readFile(fileSys, file, fileLen);
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) Builder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.Builder) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) PipelineAck(org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck) DataChecksum(org.apache.hadoop.util.DataChecksum) Random(java.util.Random) FileSystem(org.apache.hadoop.fs.FileSystem) PacketHeader(org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader) Test(org.junit.Test)
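This excerpt leans on fixtures declared elsewhere in TestDataTransferProtocol: the send/receive buffers and streams, the Sender, and helpers such as writeBlock, sendResponse, readFile and sendRecvData. A minimal sketch of how such a harness is typically wired; the field names match the excerpt, but the declarations and the sendRecvData body below are assumptions, not the verbatim Hadoop source.

// Sketch of the class-level fixtures the excerpt above assumes
// (illustrative, not the verbatim Hadoop source).
private DatanodeInfo datanode;    // picked from the cluster's live report
private InetSocketAddress dnAddr; // the datanode's data-transfer address
private final ByteArrayOutputStream sendBuf = new ByteArrayOutputStream(128);
private final DataOutputStream sendOut = new DataOutputStream(sendBuf);
private final Sender sender = new Sender(sendOut);
private final ByteArrayOutputStream recvBuf = new ByteArrayOutputStream(128);
private final DataOutputStream recvOut = new DataOutputStream(recvBuf);

// Sends the bytes accumulated in sendBuf to the datanode over a fresh
// socket, then checks the reply: when eofExpected is true the datanode
// should reject the request and drop the connection; otherwise the reply
// must match the bytes staged in recvBuf.
private void sendRecvData(String testDescription, boolean eofExpected)
    throws IOException {
    try (Socket sock = new Socket()) {
        sock.connect(dnAddr, 60_000); // illustrative connect timeout
        sock.getOutputStream().write(sendBuf.toByteArray());
        byte[] expected = recvBuf.toByteArray();
        byte[] actual = new byte[expected.length];
        try {
            new DataInputStream(sock.getInputStream()).readFully(actual);
            assertFalse(testDescription + ": expected early EOF", eofExpected);
            assertArrayEquals(testDescription, expected, actual);
        } catch (EOFException e) {
            assertTrue(testDescription + ": unexpected EOF", eofExpected);
        }
    }
}

Each negative test above stages a request in sendBuf and the expected reply in recvBuf, then lets sendRecvData drive the exchange against the live datanode.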

Example 97 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class TestDFSShell method getMaterializedReplicas.

private static List<MaterializedReplica> getMaterializedReplicas(MiniDFSCluster cluster) throws IOException {
    List<MaterializedReplica> replicas = new ArrayList<>();
    String poolId = cluster.getNamesystem().getBlockPoolId();
    List<Map<DatanodeStorage, BlockListAsLongs>> blocks = cluster.getAllBlockReports(poolId);
    for (int i = 0; i < blocks.size(); i++) {
        Map<DatanodeStorage, BlockListAsLongs> map = blocks.get(i);
        for (Map.Entry<DatanodeStorage, BlockListAsLongs> e : map.entrySet()) {
            for (Block b : e.getValue()) {
                replicas.add(cluster.getMaterializedReplica(i, new ExtendedBlock(poolId, b)));
            }
        }
    }
    return replicas;
}
Also used : MaterializedReplica(org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils.MaterializedReplica) ArrayList(java.util.ArrayList) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) StringContains.containsString(org.hamcrest.core.StringContains.containsString) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) BlockListAsLongs(org.apache.hadoop.hdfs.protocol.BlockListAsLongs) Block(org.apache.hadoop.hdfs.protocol.Block) Map(java.util.Map)
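In TestDFSShell the replicas collected by this helper are used to damage on-disk block state and then check that reads surface the corruption. A hedged usage sketch; corruptData and corruptMeta follow the FsDatasetTestUtils.MaterializedReplica API, but treat the snippet as illustrative:

// Corrupt every materialized replica of the cluster's blocks, then
// expect subsequent reads to fail checksum verification.
for (MaterializedReplica replica : getMaterializedReplicas(cluster)) {
    replica.corruptData(); // flip bytes in the block file
    replica.corruptMeta(); // damage the checksum (.meta) file as well
}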

Example 98 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class TestDataNodeHotSwapVolumes method testRemoveVolumeBeingWrittenForDatanode.

/**
   * Tests removing a data volume from a DataNode while the volume is
   * actively being written to.
   * @param dataNodeIdx the index of the DataNode from which to remove a volume.
   */
private void testRemoveVolumeBeingWrittenForDatanode(int dataNodeIdx) throws IOException, ReconfigurationException, TimeoutException, InterruptedException, BrokenBarrierException {
    // Starts DFS cluster with 3 DataNodes to form a pipeline.
    startDFSCluster(1, 3);
    final short REPLICATION = 3;
    final DataNode dn = cluster.getDataNodes().get(dataNodeIdx);
    final FileSystem fs = cluster.getFileSystem();
    final Path testFile = new Path("/test");
    FSDataOutputStream out = fs.create(testFile, REPLICATION);
    Random rb = new Random(0);
    // Write half a block of random data.
    byte[] writeBuf = new byte[BLOCK_SIZE / 2];
    rb.nextBytes(writeBuf);
    out.write(writeBuf);
    out.hflush();
    // Make FsDatasetSpi#finalizeBlock a time-consuming operation, so that if
    // the BlockReceiver releases its volume reference before finalizeBlock()
    // completes, the blocks on the volume are removed and finalizeBlock()
    // throws an IOException.
    final FsDatasetSpi<? extends FsVolumeSpi> data = dn.data;
    dn.data = Mockito.spy(data);
    doAnswer(new Answer<Object>() {

        @Override
        public Object answer(InvocationOnMock invocation) throws IOException, InterruptedException {
            Thread.sleep(1000);
            // Pass the original argument through to the real
            // FsDatasetImpl#finalizeBlock to verify that the block is not
            // removed, since the volume reference should not be released
            // at this point.
            data.finalizeBlock((ExtendedBlock) invocation.getArguments()[0]);
            return null;
        }
    }).when(dn.data).finalizeBlock(any(ExtendedBlock.class));
    final CyclicBarrier barrier = new CyclicBarrier(2);
    List<String> oldDirs = getDataDirs(dn);
    // Remove the first volume by reconfiguring the DataNode to keep only
    // the second data directory.
    final String newDirs = oldDirs.get(1);
    final List<Exception> exceptions = new ArrayList<>();
    Thread reconfigThread = new Thread() {

        @Override
        public void run() {
            try {
                barrier.await();
                assertThat("DN did not update its own config", dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, newDirs), is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
            } catch (ReconfigurationException | InterruptedException | BrokenBarrierException e) {
                exceptions.add(e);
            }
        }
    };
    reconfigThread.start();
    barrier.await();
    rb.nextBytes(writeBuf);
    out.write(writeBuf);
    out.hflush();
    out.close();
    reconfigThread.join();
    // Verify that the data directory reconfiguration was successful.
    FsDatasetSpi<? extends FsVolumeSpi> fsDatasetSpi = dn.getFSDataset();
    try (FsDatasetSpi.FsVolumeReferences fsVolumeReferences = fsDatasetSpi.getFsVolumeReferences()) {
        for (int i = 0; i < fsVolumeReferences.size(); i++) {
            System.out.println("Vol: " + fsVolumeReferences.get(i).getBaseURI().toString());
        }
        assertEquals("Volume remove wasn't successful.", 1, fsVolumeReferences.size());
    }
    // Verify the file has a sufficient number of replicas.
    DFSTestUtil.waitReplication(fs, testFile, REPLICATION);
    // Read the content back
    byte[] content = DFSTestUtil.readFileBuffer(fs, testFile);
    assertEquals(BLOCK_SIZE, content.length);
    if (!exceptions.isEmpty()) {
        throw new IOException(exceptions.get(0).getCause());
    }
}
Also used : BrokenBarrierException(java.util.concurrent.BrokenBarrierException) ArrayList(java.util.ArrayList) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) Matchers.anyString(org.mockito.Matchers.anyString) Random(java.util.Random) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) ReconfigurationException(org.apache.hadoop.conf.ReconfigurationException) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Path(org.apache.hadoop.fs.Path) FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) IOException(java.io.IOException) TimeoutException(java.util.concurrent.TimeoutException) BlockMissingException(org.apache.hadoop.hdfs.BlockMissingException) CyclicBarrier(java.util.concurrent.CyclicBarrier) InvocationOnMock(org.mockito.invocation.InvocationOnMock)
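This helper is parameterized by pipeline position; in the full test class it is invoked from @Test wrappers roughly like the following (the timeout value and the chosen index are assumptions):

@Test(timeout = 180000)
public void testRemoveVolumeBeingWritten() throws Exception {
    // Exercise the reconfiguration race against the middle DataNode of
    // the 3-node write pipeline.
    testRemoveVolumeBeingWrittenForDatanode(1);
}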

Example 99 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class TestDataNodeVolumeMetrics method verifyDataNodeVolumeMetrics.

private void verifyDataNodeVolumeMetrics(final FileSystem fs, final MiniDFSCluster cluster, final Path fileName) throws IOException {
    List<DataNode> datanodes = cluster.getDataNodes();
    DataNode datanode = datanodes.get(0);
    final ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
    final FsVolumeSpi volume = datanode.getFSDataset().getVolume(block);
    DataNodeVolumeMetrics metrics = volume.getMetrics();
    MetricsRecordBuilder rb = getMetrics(volume.getMetrics().name());
    assertCounter("TotalDataFileIos", metrics.getTotalDataFileIos(), rb);
    LOG.info("TotalMetadataOperations : " + metrics.getTotalMetadataOperations());
    LOG.info("TotalDataFileIos : " + metrics.getTotalDataFileIos());
    LOG.info("TotalFileIoErrors : " + metrics.getTotalFileIoErrors());
    LOG.info("MetadataOperationSampleCount : " + metrics.getMetadataOperationSampleCount());
    LOG.info("MetadataOperationMean : " + metrics.getMetadataOperationMean());
    LOG.info("MetadataFileIoStdDev : " + metrics.getMetadataOperationStdDev());
    LOG.info("DataFileIoSampleCount : " + metrics.getDataFileIoSampleCount());
    LOG.info("DataFileIoMean : " + metrics.getDataFileIoMean());
    LOG.info("DataFileIoStdDev : " + metrics.getDataFileIoStdDev());
    LOG.info("flushIoSampleCount : " + metrics.getFlushIoSampleCount());
    LOG.info("flushIoMean : " + metrics.getFlushIoMean());
    LOG.info("flushIoStdDev : " + metrics.getFlushIoStdDev());
    LOG.info("syncIoSampleCount : " + metrics.getSyncIoSampleCount());
    LOG.info("syncIoMean : " + metrics.getSyncIoMean());
    LOG.info("syncIoStdDev : " + metrics.getSyncIoStdDev());
    LOG.info("readIoSampleCount : " + metrics.getReadIoMean());
    LOG.info("readIoMean : " + metrics.getReadIoMean());
    LOG.info("readIoStdDev : " + metrics.getReadIoStdDev());
    LOG.info("writeIoSampleCount : " + metrics.getWriteIoSampleCount());
    LOG.info("writeIoMean : " + metrics.getWriteIoMean());
    LOG.info("writeIoStdDev : " + metrics.getWriteIoStdDev());
    LOG.info("fileIoErrorSampleCount : " + metrics.getFileIoErrorSampleCount());
    LOG.info("fileIoErrorMean : " + metrics.getFileIoErrorMean());
    LOG.info("fileIoErrorStdDev : " + metrics.getFileIoErrorStdDev());
}
Also used : ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) FsVolumeSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi) DataNodeVolumeMetrics(org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder)
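The getMetrics and assertCounter helpers above are static imports from Hadoop's metrics test support class; a typical import block for this pattern:

// Static test helpers assumed by the example above.
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;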

Example 100 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class TestDataNodeMetrics method testDatanodeBlocksReplicatedMetric.

@Test
public void testDatanodeBlocksReplicatedMetric() throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        FileSystem fs = cluster.getFileSystem();
        List<DataNode> datanodes = cluster.getDataNodes();
        assertEquals(1, datanodes.size());
        DataNode datanode = datanodes.get(0);
        MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
        long blocksReplicated = getLongCounter("BlocksReplicated", rb);
        assertEquals("No blocks replicated yet", 0, blocksReplicated);
        Path path = new Path("/counter.txt");
        DFSTestUtil.createFile(fs, path, 1024, (short) 2, Time.monotonicNow());
        cluster.startDataNodes(conf, 1, true, StartupOption.REGULAR, null);
        ExtendedBlock firstBlock = DFSTestUtil.getFirstBlock(fs, path);
        DFSTestUtil.waitForReplication(cluster, firstBlock, 1, 2, 0);
        MetricsRecordBuilder rbNew = getMetrics(datanode.getMetrics().name());
        blocksReplicated = getLongCounter("BlocksReplicated", rbNew);
        assertEquals("blocks replicated counter incremented", 1, blocksReplicated);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder) Test(org.junit.Test)
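Across all of these examples an ExtendedBlock is simply a (block pool id, block) pair. A minimal sketch of the two construction patterns seen above, with illustrative values; the four-argument constructor shape is an assumption about the protocol API:

// Illustrative values; in the examples above these come from the cluster.
String poolId = "BP-12345-127.0.0.1-1400000000000";
Block localBlock = new Block(1073741825L, 4096L, 1001L);

// Wrap an existing Block with its pool id (Examples 96 and 97).
ExtendedBlock fromBlock = new ExtendedBlock(poolId, localBlock);

// Or build one from raw identifiers: pool id, block id, length in bytes,
// and generation stamp (constructor shape assumed).
ExtendedBlock fromIds = new ExtendedBlock(poolId, 1073741825L, 4096L, 1001L);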

Aggregations

ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 208
Test (org.junit.Test): 124
Path (org.apache.hadoop.fs.Path): 91
Configuration (org.apache.hadoop.conf.Configuration): 71
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 63
FileSystem (org.apache.hadoop.fs.FileSystem): 62
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 55
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 53
IOException (java.io.IOException): 41
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 41
Block (org.apache.hadoop.hdfs.protocol.Block): 38
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 34
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 32
File (java.io.File): 22
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 20
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 20
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 18
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 18
InetSocketAddress (java.net.InetSocketAddress): 17
ArrayList (java.util.ArrayList): 17