Example 1 with PipelineAck

Use of org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck in the Apache Hadoop project.

From the class TestDataTransferProtocol, method testDataTransferProtocol:

@Test
public void testDataTransferProtocol() throws IOException {
    Random random = new Random();
    int oneMil = 1024 * 1024;
    Path file = new Path("dataprotocol.dat");
    int numDataNodes = 1;
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, numDataNodes);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
    try {
        cluster.waitActive();
        datanode = cluster.getFileSystem().getDataNodeStats(DatanodeReportType.LIVE)[0];
        dnAddr = NetUtils.createSocketAddr(datanode.getXferAddr());
        FileSystem fileSys = cluster.getFileSystem();
        int fileLen = Math.min(conf.getInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096), 4096);
        DFSTestUtil.createFile(fileSys, file, fileLen, fileLen, fileSys.getDefaultBlockSize(file), fileSys.getDefaultReplication(file), 0L);
        // get the first blockid for the file
        final ExtendedBlock firstBlock = DFSTestUtil.getFirstBlock(fileSys, file);
        final String poolId = firstBlock.getBlockPoolId();
        long newBlockId = firstBlock.getBlockId() + 1;
        recvBuf.reset();
        sendBuf.reset();
        // bad version
        recvOut.writeShort((short) (DataTransferProtocol.DATA_TRANSFER_VERSION - 1));
        sendOut.writeShort((short) (DataTransferProtocol.DATA_TRANSFER_VERSION - 1));
        sendRecvData("Wrong Version", true);
        // bad ops
        sendBuf.reset();
        sendOut.writeShort((short) DataTransferProtocol.DATA_TRANSFER_VERSION);
        sendOut.writeByte(Op.WRITE_BLOCK.code - 1);
        sendRecvData("Wrong Op Code", true);
        /* Test OP_WRITE_BLOCK */
        sendBuf.reset();
        DataChecksum badChecksum = Mockito.spy(DEFAULT_CHECKSUM);
        Mockito.doReturn(-1).when(badChecksum).getBytesPerChecksum();
        writeBlock(poolId, newBlockId, badChecksum);
        recvBuf.reset();
        sendResponse(Status.ERROR, null, null, recvOut);
        sendRecvData("wrong bytesPerChecksum while writing", true);
        sendBuf.reset();
        recvBuf.reset();
        writeBlock(poolId, ++newBlockId, DEFAULT_CHECKSUM);
        PacketHeader hdr = new PacketHeader(
            4,                           // size of packet
            0,                           // offset in block
            100,                         // seqno
            false,                       // last packet in block
            -1 - random.nextInt(oneMil), // bad datalen
            false);                      // sync block
        hdr.write(sendOut);
        sendResponse(Status.SUCCESS, "", null, recvOut);
        new PipelineAck(100, new int[] { PipelineAck.combineHeader(PipelineAck.ECN.DISABLED, Status.ERROR) }).write(recvOut);
        sendRecvData("negative DATA_CHUNK len while writing block " + newBlockId, true);
        // test for writing a valid zero size block
        sendBuf.reset();
        recvBuf.reset();
        writeBlock(poolId, ++newBlockId, DEFAULT_CHECKSUM);
        hdr = new PacketHeader(
            8,      // size of packet
            0,      // offset in block
            100,    // sequence number
            true,   // last packet in block
            0,      // chunk length
            false); // sync block
        hdr.write(sendOut);
        // zero checksum
        sendOut.writeInt(0);
        sendOut.flush();
        //ok finally write a block with 0 len
        sendResponse(Status.SUCCESS, "", null, recvOut);
        new PipelineAck(100, new int[] { PipelineAck.combineHeader(PipelineAck.ECN.DISABLED, Status.SUCCESS) }).write(recvOut);
        sendRecvData("Writing a zero len block blockid " + newBlockId, false);
        /* Test OP_READ_BLOCK */
        String bpid = cluster.getNamesystem().getBlockPoolId();
        ExtendedBlock blk = new ExtendedBlock(bpid, firstBlock.getLocalBlock());
        long blkid = blk.getBlockId();
        // bad block id
        sendBuf.reset();
        recvBuf.reset();
        blk.setBlockId(blkid - 1);
        sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl", 0L, fileLen, true, CachingStrategy.newDefaultStrategy());
        sendRecvData("Wrong block ID " + newBlockId + " for read", false);
        // negative block start offset -1L
        sendBuf.reset();
        blk.setBlockId(blkid);
        sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl", -1L, fileLen, true, CachingStrategy.newDefaultStrategy());
        sendRecvData("Negative start-offset for read for block " + firstBlock.getBlockId(), false);
        // bad block start offset
        sendBuf.reset();
        sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl", fileLen, fileLen, true, CachingStrategy.newDefaultStrategy());
        sendRecvData("Wrong start-offset for reading block " + firstBlock.getBlockId(), false);
        // negative length is ok. Datanode assumes we want to read the whole block.
        recvBuf.reset();
        BlockOpResponseProto.newBuilder().setStatus(Status.SUCCESS).setReadOpChecksumInfo(ReadOpChecksumInfoProto.newBuilder().setChecksum(DataTransferProtoUtil.toProto(DEFAULT_CHECKSUM)).setChunkOffset(0L)).build().writeDelimitedTo(recvOut);
        sendBuf.reset();
        sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl", 0L, -1L - random.nextInt(oneMil), true, CachingStrategy.newDefaultStrategy());
        sendRecvData("Negative length for reading block " + firstBlock.getBlockId(), false);
        // length is more than size of block.
        recvBuf.reset();
        sendResponse(Status.ERROR, null, "opReadBlock " + firstBlock + " received exception java.io.IOException:  " + "Offset 0 and length 4097 don't match block " + firstBlock + " ( blockLen 4096 )", recvOut);
        sendBuf.reset();
        sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl", 0L, fileLen + 1, true, CachingStrategy.newDefaultStrategy());
        sendRecvData("Wrong length for reading block " + firstBlock.getBlockId(), false);
        //At the end of all this, read the file to make sure that succeeds finally.
        sendBuf.reset();
        sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl", 0L, fileLen, true, CachingStrategy.newDefaultStrategy());
        readFile(fileSys, file, fileLen);
    } finally {
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), Builder (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.Builder), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), PipelineAck (org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck), DataChecksum (org.apache.hadoop.util.DataChecksum), Random (java.util.Random), FileSystem (org.apache.hadoop.fs.FileSystem), PacketHeader (org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader), Test (org.junit.Test)
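
The examples in this class exchange the same small set of PipelineAck calls: combineHeader packs an ECN value and a reply Status into one int per datanode, the (seqno, int[]) constructor builds the ack, write serializes it onto a stream, and readFields/getHeaderFlag read it back. The following standalone sketch shows that round trip outside the MiniDFSCluster harness; it assumes only the calls visible in these examples plus a getSeqno() accessor, and uses plain java.io streams in place of the test's sendOut/recvOut buffers.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;

import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;

public class PipelineAckRoundTrip {
    public static void main(String[] args) throws Exception {
        // One header flag per datanode in the pipeline; here a single SUCCESS reply.
        PipelineAck sent = new PipelineAck(100, new int[] {
            PipelineAck.combineHeader(PipelineAck.ECN.DISABLED, Status.SUCCESS) });

        // Serialize the ack, just as the test writes the expected ack into recvOut.
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        sent.write(bytes);

        // Read the same bytes back, as a downstream ack reader would.
        PipelineAck received = new PipelineAck();
        received.readFields(new ByteArrayInputStream(bytes.toByteArray()));

        System.out.println(received.getSeqno());      // 100 (assumed accessor)
        System.out.println(received.getHeaderFlag(0) ==
            PipelineAck.combineHeader(PipelineAck.ECN.DISABLED, Status.SUCCESS)); // true
    }
}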

Example 2 with PipelineAck

Use of org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck in the Apache Hadoop project.

From the class TestDataTransferProtocol, method writeZeroLengthPacket:

private void writeZeroLengthPacket(ExtendedBlock block, String description) throws IOException {
    PacketHeader hdr = new PacketHeader(
        8,                   // size of packet
        block.getNumBytes(), // offset in block
        100,                 // sequence number
        true,                // last packet in block
        0,                   // chunk length
        false);              // sync block
    hdr.write(sendOut);
    // zero checksum
    sendOut.writeInt(0);
    //ok finally write a block with 0 len
    sendResponse(Status.SUCCESS, "", null, recvOut);
    new PipelineAck(100, new int[] { PipelineAck.combineHeader(PipelineAck.ECN.DISABLED, Status.SUCCESS) }).write(recvOut);
    sendRecvData(description, false);
}
Also used: PacketHeader (org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader), PipelineAck (org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck)
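
The helper above frames a final, empty packet: a PacketHeader marked lastPacketInBlock with a data length of 0, followed by a single zero checksum int. Below is a minimal sketch of that framing on its own, using only the PacketHeader constructor and write(DataOutputStream) call shown here; the literal 0 offset stands in for the helper's block.getNumBytes().

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;

import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;

public class ZeroLengthPacketFraming {
    public static void main(String[] args) throws Exception {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(buf);

        PacketHeader hdr = new PacketHeader(
            8,      // size of packet
            0,      // offset in block (block.getNumBytes() in the helper)
            100,    // sequence number
            true,   // last packet in block
            0,      // data length: empty payload
            false); // sync block

        hdr.write(out);   // serialized packet header
        out.writeInt(0);  // the zero checksum that follows it
        out.flush();

        System.out.println("framed " + buf.size() + " bytes for the empty last packet");
    }
}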

Example 3 with PipelineAck

Use of org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck in the Apache Hadoop project.

From the class TestDataTransferProtocol, method TestPipeLineAckCompatibility:

@Test
public void TestPipeLineAckCompatibility() throws IOException {
    DataTransferProtos.PipelineAckProto proto = DataTransferProtos.PipelineAckProto.newBuilder().setSeqno(0).addReply(Status.CHECKSUM_OK).build();
    DataTransferProtos.PipelineAckProto newProto = DataTransferProtos.PipelineAckProto.newBuilder().mergeFrom(proto).addFlag(PipelineAck.combineHeader(PipelineAck.ECN.SUPPORTED, Status.CHECKSUM_OK)).build();
    ByteArrayOutputStream oldAckBytes = new ByteArrayOutputStream();
    proto.writeDelimitedTo(oldAckBytes);
    PipelineAck oldAck = new PipelineAck();
    oldAck.readFields(new ByteArrayInputStream(oldAckBytes.toByteArray()));
    assertEquals(PipelineAck.combineHeader(PipelineAck.ECN.DISABLED, Status.CHECKSUM_OK), oldAck.getHeaderFlag(0));
    PipelineAck newAck = new PipelineAck();
    ByteArrayOutputStream newAckBytes = new ByteArrayOutputStream();
    newProto.writeDelimitedTo(newAckBytes);
    newAck.readFields(new ByteArrayInputStream(newAckBytes.toByteArray()));
    assertEquals(PipelineAck.combineHeader(PipelineAck.ECN.SUPPORTED, Status.CHECKSUM_OK), newAck.getHeaderFlag(0));
}
Also used: ByteArrayInputStream (java.io.ByteArrayInputStream), ByteArrayOutputStream (java.io.ByteArrayOutputStream), DataTransferProtos (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos), PipelineAck (org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck), Test (org.junit.Test)
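
The compatibility test above checks the combined header flag directly against combineHeader. On the receiving side the flag can also be unpacked again; the sketch below assumes PipelineAck exposes getStatusFromHeader and getECNFromHeader as the inverse helpers of combineHeader (if your Hadoop version lacks them, compare whole flags as the test does).

import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;

public class HeaderFlagUnpack {
    public static void main(String[] args) {
        int flag = PipelineAck.combineHeader(PipelineAck.ECN.SUPPORTED, Status.CHECKSUM_OK);

        // Recover the two values that were combined into the flag (assumed helpers).
        Status status = PipelineAck.getStatusFromHeader(flag);
        PipelineAck.ECN ecn = PipelineAck.getECNFromHeader(flag);

        System.out.println(status); // CHECKSUM_OK
        System.out.println(ecn);    // SUPPORTED
    }
}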

Aggregations

PipelineAck (org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck): 3 usages
PacketHeader (org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader): 2 usages
Test (org.junit.Test): 2 usages
ByteArrayInputStream (java.io.ByteArrayInputStream): 1 usage
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 1 usage
Random (java.util.Random): 1 usage
Configuration (org.apache.hadoop.conf.Configuration): 1 usage
FileSystem (org.apache.hadoop.fs.FileSystem): 1 usage
Path (org.apache.hadoop.fs.Path): 1 usage
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 1 usage
DataTransferProtos (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos): 1 usage
Builder (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.Builder): 1 usage
DataChecksum (org.apache.hadoop.util.DataChecksum): 1 usage