
Example 1 with ReplicaOutputStreams

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams in project hadoop by apache.

From class TestFileAppend, method testConcurrentAppendRead.

@Test(timeout = 10000)
public void testConcurrentAppendRead() throws IOException, TimeoutException, InterruptedException {
    // Create a finalized replica and append to it
    // Read block data and checksum. Verify checksum.
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
    conf.setInt("dfs.min.replication", 1);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        cluster.waitActive();
        DataNode dn = cluster.getDataNodes().get(0);
        FsDatasetSpi<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
        // create a file with 1 byte of data.
        long initialFileLength = 1;
        DistributedFileSystem fs = cluster.getFileSystem();
        Path fileName = new Path("/appendCorruptBlock");
        DFSTestUtil.createFile(fs, fileName, initialFileLength, (short) 1, 0);
        DFSTestUtil.waitReplication(fs, fileName, (short) 1);
        Assert.assertTrue("File not created", fs.exists(fileName));
        // Call FsDatasetImpl#append to append the block file,
        // which converts it to a rbw replica.
        ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
        long newGS = block.getGenerationStamp() + 1;
        ReplicaHandler replicaHandler = dataSet.append(block, newGS, initialFileLength);
        // write data to block file
        ReplicaBeingWritten rbw = (ReplicaBeingWritten) replicaHandler.getReplica();
        ReplicaOutputStreams outputStreams = rbw.createStreams(false, DEFAULT_CHECKSUM);
        OutputStream dataOutput = outputStreams.getDataOut();
        byte[] appendBytes = new byte[1];
        dataOutput.write(appendBytes, 0, 1);
        dataOutput.flush();
        dataOutput.close();
        // update checksum file
        final int smallBufferSize = DFSUtilClient.getSmallBufferSize(conf);
        FsDatasetUtil.computeChecksum(rbw.getMetaFile(), rbw.getMetaFile(), rbw.getBlockFile(), smallBufferSize, conf);
        // read the block
        // the DataNode BlockSender should read from the rbw replica's in-memory
        // checksum, rather than on-disk checksum. Otherwise it will see a
        // checksum mismatch error.
        final byte[] readBlock = DFSTestUtil.readFileBuffer(fs, fileName);
        assertEquals("should have read only one byte!", 1, readBlock.length);
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) ReplicaBeingWritten(org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten) Configuration(org.apache.hadoop.conf.Configuration) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) OutputStream(java.io.OutputStream) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) ReplicaHandler(org.apache.hadoop.hdfs.server.datanode.ReplicaHandler) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) ReplicaOutputStreams(org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams) Test(org.junit.Test)
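Note on DEFAULT_CHECKSUM: in the test above it is a constant defined elsewhere in TestFileAppend and not shown in this snippet. As a hedged sketch (the checksum type and bytes-per-checksum used by the real constant may differ), such a constant can be built with the same DataChecksum.newDataChecksum factory that Examples 3 and 4 below call directly:

// Hypothetical stand-in for TestFileAppend's DEFAULT_CHECKSUM; the actual
// constant may use a different checksum type or chunk size.
import org.apache.hadoop.util.DataChecksum;

class ChecksumConstants {
    static final DataChecksum DEFAULT_CHECKSUM =
            DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512);
}

Passing isCreate = false to createStreams is what sends Example 2's implementation down its append path, where the existing on-disk checksum header is re-read and enforced instead of the requested one.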

Example 2 with ReplicaOutputStreams

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams in project hadoop by apache.

From class LocalReplicaInPipeline, method createStreams.

// ReplicaInPipeline
@Override
public ReplicaOutputStreams createStreams(boolean isCreate, DataChecksum requestedChecksum) throws IOException {
    final File blockFile = getBlockFile();
    final File metaFile = getMetaFile();
    if (DataNode.LOG.isDebugEnabled()) {
        DataNode.LOG.debug("writeTo blockfile is " + blockFile + " of size " + blockFile.length());
        DataNode.LOG.debug("writeTo metafile is " + metaFile + " of size " + metaFile.length());
    }
    long blockDiskSize = 0L;
    long crcDiskSize = 0L;
    // the checksum that should actually be used -- this
    // may differ from requestedChecksum for appends.
    final DataChecksum checksum;
    final RandomAccessFile metaRAF = getFileIoProvider().getRandomAccessFile(getVolume(), metaFile, "rw");
    if (!isCreate) {
        // For append or recovery, we must enforce the existing checksum.
        // Also, verify that the file has correct lengths, etc.
        boolean checkedMeta = false;
        try {
            BlockMetadataHeader header = BlockMetadataHeader.readHeader(metaRAF);
            checksum = header.getChecksum();
            if (checksum.getBytesPerChecksum() != requestedChecksum.getBytesPerChecksum()) {
                throw new IOException("Client requested checksum " + requestedChecksum + " when appending to an existing block " + "with different chunk size: " + checksum);
            }
            int bytesPerChunk = checksum.getBytesPerChecksum();
            int checksumSize = checksum.getChecksumSize();
            blockDiskSize = bytesOnDisk;
            crcDiskSize = BlockMetadataHeader.getHeaderSize() + (blockDiskSize + bytesPerChunk - 1) / bytesPerChunk * checksumSize;
            if (blockDiskSize > 0 && (blockDiskSize > blockFile.length() || crcDiskSize > metaFile.length())) {
                throw new IOException("Corrupted block: " + this);
            }
            checkedMeta = true;
        } finally {
            if (!checkedMeta) {
                // clean up in case of exceptions.
                IOUtils.closeStream(metaRAF);
            }
        }
    } else {
        // for create, we can use the requested checksum
        checksum = requestedChecksum;
    }
    final FileIoProvider fileIoProvider = getFileIoProvider();
    FileOutputStream blockOut = null;
    FileOutputStream crcOut = null;
    try {
        blockOut = fileIoProvider.getFileOutputStream(getVolume(), new RandomAccessFile(blockFile, "rw").getFD());
        crcOut = fileIoProvider.getFileOutputStream(getVolume(), metaRAF.getFD());
        if (!isCreate) {
            blockOut.getChannel().position(blockDiskSize);
            crcOut.getChannel().position(crcDiskSize);
        }
        return new ReplicaOutputStreams(blockOut, crcOut, checksum, getVolume(), fileIoProvider);
    } catch (IOException e) {
        IOUtils.closeStream(blockOut);
        IOUtils.closeStream(crcOut);
        IOUtils.closeStream(metaRAF);
        throw e;
    }
}
Also used : RandomAccessFile(java.io.RandomAccessFile) FileOutputStream(java.io.FileOutputStream) IOException(java.io.IOException) ReplicaOutputStreams(org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams) RandomAccessFile(java.io.RandomAccessFile) File(java.io.File) DataChecksum(org.apache.hadoop.util.DataChecksum)
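The crcDiskSize computed above is the expected size of the metadata (.meta) file for blockDiskSize bytes of block data: the metadata header plus one checksum per chunk, with a partial final chunk rounded up. A minimal worked example of that arithmetic, using assumed illustrative numbers (1000 bytes on disk, 512-byte chunks, 4-byte CRC32 checksums, a 7-byte header) rather than values from a real replica:

// Hedged illustration of the crcDiskSize formula from createStreams above.
// All numeric inputs are assumptions chosen only to show the arithmetic.
public class CrcDiskSizeExample {
    public static void main(String[] args) {
        long blockDiskSize = 1000; // bytes of block data already on disk
        int bytesPerChunk = 512;   // checksum.getBytesPerChecksum()
        int checksumSize = 4;      // checksum.getChecksumSize() for CRC32
        long headerSize = 7;       // assumed BlockMetadataHeader.getHeaderSize()

        // Round up to whole chunks: 1000 bytes span 2 chunks of 512 bytes.
        long chunks = (blockDiskSize + bytesPerChunk - 1) / bytesPerChunk;
        long crcDiskSize = headerSize + chunks * checksumSize;

        System.out.println(crcDiskSize); // 7 + 2 * 4 = 15
    }
}

With those sizes in hand, createStreams positions the block channel at blockDiskSize and the checksum channel at crcDiskSize, so appended data and its checksums land exactly at the end of the existing files.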

Example 3 with ReplicaOutputStreams

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams in project hadoop by apache.

From class TestBlockRecovery, method testNotMatchedReplicaID.

/**
   * BlockRecoveryFI_11. A replica's recovery id does not match the new GS.
   *
   * @throws IOException in case of an error
   */
@Test(timeout = 60000)
public void testNotMatchedReplicaID() throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Running " + GenericTestUtils.getMethodName());
    }
    ReplicaInPipeline replicaInfo = dn.data.createRbw(StorageType.DEFAULT, block, false).getReplica();
    ReplicaOutputStreams streams = null;
    try {
        streams = replicaInfo.createStreams(true, DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
        streams.getChecksumOut().write('a');
        dn.data.initReplicaRecovery(new RecoveringBlock(block, null, RECOVERY_ID + 1));
        BlockRecoveryWorker.RecoveryTaskContiguous RecoveryTaskContiguous = recoveryWorker.new RecoveryTaskContiguous(rBlock);
        try {
            RecoveryTaskContiguous.syncBlock(initBlockRecords(dn));
            fail("Sync should fail");
        } catch (IOException e) {
            // assert that syncBlock failed with the expected recovery-id mismatch
            GenericTestUtils.assertExceptionContains("Cannot recover ", e);
        }
        DatanodeProtocol namenode = recoveryWorker.getActiveNamenodeForBP(POOL_ID);
        verify(namenode, never()).commitBlockSynchronization(any(ExtendedBlock.class), anyLong(), anyLong(), anyBoolean(), anyBoolean(), any(DatanodeID[].class), any(String[].class));
    } finally {
        // createStreams may have failed before streams was assigned
        if (streams != null) {
            streams.close();
        }
    }
}
Also used : RecoveringBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) ReplicaOutputStreams(org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams) IOException(java.io.IOException) InterDatanodeProtocol(org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol) DatanodeProtocol(org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol) Test(org.junit.Test)
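The verify(namenode, never()).commitBlockSynchronization(...) call uses Mockito to prove that, after the recovery-id mismatch, the DataNode never reported a successful synchronization to the NameNode. A hedged, self-contained sketch of that never() verification pattern, using an illustrative listener interface rather than the real DatanodeProtocol:

import static org.mockito.Mockito.anyLong;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;

public class NeverVerificationSketch {
    // Illustrative interface, not part of HDFS.
    interface SyncListener {
        void onSynced(long blockId);
    }

    public static void main(String[] args) {
        SyncListener listener = mock(SyncListener.class);
        // ... exercise code that is expected to fail before notifying ...
        // Fails if onSynced was invoked with any block id at all.
        verify(listener, never()).onSynced(anyLong());
    }
}

Example 3 applies the same pattern with argument matchers (any(...), anyLong(), anyBoolean()) so that any commitBlockSynchronization call whatsoever would fail the verification.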

Example 4 with ReplicaOutputStreams

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams in project hadoop by apache.

From class TestSimulatedFSDataset, method addSomeBlocks.

static int addSomeBlocks(SimulatedFSDataset fsdataset, long startingBlockId, boolean negativeBlkID) throws IOException {
    int bytesAdded = 0;
    for (long i = startingBlockId; i < startingBlockId + NUMBLOCKS; ++i) {
        long blkID = negativeBlkID ? i * -1 : i;
        ExtendedBlock b = new ExtendedBlock(bpid, blkID, 0, 0);
        // we pass the expected length as zero; fsdataset should use the size of
        // the actual data written
        ReplicaInPipeline bInfo = fsdataset.createRbw(StorageType.DEFAULT, b, false).getReplica();
        ReplicaOutputStreams out = bInfo.createStreams(true, DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
        try {
            OutputStream dataOut = out.getDataOut();
            assertEquals(0, fsdataset.getLength(b));
            for (int j = 1; j <= blockIdToLen(i); ++j) {
                dataOut.write(j);
                // correct length even as we write
                assertEquals(j, bInfo.getBytesOnDisk());
                bytesAdded++;
            }
        } finally {
            out.close();
        }
        b.setNumBytes(blockIdToLen(i));
        fsdataset.finalizeBlock(b);
        assertEquals(blockIdToLen(i), fsdataset.getLength(b));
    }
    return bytesAdded;
}
Also used : OutputStream(java.io.OutputStream) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) ReplicaOutputStreams(org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams)
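addSomeBlocks depends on two TestSimulatedFSDataset helpers that are not shown here: the NUMBLOCKS constant and blockIdToLen, which maps a block id to a deterministic data length so the bytes written in the loop match what getLength(b) later reports. A hedged sketch of what such helpers could look like; the real constant and formula in Hadoop's test may differ:

// Hypothetical stand-ins for TestSimulatedFSDataset's NUMBLOCKS and
// blockIdToLen; the actual values and formula in the real test may differ.
class BlockLenHelpers {
    static final int NUMBLOCKS = 10;

    // Deterministic per-block length derived from the block id, so the write
    // loop and the final getLength assertion agree on the same value.
    static int blockIdToLen(long blockId) {
        return (int) (Math.abs(blockId) % 64) + 1; // always at least 1 byte
    }
}

Because the ExtendedBlock is created with a length of zero, the simulated dataset's reported length comes entirely from the bytes actually written through ReplicaOutputStreams, which is what the per-iteration assertEquals on getBytesOnDisk checks.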

Aggregations

ReplicaOutputStreams (org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams): 4
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 3
IOException (java.io.IOException): 2
OutputStream (java.io.OutputStream): 2
Test (org.junit.Test): 2
File (java.io.File): 1
FileOutputStream (java.io.FileOutputStream): 1
RandomAccessFile (java.io.RandomAccessFile): 1
Configuration (org.apache.hadoop.conf.Configuration): 1
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 1
Path (org.apache.hadoop.fs.Path): 1
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 1
ReplicaBeingWritten (org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten): 1
ReplicaHandler (org.apache.hadoop.hdfs.server.datanode.ReplicaHandler): 1
RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock): 1
DatanodeProtocol (org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol): 1
InterDatanodeProtocol (org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol): 1
DataChecksum (org.apache.hadoop.util.DataChecksum): 1