
Example 1 with ChecksumData

Use of org.apache.hadoop.ozone.common.ChecksumData in the Apache Ozone project.

From the class DatanodeChunkValidator, the method validateChunk:

private void validateChunk(long stepNo) throws Exception {
    ContainerCommandRequestProto request = createReadChunkRequest(stepNo);
    timer.time(() -> {
        try {
            ContainerCommandResponseProto response = xceiverClient.sendCommand(request);
            ChecksumData checksumOfChunk = computeChecksum(response);
            if (!checksumReference.equals(checksumOfChunk)) {
                throw new IllegalStateException(
                    "Reference (=first) message checksum doesn't match "
                        + "with checksum of chunk "
                        + response.getReadChunk().getChunkData().getChunkName());
            }
        } catch (IOException e) {
            LOG.warn("Could not read chunk due to IOException: ", e);
        }
    });
}
Also used : ChecksumData(org.apache.hadoop.ozone.common.ChecksumData) ContainerCommandRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto) IOException(java.io.IOException) ContainerCommandResponseProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto)
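
For context, the computeChecksum(response) call above recomputes a ChecksumData over the returned chunk bytes so it can be compared with the reference taken from the first chunk. A minimal sketch is below; the getReadChunk().getData() accessor and the CRC32 / 1 MB parameters are assumptions, not necessarily the validator's actual implementation.

private ChecksumData computeChecksum(ContainerCommandResponseProto response)
        throws OzoneChecksumException {
    // Sketch only: the accessor and checksum parameters here are assumptions.
    Checksum checksum = new Checksum(ContainerProtos.ChecksumType.CRC32, 1024 * 1024);
    return checksum.computeChecksum(response.getReadChunk().getData().toByteArray());
}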

Example 2 with ChecksumData

Use of org.apache.hadoop.ozone.common.ChecksumData in the Apache Ozone project.

From the class KeyValueContainerCheck, the method verifyChecksum:

private static void verifyChecksum(BlockData block, ContainerProtos.ChunkInfo chunk,
        File chunkFile, ContainerLayoutVersion layout, DataTransferThrottler throttler,
        Canceler canceler) throws IOException {
    ChecksumData checksumData = ChecksumData.getFromProtoBuf(chunk.getChecksumData());
    int checksumCount = checksumData.getChecksums().size();
    int bytesPerChecksum = checksumData.getBytesPerChecksum();
    Checksum cal = new Checksum(checksumData.getChecksumType(), bytesPerChecksum);
    ByteBuffer buffer = ByteBuffer.allocate(bytesPerChecksum);
    long bytesRead = 0;
    try (FileChannel channel = FileChannel.open(chunkFile.toPath(), ChunkUtils.READ_OPTIONS, ChunkUtils.NO_ATTRIBUTES)) {
        if (layout == ContainerLayoutVersion.FILE_PER_BLOCK) {
            channel.position(chunk.getOffset());
        }
        for (int i = 0; i < checksumCount; i++) {
            // limit last read for FILE_PER_BLOCK, to avoid reading next chunk
            if (layout == ContainerLayoutVersion.FILE_PER_BLOCK && i == checksumCount - 1 && chunk.getLen() % bytesPerChecksum != 0) {
                buffer.limit((int) (chunk.getLen() % bytesPerChecksum));
            }
            int v = channel.read(buffer);
            if (v == -1) {
                break;
            }
            bytesRead += v;
            buffer.flip();
            throttler.throttle(v, canceler);
            ByteString expected = checksumData.getChecksums().get(i);
            ByteString actual = cal.computeChecksum(buffer).getChecksums().get(0);
            if (!expected.equals(actual)) {
                throw new OzoneChecksumException(String.format(
                    "Inconsistent read for chunk=%s" + " checksum item %d"
                        + " expected checksum %s" + " actual checksum %s"
                        + " for block %s",
                    ChunkInfo.getFromProtoBuf(chunk), i,
                    Arrays.toString(expected.toByteArray()),
                    Arrays.toString(actual.toByteArray()), block.getBlockID()));
            }
        }
        if (bytesRead != chunk.getLen()) {
            throw new OzoneChecksumException(String.format(
                "Inconsistent read for chunk=%s expected length=%d"
                    + " actual length=%d for block %s",
                chunk.getChunkName(), chunk.getLen(), bytesRead, block.getBlockID()));
        }
    }
}
Also used : ChecksumData(org.apache.hadoop.ozone.common.ChecksumData) OzoneChecksumException(org.apache.hadoop.ozone.common.OzoneChecksumException) Checksum(org.apache.hadoop.ozone.common.Checksum) FileChannel(java.nio.channels.FileChannel) ByteString(org.apache.ratis.thirdparty.com.google.protobuf.ByteString) ByteBuffer(java.nio.ByteBuffer)
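
For reference, the loop bounds above follow from simple slice arithmetic: a chunk of len bytes is covered by ceil(len / bytesPerChecksum) checksum entries, and only the last entry of a FILE_PER_BLOCK chunk may cover fewer than bytesPerChecksum bytes. The helpers below are illustrative only (hypothetical names, not project code).

// Illustrative helpers mirroring the loop's bounds; names are hypothetical.
static int checksumSlices(long len, int bytesPerChecksum) {
    return (int) ((len + bytesPerChecksum - 1) / bytesPerChecksum);
}

static int lastSliceLength(long len, int bytesPerChecksum) {
    // Matches buffer.limit(len % bytesPerChecksum) applied to the final read.
    int remainder = (int) (len % bytesPerChecksum);
    return remainder == 0 ? bytesPerChecksum : remainder;
}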

Example 3 with ChecksumData

Use of org.apache.hadoop.ozone.common.ChecksumData in the Apache Ozone project.

From the class TestKeyValueContainerIntegrityChecks, the method createContainerWithBlocks:

/**
 * Creates a container with normal and deleted blocks.
 * First it will insert normal blocks, and then it will insert
 * deleted blocks.
 */
protected KeyValueContainer createContainerWithBlocks(long containerId, int normalBlocks, int deletedBlocks) throws Exception {
    String strBlock = "block";
    String strChunk = "-chunkFile";
    long totalBlocks = normalBlocks + deletedBlocks;
    int bytesPerChecksum = 2 * UNIT_LEN;
    Checksum checksum = new Checksum(ContainerProtos.ChecksumType.SHA256, bytesPerChecksum);
    byte[] chunkData = RandomStringUtils.randomAscii(CHUNK_LEN).getBytes(UTF_8);
    ChecksumData checksumData = checksum.computeChecksum(chunkData);
    DispatcherContext writeStage = new DispatcherContext.Builder()
        .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA).build();
    DispatcherContext commitStage = new DispatcherContext.Builder()
        .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA).build();
    KeyValueContainerData containerData = new KeyValueContainerData(containerId,
        containerLayoutTestInfo.getLayout(),
        (long) CHUNKS_PER_BLOCK * CHUNK_LEN * totalBlocks,
        UUID.randomUUID().toString(), UUID.randomUUID().toString());
    KeyValueContainer container = new KeyValueContainer(containerData, conf);
    container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), UUID.randomUUID().toString());
    try (ReferenceCountedDB metadataStore = BlockUtils.getDB(containerData, conf)) {
        assertNotNull(containerData.getChunksPath());
        File chunksPath = new File(containerData.getChunksPath());
        containerLayoutTestInfo.validateFileCount(chunksPath, 0, 0);
        List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
        for (int i = 0; i < totalBlocks; i++) {
            BlockID blockID = new BlockID(containerId, i);
            BlockData blockData = new BlockData(blockID);
            chunkList.clear();
            for (long chunkCount = 0; chunkCount < CHUNKS_PER_BLOCK; chunkCount++) {
                String chunkName = strBlock + i + strChunk + chunkCount;
                long offset = chunkCount * CHUNK_LEN;
                ChunkInfo info = new ChunkInfo(chunkName, offset, CHUNK_LEN);
                info.setChecksumData(checksumData);
                chunkList.add(info.getProtoBufMessage());
                chunkManager.writeChunk(container, blockID, info, ByteBuffer.wrap(chunkData), writeStage);
                chunkManager.writeChunk(container, blockID, info, ByteBuffer.wrap(chunkData), commitStage);
            }
            blockData.setChunks(chunkList);
            // normal key
            String key = Long.toString(blockID.getLocalID());
            if (i >= normalBlocks) {
                // deleted key
                key = OzoneConsts.DELETING_KEY_PREFIX + blockID.getLocalID();
            }
            metadataStore.getStore().getBlockDataTable().put(key, blockData);
        }
        containerLayoutTestInfo.validateFileCount(chunksPath, totalBlocks, totalBlocks * CHUNKS_PER_BLOCK);
    }
    return container;
}
Also used : RoundRobinVolumeChoosingPolicy(org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy) ChunkInfo(org.apache.hadoop.ozone.container.common.helpers.ChunkInfo) ChecksumData(org.apache.hadoop.ozone.common.ChecksumData) DispatcherContext(org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext) ArrayList(java.util.ArrayList) ReferenceCountedDB(org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB) Checksum(org.apache.hadoop.ozone.common.Checksum) BlockID(org.apache.hadoop.hdds.client.BlockID) BlockData(org.apache.hadoop.ozone.container.common.helpers.BlockData) File(java.io.File)
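
The test attaches one precomputed ChecksumData to every ChunkInfo before converting it to its protobuf form. A minimal round-trip sketch using only the calls shown above is below; the placeholder payload and the 2 KB bytes-per-checksum stand in for the test's CHUNK_LEN and 2 * UNIT_LEN values.

// Sketch: a ChecksumData set on a helper ChunkInfo survives the protobuf round trip.
static void checksumRoundTrip() throws OzoneChecksumException {
    byte[] chunkData = "placeholder-chunk-data".getBytes(UTF_8);
    Checksum checksum = new Checksum(ContainerProtos.ChecksumType.SHA256, 2 * 1024);
    ChecksumData original = checksum.computeChecksum(chunkData);
    ChunkInfo info = new ChunkInfo("block0-chunkFile0", 0, chunkData.length);
    info.setChecksumData(original);
    ChecksumData restored =
        ChecksumData.getFromProtoBuf(info.getProtoBufMessage().getChecksumData());
    assert original.equals(restored);
}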

Example 4 with ChecksumData

Use of org.apache.hadoop.ozone.common.ChecksumData in the Apache Ozone project.

From the class ContainerProtocolCalls, the method writeSmallFile:

/**
 * Allows writing a small file using single RPC. This takes the container
 * name, block name and data to write sends all that data to the container
 * using a single RPC. This API is designed to be used for files which are
 * smaller than 1 MB.
 *
 * @param client - client that communicates with the container.
 * @param blockID - ID of the block
 * @param data - Data to be written into the container.
 * @param token a token for this block (may be null)
 * @return container protocol writeSmallFile response
 * @throws IOException
 */
public static PutSmallFileResponseProto writeSmallFile(XceiverClientSpi client,
        BlockID blockID, byte[] data, Token<OzoneBlockTokenIdentifier> token)
        throws IOException {
    BlockData containerBlockData = BlockData.newBuilder()
        .setBlockID(blockID.getDatanodeBlockIDProtobuf()).build();
    PutBlockRequestProto.Builder createBlockRequest =
        PutBlockRequestProto.newBuilder().setBlockData(containerBlockData);
    KeyValue keyValue = KeyValue.newBuilder()
        .setKey("OverWriteRequested").setValue("true").build();
    Checksum checksum = new Checksum(ChecksumType.CRC32, 256);
    final ChecksumData checksumData = checksum.computeChecksum(data);
    ChunkInfo chunk = ChunkInfo.newBuilder()
        .setChunkName(blockID.getLocalID() + "_chunk")
        .setOffset(0).setLen(data.length)
        .addMetadata(keyValue)
        .setChecksumData(checksumData.getProtoBufMessage()).build();
    PutSmallFileRequestProto putSmallFileRequest = PutSmallFileRequestProto.newBuilder()
        .setChunkInfo(chunk).setBlock(createBlockRequest)
        .setData(ByteString.copyFrom(data)).build();
    String id = client.getPipeline().getFirstNode().getUuidString();
    ContainerCommandRequestProto.Builder builder = ContainerCommandRequestProto.newBuilder()
        .setCmdType(Type.PutSmallFile)
        .setContainerID(blockID.getContainerID())
        .setDatanodeUuid(id).setPutSmallFile(putSmallFileRequest);
    if (token != null) {
        builder.setEncodedToken(token.encodeToUrlString());
    }
    ContainerCommandRequestProto request = builder.build();
    ContainerCommandResponseProto response = client.sendCommand(request, getValidatorList());
    return response.getPutSmallFile();
}
Also used : PutSmallFileRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutSmallFileRequestProto) KeyValue(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue) ChunkInfo(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo) ChecksumData(org.apache.hadoop.ozone.common.ChecksumData) PutBlockRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutBlockRequestProto) Checksum(org.apache.hadoop.ozone.common.Checksum) ContainerCommandRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto) ByteString(org.apache.ratis.thirdparty.com.google.protobuf.ByteString) BlockData(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData) ContainerCommandResponseProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto)
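
A hypothetical caller of writeSmallFile might look like the sketch below; the XceiverClientSpi and BlockID are assumed to be supplied by the surrounding application, and passing null for the token relies on the documented "may be null" behavior.

// Hypothetical usage sketch; client and blockID are placeholders supplied elsewhere.
static PutSmallFileResponseProto putSmallFileExample(XceiverClientSpi client,
        BlockID blockID) throws IOException {
    byte[] payload = "small-file-payload".getBytes(StandardCharsets.UTF_8);
    // No block token is attached; the token parameter is documented as nullable.
    return ContainerProtocolCalls.writeSmallFile(client, blockID, payload, null);
}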

Example 5 with ChecksumData

Use of org.apache.hadoop.ozone.common.ChecksumData in the Apache Ozone project.

From the class TestContainerPersistence, the method testWritReadManyChunks:

/**
 * Writes many chunks of the same block into different chunk files and
 * verifies that we have that data in many files.
 *
 * @throws IOException
 * @throws NoSuchAlgorithmException
 */
@Test
public void testWritReadManyChunks() throws IOException {
    final int datalen = 1024;
    final int chunkCount = 1024;
    long testContainerID = getTestContainerID();
    Container container = addContainer(containerSet, testContainerID);
    BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
    List<ChunkInfo> chunks = new ArrayList<>(chunkCount);
    BlockData blockData = new BlockData(blockID);
    for (int x = 0; x < chunkCount; x++) {
        ChunkInfo info = getChunk(blockID.getLocalID(), x, x * datalen, datalen);
        ChunkBuffer data = getData(datalen);
        setDataChecksum(info, data);
        chunkManager.writeChunk(container, blockID, info, data, getDispatcherContext());
        chunks.add(info);
        blockData.addChunk(info.getProtoBufMessage());
    }
    blockManager.putBlock(container, blockData);
    KeyValueContainerData cNewData = (KeyValueContainerData) container.getContainerData();
    Assert.assertNotNull(cNewData);
    Path dataDir = Paths.get(cNewData.getChunksPath());
    // Read chunk via file system and verify.
    Checksum checksum = new Checksum(ChecksumType.CRC32, 1024 * 1024);
    // Read chunk via ReadChunk call.
    for (int x = 0; x < chunkCount; x++) {
        ChunkInfo info = chunks.get(x);
        ChunkBuffer data = chunkManager.readChunk(container, blockID, info, getDispatcherContext());
        ChecksumData checksumData = checksum.computeChecksum(data);
        Assert.assertEquals(info.getChecksumData(), checksumData);
    }
}
Also used : Path(java.nio.file.Path) ChunkInfo(org.apache.hadoop.ozone.container.common.helpers.ChunkInfo) ChecksumData(org.apache.hadoop.ozone.common.ChecksumData) ArrayList(java.util.ArrayList) KeyValueContainerData(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData) KeyValueContainer(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer) Container(org.apache.hadoop.ozone.container.common.interfaces.Container) ContainerTestHelper.setDataChecksum(org.apache.hadoop.ozone.container.ContainerTestHelper.setDataChecksum) Checksum(org.apache.hadoop.ozone.common.Checksum) BlockID(org.apache.hadoop.hdds.client.BlockID) ChunkBuffer(org.apache.hadoop.ozone.common.ChunkBuffer) BlockData(org.apache.hadoop.ozone.container.common.helpers.BlockData) Test(org.junit.Test)
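
The test relies on ContainerTestHelper.setDataChecksum to stamp each ChunkInfo before the write. A plausible shape for such a helper is sketched below; this is an assumption about what the helper does, not its actual implementation.

// Hypothetical sketch of a setDataChecksum-style helper: compute a ChecksumData
// over the chunk buffer and store it on the ChunkInfo so the read path above
// can compare against it.
static void setDataChecksumSketch(ChunkInfo info, ChunkBuffer data)
        throws OzoneChecksumException {
    Checksum checksum = new Checksum(ChecksumType.CRC32, 1024 * 1024);
    info.setChecksumData(checksum.computeChecksum(data));
    // If computeChecksum advances the buffer position, rewind before writing the chunk.
}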

Aggregations

ChecksumData (org.apache.hadoop.ozone.common.ChecksumData) 6
Checksum (org.apache.hadoop.ozone.common.Checksum) 5
ArrayList (java.util.ArrayList) 3
BlockID (org.apache.hadoop.hdds.client.BlockID) 3
ContainerCommandResponseProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto) 3
IOException (java.io.IOException) 2
BlockData (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData) 2
ChunkInfo (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo) 2
ContainerCommandRequestProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto) 2
KeyValue (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue) 2
ChunkBuffer (org.apache.hadoop.ozone.common.ChunkBuffer) 2
OzoneChecksumException (org.apache.hadoop.ozone.common.OzoneChecksumException) 2
BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData) 2
ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo) 2
ByteString (org.apache.ratis.thirdparty.com.google.protobuf.ByteString) 2
VisibleForTesting (com.google.common.annotations.VisibleForTesting) 1
Preconditions (com.google.common.base.Preconditions) 1
File (java.io.File) 1
OutputStream (java.io.OutputStream) 1
ByteBuffer (java.nio.ByteBuffer) 1