Search in sources :

Example 6 with Checksum

use of org.apache.hadoop.ozone.common.Checksum in project ozone by apache.

The method createContainerWithBlocks of the class TestKeyValueContainerIntegrityChecks.

/**
 * Creates a container holding both live and deleted blocks: every live
 * ("normal") block is inserted first, then the blocks whose metadata keys
 * carry the deleting prefix.
 */
protected KeyValueContainer createContainerWithBlocks(long containerId, int normalBlocks, int deletedBlocks) throws Exception {
    final String blockPrefix = "block";
    final String chunkSuffix = "-chunkFile";
    final long blockCount = normalBlocks + deletedBlocks;
    // Each checksum covers two data units of the chunk payload.
    final int bytesPerChecksum = 2 * UNIT_LEN;
    Checksum checksum = new Checksum(ContainerProtos.ChecksumType.SHA256, bytesPerChecksum);
    byte[] payload = RandomStringUtils.randomAscii(CHUNK_LEN).getBytes(UTF_8);
    // All chunks share the same payload, so one checksum serves them all.
    ChecksumData payloadChecksum = checksum.computeChecksum(payload);
    DispatcherContext writePhase = new DispatcherContext.Builder().setStage(DispatcherContext.WriteChunkStage.WRITE_DATA).build();
    DispatcherContext commitPhase = new DispatcherContext.Builder().setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA).build();
    KeyValueContainerData containerData = new KeyValueContainerData(containerId, containerLayoutTestInfo.getLayout(), (long) CHUNKS_PER_BLOCK * CHUNK_LEN * blockCount, UUID.randomUUID().toString(), UUID.randomUUID().toString());
    KeyValueContainer container = new KeyValueContainer(containerData, conf);
    container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), UUID.randomUUID().toString());
    try (ReferenceCountedDB metadataStore = BlockUtils.getDB(containerData, conf)) {
        assertNotNull(containerData.getChunksPath());
        File chunksDir = new File(containerData.getChunksPath());
        // Fresh container: no block or chunk files yet.
        containerLayoutTestInfo.validateFileCount(chunksDir, 0, 0);
        for (int blockIdx = 0; blockIdx < blockCount; blockIdx++) {
            BlockID blockID = new BlockID(containerId, blockIdx);
            BlockData blockData = new BlockData(blockID);
            List<ContainerProtos.ChunkInfo> chunkProtos = new ArrayList<>();
            for (long chunkIdx = 0; chunkIdx < CHUNKS_PER_BLOCK; chunkIdx++) {
                ChunkInfo info = new ChunkInfo(blockPrefix + blockIdx + chunkSuffix + chunkIdx, chunkIdx * CHUNK_LEN, CHUNK_LEN);
                info.setChecksumData(payloadChecksum);
                chunkProtos.add(info.getProtoBufMessage());
                // Write then commit, mirroring the datanode's two-stage chunk write.
                chunkManager.writeChunk(container, blockID, info, ByteBuffer.wrap(payload), writePhase);
                chunkManager.writeChunk(container, blockID, info, ByteBuffer.wrap(payload), commitPhase);
            }
            blockData.setChunks(chunkProtos);
            // Live blocks use the bare local ID; deleted ones get the deleting prefix.
            String key = blockIdx < normalBlocks
                    ? Long.toString(blockID.getLocalID())
                    : OzoneConsts.DELETING_KEY_PREFIX + blockID.getLocalID();
            metadataStore.getStore().getBlockDataTable().put(key, blockData);
        }
        containerLayoutTestInfo.validateFileCount(chunksDir, blockCount, blockCount * CHUNKS_PER_BLOCK);
    }
    return container;
}
Also used : RoundRobinVolumeChoosingPolicy(org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy) ChunkInfo(org.apache.hadoop.ozone.container.common.helpers.ChunkInfo) ChecksumData(org.apache.hadoop.ozone.common.ChecksumData) DispatcherContext(org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext) ArrayList(java.util.ArrayList) ReferenceCountedDB(org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB) Checksum(org.apache.hadoop.ozone.common.Checksum) BlockID(org.apache.hadoop.hdds.client.BlockID) BlockData(org.apache.hadoop.ozone.container.common.helpers.BlockData) File(java.io.File)

Example 7 with Checksum

use of org.apache.hadoop.ozone.common.Checksum in project ozone by apache.

The method setup of the class TestBlockInputStream.

@Before
public void setup() throws Exception {
    // The checksum field must be initialized before the chunk list is built.
    checksum = new Checksum(ChecksumType.NONE, CHUNK_SIZE);
    createChunkList(5);
    BlockID streamBlockId = new BlockID(new ContainerBlockID(1, 1));
    blockStream = new DummyBlockInputStream(streamBlockId, blockSize, null, null, false, null, refreshPipeline, chunks, chunkDataMap);
}
Also used : Checksum(org.apache.hadoop.ozone.common.Checksum) BlockID(org.apache.hadoop.hdds.client.BlockID) ContainerBlockID(org.apache.hadoop.hdds.client.ContainerBlockID) ContainerBlockID(org.apache.hadoop.hdds.client.ContainerBlockID) Before(org.junit.Before)

Example 8 with Checksum

use of org.apache.hadoop.ozone.common.Checksum in project ozone by apache.

The method writeSmallFile of the class ContainerProtocolCalls.

/**
 * Writes a small file (designed for payloads under 1 MB) to a container in
 * a single RPC: the put-block metadata and the chunk data are bundled into
 * one PutSmallFile request and sent together.
 *
 * @param client client that communicates with the container
 * @param blockID identifier of the block being written
 * @param data payload bytes to store in the container
 * @param token token for this block (may be null)
 * @return container protocol writeSmallFile response
 * @throws IOException if the RPC fails or response validation rejects it
 */
public static PutSmallFileResponseProto writeSmallFile(XceiverClientSpi client, BlockID blockID, byte[] data, Token<OzoneBlockTokenIdentifier> token) throws IOException {
    BlockData blockDataProto = BlockData.newBuilder().setBlockID(blockID.getDatanodeBlockIDProtobuf()).build();
    PutBlockRequestProto.Builder putBlock = PutBlockRequestProto.newBuilder().setBlockData(blockDataProto);
    // Small-file writes always request overwrite of any existing data.
    KeyValue overwriteFlag = KeyValue.newBuilder().setKey("OverWriteRequested").setValue("true").build();
    final ChecksumData checksumData = new Checksum(ChecksumType.CRC32, 256).computeChecksum(data);
    ChunkInfo chunkProto = ChunkInfo.newBuilder()
            .setChunkName(blockID.getLocalID() + "_chunk")
            .setOffset(0)
            .setLen(data.length)
            .addMetadata(overwriteFlag)
            .setChecksumData(checksumData.getProtoBufMessage())
            .build();
    PutSmallFileRequestProto smallFileRequest = PutSmallFileRequestProto.newBuilder()
            .setChunkInfo(chunkProto)
            .setBlock(putBlock)
            .setData(ByteString.copyFrom(data))
            .build();
    // Route the command to the pipeline's first node.
    String datanodeId = client.getPipeline().getFirstNode().getUuidString();
    ContainerCommandRequestProto.Builder builder = ContainerCommandRequestProto.newBuilder()
            .setCmdType(Type.PutSmallFile)
            .setContainerID(blockID.getContainerID())
            .setDatanodeUuid(datanodeId)
            .setPutSmallFile(smallFileRequest);
    if (token != null) {
        builder.setEncodedToken(token.encodeToUrlString());
    }
    ContainerCommandResponseProto response = client.sendCommand(builder.build(), getValidatorList());
    return response.getPutSmallFile();
}
Also used : PutSmallFileRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutSmallFileRequestProto) KeyValue(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue) ChunkInfo(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo) ChecksumData(org.apache.hadoop.ozone.common.ChecksumData) PutBlockRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutBlockRequestProto) Checksum(org.apache.hadoop.ozone.common.Checksum) ContainerCommandRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto) ByteString(org.apache.ratis.thirdparty.com.google.protobuf.ByteString) BlockData(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData) ContainerCommandResponseProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto)

Example 9 with Checksum

use of org.apache.hadoop.ozone.common.Checksum in project ozone by apache.

The method testWritReadManyChunks of the class TestContainerPersistence.

/**
 * Writes many chunks of the same block into different chunk files and
 * verifies, by reading each chunk back via the ReadChunk path, that the
 * recomputed checksum matches the one recorded at write time.
 *
 * @throws IOException on container, block or chunk I/O failure
 */
@Test
public void testWritReadManyChunks() throws IOException {
    final int datalen = 1024;
    final int chunkCount = 1024;
    long testContainerID = getTestContainerID();
    Container container = addContainer(containerSet, testContainerID);
    BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
    List<ChunkInfo> chunks = new ArrayList<>(chunkCount);
    BlockData blockData = new BlockData(blockID);
    for (int x = 0; x < chunkCount; x++) {
        // Chunks are laid out back-to-back: chunk x starts at offset x * datalen.
        ChunkInfo info = getChunk(blockID.getLocalID(), x, x * datalen, datalen);
        ChunkBuffer data = getData(datalen);
        setDataChecksum(info, data);
        chunkManager.writeChunk(container, blockID, info, data, getDispatcherContext());
        chunks.add(info);
        blockData.addChunk(info.getProtoBufMessage());
    }
    blockManager.putBlock(container, blockData);
    KeyValueContainerData cNewData = (KeyValueContainerData) container.getContainerData();
    Assert.assertNotNull(cNewData);
    // The container must expose a chunks path once data has been written.
    // (Previously this was only dereferenced via an unused Paths.get() local.)
    Assert.assertNotNull(cNewData.getChunksPath());
    // Read each chunk back via the ReadChunk call and verify its checksum.
    Checksum checksum = new Checksum(ChecksumType.CRC32, 1024 * 1024);
    for (int x = 0; x < chunkCount; x++) {
        ChunkInfo info = chunks.get(x);
        ChunkBuffer data = chunkManager.readChunk(container, blockID, info, getDispatcherContext());
        ChecksumData checksumData = checksum.computeChecksum(data);
        Assert.assertEquals(info.getChecksumData(), checksumData);
    }
}
Also used : Path(java.nio.file.Path) ChunkInfo(org.apache.hadoop.ozone.container.common.helpers.ChunkInfo) ChecksumData(org.apache.hadoop.ozone.common.ChecksumData) ArrayList(java.util.ArrayList) KeyValueContainerData(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData) KeyValueContainer(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer) Container(org.apache.hadoop.ozone.container.common.interfaces.Container) ContainerTestHelper.setDataChecksum(org.apache.hadoop.ozone.container.ContainerTestHelper.setDataChecksum) Checksum(org.apache.hadoop.ozone.common.Checksum) BlockID(org.apache.hadoop.hdds.client.BlockID) ChunkBuffer(org.apache.hadoop.ozone.common.ChunkBuffer) BlockData(org.apache.hadoop.ozone.container.common.helpers.BlockData) Test(org.junit.Test)

Example 10 with Checksum

use of org.apache.hadoop.ozone.common.Checksum in project ozone by apache.

The method runTest of the class DatanodeChunkGenerator.

/**
 * Prepares one random chunk payload, precomputes its checksum proto, and
 * runs the write-chunk benchmark under the "chunk-write" timer.
 */
private void runTest() throws IOException {
    timer = getMetrics().timer("chunk-write");
    Checksum checksum = new Checksum(ChecksumType.CRC32, chunkSize);
    byte[] payload = RandomStringUtils.randomAscii(chunkSize).getBytes(StandardCharsets.UTF_8);
    dataToWrite = ByteString.copyFrom(payload);
    checksumProtobuf = checksum.computeChecksum(payload).getProtoBufMessage();
    runTests(this::writeChunk);
}
Also used : Checksum(org.apache.hadoop.ozone.common.Checksum)

Aggregations

Checksum (org.apache.hadoop.ozone.common.Checksum)12 ChecksumData (org.apache.hadoop.ozone.common.ChecksumData)5 BlockID (org.apache.hadoop.hdds.client.BlockID)4 ArrayList (java.util.ArrayList)3 ContainerCommandResponseProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto)3 File (java.io.File)2 Path (java.nio.file.Path)2 BlockData (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData)2 ChunkInfo (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo)2 ContainerCommandRequestProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto)2 KeyValue (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue)2 OzoneClientConfig (org.apache.hadoop.hdds.scm.OzoneClientConfig)2 Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline)2 ChunkBuffer (org.apache.hadoop.ozone.common.ChunkBuffer)2 OzoneChecksumException (org.apache.hadoop.ozone.common.OzoneChecksumException)2 BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData)2 ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo)2 RoundRobinVolumeChoosingPolicy (org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy)2 ByteString (org.apache.ratis.thirdparty.com.google.protobuf.ByteString)2 VisibleForTesting (com.google.common.annotations.VisibleForTesting)1