
Example 6 with ChunkInfo

Use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.

The class TestOzoneContainer, method addBlocks.

private long addBlocks(KeyValueContainer container, int blocks, int chunksPerBlock) throws Exception {
    String strBlock = "block";
    String strChunk = "-chunkFile";
    int datalen = 65536;
    long usedBytes = 0;
    long freeBytes = container.getContainerData().getMaxSize();
    long containerId = container.getContainerData().getContainerID();
    ReferenceCountedDB db = BlockUtils.getDB(container.getContainerData(), conf);
    Table<String, Long> metadataTable = db.getStore().getMetadataTable();
    Table<String, BlockData> blockDataTable = db.getStore().getBlockDataTable();
    for (int bi = 0; bi < blocks; bi++) {
        // Creating BlockData
        BlockID blockID = new BlockID(containerId, bi);
        BlockData blockData = new BlockData(blockID);
        List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
        for (int ci = 0; ci < chunksPerBlock; ci++) {
            String chunkName = strBlock + bi + strChunk + ci;
            long offset = ci * (long) datalen;
            ChunkInfo info = new ChunkInfo(chunkName, offset, datalen);
            usedBytes += datalen;
            chunkList.add(info.getProtoBufMessage());
        }
        blockData.setChunks(chunkList);
        blockDataTable.put(Long.toString(blockID.getLocalID()), blockData);
    }
    // Set block count and used bytes.
    metadataTable.put(OzoneConsts.BLOCK_COUNT, (long) blocks);
    metadataTable.put(OzoneConsts.CONTAINER_BYTES_USED, usedBytes);
    db.close();
    // Return the remaining available capacity of the container.
    return freeBytes - usedBytes;
}
Also used: ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo), ArrayList (java.util.ArrayList), BlockID (org.apache.hadoop.hdds.client.BlockID), BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData), ReferenceCountedDB (org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB)
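
To make the arithmetic behind the return value explicit: every chunk written above contributes datalen bytes, so the remaining capacity is the container's max size minus blocks * chunksPerBlock * datalen. A minimal sketch of how a caller could verify this, reusing the container and addBlocks from the test above (the concrete counts are assumptions for illustration):

long maxSize = container.getContainerData().getMaxSize();
int blocks = 10;
int chunksPerBlock = 4;
long datalen = 65536;  // fixed inside addBlocks
long remaining = addBlocks(container, blocks, chunksPerBlock);
// addBlocks wrote blocks * chunksPerBlock chunks of datalen bytes each.
assertEquals(maxSize - blocks * chunksPerBlock * datalen, remaining);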

Example 7 with ChunkInfo

Use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.

The class TestKeyValueContainerIntegrityChecks, method createContainerWithBlocks.

/**
 * Creates a container with normal and deleted blocks.
 * The normal blocks are inserted first, followed by the deleted blocks.
 */
protected KeyValueContainer createContainerWithBlocks(long containerId, int normalBlocks, int deletedBlocks) throws Exception {
    String strBlock = "block";
    String strChunk = "-chunkFile";
    long totalBlocks = normalBlocks + deletedBlocks;
    int bytesPerChecksum = 2 * UNIT_LEN;
    Checksum checksum = new Checksum(ContainerProtos.ChecksumType.SHA256, bytesPerChecksum);
    byte[] chunkData = RandomStringUtils.randomAscii(CHUNK_LEN).getBytes(UTF_8);
    ChecksumData checksumData = checksum.computeChecksum(chunkData);
    DispatcherContext writeStage = new DispatcherContext.Builder().setStage(DispatcherContext.WriteChunkStage.WRITE_DATA).build();
    DispatcherContext commitStage = new DispatcherContext.Builder().setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA).build();
    KeyValueContainerData containerData = new KeyValueContainerData(containerId, containerLayoutTestInfo.getLayout(), (long) CHUNKS_PER_BLOCK * CHUNK_LEN * totalBlocks, UUID.randomUUID().toString(), UUID.randomUUID().toString());
    KeyValueContainer container = new KeyValueContainer(containerData, conf);
    container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), UUID.randomUUID().toString());
    try (ReferenceCountedDB metadataStore = BlockUtils.getDB(containerData, conf)) {
        assertNotNull(containerData.getChunksPath());
        File chunksPath = new File(containerData.getChunksPath());
        containerLayoutTestInfo.validateFileCount(chunksPath, 0, 0);
        List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
        for (int i = 0; i < totalBlocks; i++) {
            BlockID blockID = new BlockID(containerId, i);
            BlockData blockData = new BlockData(blockID);
            chunkList.clear();
            for (long chunkCount = 0; chunkCount < CHUNKS_PER_BLOCK; chunkCount++) {
                String chunkName = strBlock + i + strChunk + chunkCount;
                long offset = chunkCount * CHUNK_LEN;
                ChunkInfo info = new ChunkInfo(chunkName, offset, CHUNK_LEN);
                info.setChecksumData(checksumData);
                chunkList.add(info.getProtoBufMessage());
                chunkManager.writeChunk(container, blockID, info, ByteBuffer.wrap(chunkData), writeStage);
                chunkManager.writeChunk(container, blockID, info, ByteBuffer.wrap(chunkData), commitStage);
            }
            blockData.setChunks(chunkList);
            // normal key
            String key = Long.toString(blockID.getLocalID());
            if (i >= normalBlocks) {
                // deleted key
                key = OzoneConsts.DELETING_KEY_PREFIX + blockID.getLocalID();
            }
            metadataStore.getStore().getBlockDataTable().put(key, blockData);
        }
        containerLayoutTestInfo.validateFileCount(chunksPath, totalBlocks, totalBlocks * CHUNKS_PER_BLOCK);
    }
    return container;
}
Also used: RoundRobinVolumeChoosingPolicy (org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy), ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo), ChecksumData (org.apache.hadoop.ozone.common.ChecksumData), DispatcherContext (org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext), ArrayList (java.util.ArrayList), ReferenceCountedDB (org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB), Checksum (org.apache.hadoop.ozone.common.Checksum), BlockID (org.apache.hadoop.hdds.client.BlockID), BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData), File (java.io.File)
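
The normal/deleted split above hinges on the key written to the block data table: the block's local ID, optionally prefixed with OzoneConsts.DELETING_KEY_PREFIX. A hypothetical helper (not part of the test, named here purely for illustration) that captures the scheme:

static String blockKey(BlockID blockID, boolean deleted) {
    // Normal blocks are keyed by their local ID; blocks queued for
    // deletion carry the deleting-key prefix in front of it.
    String key = Long.toString(blockID.getLocalID());
    return deleted ? OzoneConsts.DELETING_KEY_PREFIX + key : key;
}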

Example 8 with ChunkInfo

Use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.

The class TestContainerPersistence, method testPutBlockWithInvalidBCSId.

/**
 * Tests putting a block and reading it back with an invalid bcsId.
 *
 * @throws IOException
 * @throws NoSuchAlgorithmException
 */
@Test
public void testPutBlockWithInvalidBCSId() throws IOException, NoSuchAlgorithmException {
    long testContainerID = getTestContainerID();
    Container container = addContainer(containerSet, testContainerID);
    BlockID blockID1 = ContainerTestHelper.getTestBlockID(testContainerID);
    ChunkInfo info = writeChunkHelper(blockID1);
    BlockData blockData = new BlockData(blockID1);
    List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
    chunkList.add(info.getProtoBufMessage());
    blockData.setChunks(chunkList);
    blockData.setBlockCommitSequenceId(3);
    blockManager.putBlock(container, blockData);
    chunkList.clear();
    // write a 2nd block
    BlockID blockID2 = ContainerTestHelper.getTestBlockID(testContainerID);
    info = writeChunkHelper(blockID2);
    blockData = new BlockData(blockID2);
    chunkList.add(info.getProtoBufMessage());
    blockData.setChunks(chunkList);
    blockData.setBlockCommitSequenceId(4);
    blockManager.putBlock(container, blockData);
    BlockData readBlockData;
    try {
        blockID1.setBlockCommitSequenceId(5);
        // read with bcsId higher than container bcsId
        blockManager.getBlock(container, blockID1);
        Assert.fail("Expected exception not thrown");
    } catch (StorageContainerException sce) {
        Assert.assertEquals(UNKNOWN_BCSID, sce.getResult());
    }
    try {
        blockID1.setBlockCommitSequenceId(4);
        // read with bcsId lower than container bcsId but greater than committed
        // bcsId.
        blockManager.getBlock(container, blockID1);
        Assert.fail("Expected exception not thrown");
    } catch (StorageContainerException sce) {
        Assert.assertEquals(BCSID_MISMATCH, sce.getResult());
    }
    readBlockData = blockManager.getBlock(container, blockData.getBlockID());
    ChunkInfo readChunk = ChunkInfo.getFromProtoBuf(readBlockData.getChunks().get(0));
    Assert.assertEquals(info.getChecksumData(), readChunk.getChecksumData());
}
Also used: KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer), Container (org.apache.hadoop.ozone.container.common.interfaces.Container), ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo), BlockID (org.apache.hadoop.hdds.client.BlockID), StorageContainerException (org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException), BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData), LinkedList (java.util.LinkedList), Test (org.junit.Test)
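
The two try/catch blocks follow the same expect-and-assert pattern. As a sketch only, using the BlockManager, Container, and Result constants already present in the test, they could be folded into a hypothetical helper such as:

private static void assertGetBlockFails(BlockManager blockManager, Container container, BlockID blockID, long bcsId, ContainerProtos.Result expectedResult) throws IOException {
    try {
        blockID.setBlockCommitSequenceId(bcsId);
        blockManager.getBlock(container, blockID);
        Assert.fail("Expected StorageContainerException was not thrown");
    } catch (StorageContainerException sce) {
        // The result code distinguishes UNKNOWN_BCSID from BCSID_MISMATCH.
        Assert.assertEquals(expectedResult, sce.getResult());
    }
}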

Example 9 with ChunkInfo

Use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.

The class TestFilePerChunkStrategy, method deletesChunkFileWithLengthIncludingOffset.

/**
 * Tests that a "new datanode" can delete chunks written to an "old
 * datanode" by a "new client" (i.e. where the chunk file was accidentally
 * created with {@code size = chunk offset + chunk length} instead of only
 * the chunk length).
 */
@Test
public void deletesChunkFileWithLengthIncludingOffset() throws Exception {
    // GIVEN
    ChunkManager chunkManager = createTestSubject();
    KeyValueContainer container = getKeyValueContainer();
    BlockID blockID = getBlockID();
    ChunkInfo chunkInfo = getChunkInfo();
    long offset = 1024;
    ChunkInfo oldDatanodeChunkInfo = new ChunkInfo(chunkInfo.getChunkName(), offset, chunkInfo.getLen());
    File file = ContainerLayoutVersion.FILE_PER_CHUNK.getChunkFile(container.getContainerData(), blockID, chunkInfo);
    ChunkUtils.writeData(file, ChunkBuffer.wrap(getData()), offset, chunkInfo.getLen(), null, true);
    checkChunkFileCount(1);
    assertTrue(file.exists());
    assertEquals(offset + chunkInfo.getLen(), file.length());
    // WHEN
    chunkManager.deleteChunk(container, blockID, oldDatanodeChunkInfo);
    // THEN
    checkChunkFileCount(0);
    assertFalse(file.exists());
}
Also used: ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo), BlockID (org.apache.hadoop.hdds.client.BlockID), File (java.io.File), ChunkManager (org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager), KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer), Test (org.junit.Test)
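
To put numbers on the scenario (illustrative values, not taken from the test): if chunkInfo.getLen() is 4096 and the offset is 1024, the accidentally oversized file is 5120 bytes where a correct file-per-chunk file would be 4096. The assertEquals on file.length() above confirms the buggy layout is in place before deleteChunk is asked to remove it.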

Example 10 with ChunkInfo

Use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.

The class AbstractTestChunkManager, method setUp.

@Before
public final void setUp() throws Exception {
    OzoneConfiguration config = new OzoneConfiguration();
    getStrategy().updateConfig(config);
    UUID datanodeId = UUID.randomUUID();
    hddsVolume = new HddsVolume.Builder(folder.getRoot().getAbsolutePath()).conf(config).datanodeUuid(datanodeId.toString()).build();
    VolumeSet volumeSet = mock(MutableVolumeSet.class);
    RoundRobinVolumeChoosingPolicy volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class);
    Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong())).thenReturn(hddsVolume);
    keyValueContainerData = new KeyValueContainerData(1L, ContainerLayoutVersion.getConfiguredVersion(config), (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), datanodeId.toString());
    keyValueContainer = new KeyValueContainer(keyValueContainerData, config);
    keyValueContainer.create(volumeSet, volumeChoosingPolicy, UUID.randomUUID().toString());
    header = "my header".getBytes(UTF_8);
    byte[] bytes = "testing write chunks".getBytes(UTF_8);
    data = ByteBuffer.allocate(header.length + bytes.length).put(header).put(bytes);
    rewindBufferToDataStart();
    // Create the block ID and chunk info used by the tests.
    blockID = new BlockID(1L, 1L);
    chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID.getLocalID(), 0), 0, bytes.length);
}
Also used: RoundRobinVolumeChoosingPolicy (org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy), ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo), OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration), BlockID (org.apache.hadoop.hdds.client.BlockID), UUID (java.util.UUID), MutableVolumeSet (org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet), VolumeSet (org.apache.hadoop.ozone.container.common.volume.VolumeSet), KeyValueContainerData (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData), KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer), Before (org.junit.Before)
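
rewindBufferToDataStart() is not shown in this excerpt. Given how the buffer is assembled above (header followed by payload), a minimal sketch of what it presumably does, offered as an assumption rather than the actual implementation, is:

protected void rewindBufferToDataStart() {
    // Skip past the header so reads start at the chunk payload.
    data.position(header.length);
}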

Aggregations

ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo): 40
BlockID (org.apache.hadoop.hdds.client.BlockID): 28
KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer): 19
Test (org.junit.Test): 19
BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData): 18
ChunkBuffer (org.apache.hadoop.ozone.common.ChunkBuffer): 14
StorageContainerException (org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException): 11
ArrayList (java.util.ArrayList): 10
ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos): 10
ChunkManager (org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager): 10
DispatcherContext (org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext): 8
File (java.io.File): 7
IOException (java.io.IOException): 7
LinkedList (java.util.LinkedList): 6
Container (org.apache.hadoop.ozone.container.common.interfaces.Container): 6
ByteBuffer (java.nio.ByteBuffer): 5
ReferenceCountedDB (org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB): 5
KeyValueContainerData (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData): 4
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration): 3
RoundRobinVolumeChoosingPolicy (org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy): 3