Example 26 with ChunkInfo

use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.

the class TestContainerPersistence method testOverWrite.

/**
 * Writes a single chunk, attempts to overwrite it without the overwrite
 * flag set, then retries with the overwrite flag.
 *
 * @throws IOException
 * @throws NoSuchAlgorithmException
 */
@Test
public void testOverWrite() throws IOException, NoSuchAlgorithmException {
    final int datalen = 1024;
    long testContainerID = getTestContainerID();
    KeyValueContainer container = addContainer(containerSet, testContainerID);
    BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
    ChunkInfo info = getChunk(blockID.getLocalID(), 0, 0, datalen);
    ChunkBuffer data = getData(datalen);
    setDataChecksum(info, data);
    chunkManager.writeChunk(container, blockID, info, data, getDispatcherContext());
    data.rewind();
    chunkManager.writeChunk(container, blockID, info, data, getDispatcherContext());
    data.rewind();
    // With the overwrite flag it should work now.
    info.addMetadata(OzoneConsts.CHUNK_OVERWRITE, "true");
    chunkManager.writeChunk(container, blockID, info, data, getDispatcherContext());
    long bytesUsed = container.getContainerData().getBytesUsed();
    Assert.assertEquals(datalen, bytesUsed);
    long bytesWrite = container.getContainerData().getWriteBytes();
    Assert.assertEquals(datalen * 3, bytesWrite);
}
Also used : ChunkInfo(org.apache.hadoop.ozone.container.common.helpers.ChunkInfo) BlockID(org.apache.hadoop.hdds.client.BlockID) ChunkBuffer(org.apache.hadoop.ozone.common.ChunkBuffer) KeyValueContainer(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer) Test(org.junit.Test)
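
The overwrite retry above is signalled purely through metadata on the ChunkInfo rather than a separate API call: setting OzoneConsts.CHUNK_OVERWRITE to "true" marks the rewrite as intentional. A minimal sketch of that pattern, factored into a helper; the class and method names are illustrative, while ChunkInfo, OzoneConsts.CHUNK_OVERWRITE and the "<localID>.data.<seq>" chunk-name pattern come from the examples on this page:

import java.io.IOException;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;

public final class OverwriteChunkSketch {
    // Builds a ChunkInfo whose metadata marks it as a deliberate overwrite.
    // IOException is declared defensively: addMetadata throws it in some
    // Ozone versions when the same key is added twice.
    static ChunkInfo overwritableChunk(long localId, long offset, long len)
            throws IOException {
        ChunkInfo info = new ChunkInfo(
            String.format("%d.data.%d", localId, 0), offset, len);
        info.addMetadata(OzoneConsts.CHUNK_OVERWRITE, "true");
        return info;
    }
}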

Example 27 with ChunkInfo

use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.

the class TestBlockManagerImpl method testListBlock.

@Test
public void testListBlock() throws Exception {
    blockManager.putBlock(keyValueContainer, blockData);
    List<BlockData> listBlockData = blockManager.listBlock(keyValueContainer, 1, 10);
    assertNotNull(listBlockData);
    assertEquals(1, listBlockData.size());
    for (long i = 2; i <= 10; i++) {
        blockID = new BlockID(1L, i);
        blockData = new BlockData(blockID);
        blockData.addMetadata(OzoneConsts.VOLUME, OzoneConsts.OZONE);
        blockData.addMetadata(OzoneConsts.OWNER, OzoneConsts.OZONE_SIMPLE_HDFS_USER);
        List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
        ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID.getLocalID(), 0), 0, 1024);
        chunkList.add(info.getProtoBufMessage());
        blockData.setChunks(chunkList);
        blockManager.putBlock(keyValueContainer, blockData);
    }
    listBlockData = blockManager.listBlock(keyValueContainer, 1, 10);
    assertNotNull(listBlockData);
    assertEquals(10, listBlockData.size());
}
Also used : ChunkInfo(org.apache.hadoop.ozone.container.common.helpers.ChunkInfo) ArrayList(java.util.ArrayList) BlockID(org.apache.hadoop.hdds.client.BlockID) BlockData(org.apache.hadoop.ozone.container.common.helpers.BlockData) Test(org.junit.Test)
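
Every block inserted by the loop above follows the same three-step recipe: wrap a BlockID in a BlockData, attach the chunk list as protobuf messages, then hand the result to putBlock. A self-contained sketch of that recipe; makeBlock is a hypothetical helper name, and the Ozone classes are exactly those imported by the example:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;

public final class BlockDataSketch {
    // Builds a BlockData for (containerId, localId) carrying one 1 KiB chunk,
    // mirroring the loop body of testListBlock above. IOException is declared
    // defensively for Ozone versions whose helpers throw it.
    static BlockData makeBlock(long containerId, long localId)
            throws IOException {
        BlockID blockID = new BlockID(containerId, localId);
        BlockData blockData = new BlockData(blockID);
        List<ContainerProtos.ChunkInfo> chunks = new ArrayList<>();
        ChunkInfo info = new ChunkInfo(
            String.format("%d.data.%d", blockID.getLocalID(), 0), 0, 1024);
        chunks.add(info.getProtoBufMessage());
        blockData.setChunks(chunks);
        return blockData;
    }
}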

Example 28 with ChunkInfo

use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.

the class TestFilePerBlockStrategy method testDeletePartialChunkWithOffsetUnsupportedRequest.

@Test
public void testDeletePartialChunkWithOffsetUnsupportedRequest() {
    // GIVEN
    ChunkManager chunkManager = createTestSubject();
    try {
        KeyValueContainer container = getKeyValueContainer();
        BlockID blockID = getBlockID();
        chunkManager.writeChunk(container, blockID, getChunkInfo(), getData(), getDispatcherContext());
        ChunkInfo chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID.getLocalID(), 0), 123, getChunkInfo().getLen());
        // WHEN
        chunkManager.deleteChunk(container, blockID, chunkInfo);
        // THEN
        fail("testDeleteChunkUnsupportedRequest");
    } catch (StorageContainerException ex) {
        assertEquals(ContainerProtos.Result.UNSUPPORTED_REQUEST, ex.getResult());
    }
}
Also used : ChunkInfo(org.apache.hadoop.ozone.container.common.helpers.ChunkInfo) BlockID(org.apache.hadoop.hdds.client.BlockID) StorageContainerException(org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException) ChunkManager(org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager) KeyValueContainer(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer) Test(org.junit.Test)
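
The try/fail/catch shape above is the pre-JUnit-4.13 idiom for asserting that a call throws. Assuming a JUnit version that provides Assert.assertThrows (4.13 or later), the same check can be written more directly; a sketch, with the helper class and method names invented here:

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThrows;

import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;

final class UnsupportedDeleteAssertSketch {
    // Asserts that deleting chunkInfo is rejected with UNSUPPORTED_REQUEST,
    // replacing the try/fail/catch block with a single assertThrows call.
    static void assertDeleteUnsupported(ChunkManager chunkManager,
            KeyValueContainer container, BlockID blockID, ChunkInfo chunkInfo) {
        StorageContainerException ex = assertThrows(
            StorageContainerException.class,
            () -> chunkManager.deleteChunk(container, blockID, chunkInfo));
        assertEquals(ContainerProtos.Result.UNSUPPORTED_REQUEST, ex.getResult());
    }
}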

Example 29 with ChunkInfo

use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.

the class TestFilePerBlockStrategy method testPartialRead.

/**
 * Tests a partial read within a single chunk.
 */
@Test
public void testPartialRead() throws Exception {
    final int datalen = 1024;
    final int start = datalen / 4;
    final int length = datalen / 2;
    KeyValueContainer container = getKeyValueContainer();
    BlockID blockID = getBlockID();
    ChunkInfo info = getChunk(blockID.getLocalID(), 0, 0, datalen);
    ChunkBuffer data = ContainerTestHelper.getData(datalen);
    setDataChecksum(info, data);
    DispatcherContext ctx = getDispatcherContext();
    ChunkManager subject = createTestSubject();
    subject.writeChunk(container, blockID, info, data, ctx);
    ChunkBuffer readData = subject.readChunk(container, blockID, info, ctx);
    // data is a ChunkBufferImplWithByteBuffer while readChunk returns a
    // ChunkBufferImplWithByteBufferList, so convert both to ByteStrings
    // before comparing.
    assertEquals(data.rewind().toByteString(), readData.rewind().toByteString());
    ChunkInfo info2 = getChunk(blockID.getLocalID(), 0, start, length);
    ChunkBuffer readData2 = subject.readChunk(container, blockID, info2, ctx);
    assertEquals(length, info2.getLen());
    assertEquals(data.rewind().toByteString().substring(start, start + length), readData2.rewind().toByteString());
}
Also used : ChunkInfo(org.apache.hadoop.ozone.container.common.helpers.ChunkInfo) DispatcherContext(org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext) BlockID(org.apache.hadoop.hdds.client.BlockID) ChunkBuffer(org.apache.hadoop.ozone.common.ChunkBuffer) KeyValueContainer(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer) ChunkManager(org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager) Test(org.junit.Test)
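
The final assertion is pure offset arithmetic: the bytes returned for a ChunkInfo with offset start and length length must equal the [start, start + length) slice of the buffer originally written. A standalone, Ozone-free sketch of that invariant using java.nio; all names here are illustrative:

import java.nio.ByteBuffer;
import java.util.Arrays;

public class PartialReadSketch {
    public static void main(String[] args) {
        // A 1 KiB "chunk" with recognizable contents.
        byte[] full = new byte[1024];
        for (int i = 0; i < full.length; i++) {
            full[i] = (byte) i;
        }
        int start = full.length / 4;   // 256, matching the test
        int length = full.length / 2;  // 512, matching the test

        // Simulate the partial read: copy [start, start + length) out of the chunk.
        ByteBuffer chunk = ByteBuffer.wrap(full);
        chunk.position(start);
        byte[] partial = new byte[length];
        chunk.get(partial);

        // The invariant testPartialRead asserts, expressed with Arrays utilities.
        byte[] expected = Arrays.copyOfRange(full, start, start + length);
        System.out.println(Arrays.equals(expected, partial)); // prints: true
    }
}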

Example 30 with ChunkInfo

use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.

the class TestFilePerBlockStrategy method testMultipleWriteSingleRead.

/**
 * Writes the data as many small chunk writes, then reads it back in a
 * single large read and verifies the digests match.
 */
@Test
public void testMultipleWriteSingleRead() throws Exception {
    final int datalen = 1024;
    final int chunkCount = 1024;
    KeyValueContainer container = getKeyValueContainer();
    BlockID blockID = getBlockID();
    MessageDigest oldSha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
    ChunkManager subject = createTestSubject();
    for (int x = 0; x < chunkCount; x++) {
        // We are writing to the same chunk file, but at different offsets.
        long offset = (long) x * datalen; // widen before multiplying to avoid int overflow
        ChunkInfo info = getChunk(blockID.getLocalID(), 0, offset, datalen);
        ChunkBuffer data = ContainerTestHelper.getData(datalen);
        oldSha.update(data.toByteString().asReadOnlyByteBuffer());
        data.rewind();
        setDataChecksum(info, data);
        subject.writeChunk(container, blockID, info, data, getDispatcherContext());
    }
    // Request to read the whole data in a single go.
    ChunkInfo largeChunk = getChunk(blockID.getLocalID(), 0, 0, datalen * chunkCount);
    ChunkBuffer chunk = subject.readChunk(container, blockID, largeChunk, getDispatcherContext());
    ByteBuffer newdata = chunk.toByteString().asReadOnlyByteBuffer();
    MessageDigest newSha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
    newSha.update(newdata);
    assertEquals(Hex.encodeHexString(oldSha.digest()), Hex.encodeHexString(newSha.digest()));
}
Also used : ChunkInfo(org.apache.hadoop.ozone.container.common.helpers.ChunkInfo) BlockID(org.apache.hadoop.hdds.client.BlockID) ChunkBuffer(org.apache.hadoop.ozone.common.ChunkBuffer) MessageDigest(java.security.MessageDigest) ByteBuffer(java.nio.ByteBuffer) KeyValueContainer(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer) ChunkManager(org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager) Test(org.junit.Test)
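
The verification in testMultipleWriteSingleRead rests on MessageDigest being incremental: updating a digest with the 1024 small buffers in write order yields the same hash as one update over their concatenation. A standalone sketch of that property; SHA-256 is assumed here, while the test uses whatever algorithm OzoneConsts.FILE_HASH names:

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

public class IncrementalDigestSketch {
    public static void main(String[] args) throws NoSuchAlgorithmException {
        byte[] part1 = "many small ".getBytes(StandardCharsets.UTF_8);
        byte[] part2 = "writes".getBytes(StandardCharsets.UTF_8);

        // Digest fed piece by piece, as the write loop in the test does.
        MessageDigest incremental = MessageDigest.getInstance("SHA-256");
        incremental.update(part1);
        incremental.update(part2);

        // Digest over the concatenation, as the single large read does.
        MessageDigest whole = MessageDigest.getInstance("SHA-256");
        byte[] all = new byte[part1.length + part2.length];
        System.arraycopy(part1, 0, all, 0, part1.length);
        System.arraycopy(part2, 0, all, part1.length, part2.length);
        whole.update(all);

        // MessageDigest.isEqual is the standard way to compare digests.
        System.out.println(MessageDigest.isEqual(
            incremental.digest(), whole.digest())); // prints: true
    }
}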

Aggregations

ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo): 40 usages
BlockID (org.apache.hadoop.hdds.client.BlockID): 28 usages
KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer): 19 usages
Test (org.junit.Test): 19 usages
BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData): 18 usages
ChunkBuffer (org.apache.hadoop.ozone.common.ChunkBuffer): 14 usages
StorageContainerException (org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException): 11 usages
ArrayList (java.util.ArrayList): 10 usages
ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos): 10 usages
ChunkManager (org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager): 10 usages
DispatcherContext (org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext): 8 usages
File (java.io.File): 7 usages
IOException (java.io.IOException): 7 usages
LinkedList (java.util.LinkedList): 6 usages
Container (org.apache.hadoop.ozone.container.common.interfaces.Container): 6 usages
ByteBuffer (java.nio.ByteBuffer): 5 usages
ReferenceCountedDB (org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB): 5 usages
KeyValueContainerData (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData): 4 usages
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration): 3 usages
RoundRobinVolumeChoosingPolicy (org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy): 3 usages