
Example 6 with KeyValueContainer

Use of org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer in project ozone by apache.

From the class TestHddsDispatcher, method testContainerCloseActionWhenFull.

@Test
public void testContainerCloseActionWhenFull() throws IOException {
    String testDir = GenericTestUtils.getTempPath(TestHddsDispatcher.class.getSimpleName());
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set(HDDS_DATANODE_DIR_KEY, testDir);
    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir);
    DatanodeDetails dd = randomDatanodeDetails();
    MutableVolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null);
    try {
        UUID scmId = UUID.randomUUID();
        ContainerSet containerSet = new ContainerSet();
        DatanodeStateMachine stateMachine = Mockito.mock(DatanodeStateMachine.class);
        StateContext context = Mockito.mock(StateContext.class);
        Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(dd);
        Mockito.when(context.getParent()).thenReturn(stateMachine);
        KeyValueContainerData containerData = new KeyValueContainerData(1L, layout, (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(), dd.getUuidString());
        Container container = new KeyValueContainer(containerData, conf);
        container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), scmId.toString());
        containerSet.addContainer(container);
        ContainerMetrics metrics = ContainerMetrics.create(conf);
        Map<ContainerType, Handler> handlers = Maps.newHashMap();
        for (ContainerType containerType : ContainerType.values()) {
            handlers.put(containerType, Handler.getHandlerForContainerType(containerType, conf, context.getParent().getDatanodeDetails().getUuidString(), containerSet, volumeSet, metrics, NO_OP_ICR_SENDER));
        }
        HddsDispatcher hddsDispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers, context, metrics, null);
        hddsDispatcher.setClusterId(scmId.toString());
        ContainerCommandResponseProto responseOne = hddsDispatcher.dispatch(getWriteChunkRequest(dd.getUuidString(), 1L, 1L), null);
        Assert.assertEquals(ContainerProtos.Result.SUCCESS, responseOne.getResult());
        verify(context, times(0)).addContainerActionIfAbsent(Mockito.any(ContainerAction.class));
        containerData.setBytesUsed(Double.valueOf(StorageUnit.MB.toBytes(950)).longValue());
        ContainerCommandResponseProto responseTwo = hddsDispatcher.dispatch(getWriteChunkRequest(dd.getUuidString(), 1L, 2L), null);
        Assert.assertEquals(ContainerProtos.Result.SUCCESS, responseTwo.getResult());
        verify(context, times(1)).addContainerActionIfAbsent(Mockito.any(ContainerAction.class));
    } finally {
        volumeSet.shutdown();
        ContainerMetrics.remove();
        FileUtils.deleteDirectory(new File(testDir));
    }
}
Also used:
RoundRobinVolumeChoosingPolicy (org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy)
ContainerType (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType)
StateContext (org.apache.hadoop.ozone.container.common.statemachine.StateContext)
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration)
Handler (org.apache.hadoop.ozone.container.common.interfaces.Handler)
ByteString (org.apache.ratis.thirdparty.com.google.protobuf.ByteString)
KeyValueContainerData (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData)
ContainerCommandResponseProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto)
KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer)
Container (org.apache.hadoop.ozone.container.common.interfaces.Container)
ContainerAction (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerAction)
DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails)
DatanodeStateMachine (org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine)
MutableVolumeSet (org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet)
UUID (java.util.UUID)
ContainerMetrics (org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics)
File (java.io.File)
Test (org.junit.Test)
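
The two verify calls hinge on a fullness check: the first write leaves the 1 GB container nearly empty, while setBytesUsed(950 MB) pushes usage to roughly 93%, past the default close threshold of 0.9 (hdds.container.close.threshold), so only the second dispatch queues a ContainerAction. A minimal sketch of that check, assuming the default threshold (the actual HddsDispatcher logic may differ in detail):

// Sketch only, not the dispatcher's code: returns true once the container's
// used bytes exceed closeThreshold of its maximum size. With 950 MB of a
// 1 GB container used, the ratio is ~0.93 >= 0.9, so a close action fires.
static boolean isContainerNearlyFull(KeyValueContainerData containerData, float closeThreshold) {
    double usedRatio = (double) containerData.getBytesUsed() / containerData.getMaxSize();
    return usedRatio >= closeThreshold;
}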

Example 7 with KeyValueContainer

Use of org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer in project ozone by apache.

From the class TestFilePerChunkStrategy, method deletesChunkFileWithLengthIncludingOffset.

/**
 * Tests that a "new datanode" can delete chunks written to an "old
 * datanode" by a "new client" (i.e. where the chunk file was accidentally
 * created with {@code size = chunk offset + chunk length} instead of only
 * the chunk length).
 */
@Test
public void deletesChunkFileWithLengthIncludingOffset() throws Exception {
    // GIVEN
    ChunkManager chunkManager = createTestSubject();
    KeyValueContainer container = getKeyValueContainer();
    BlockID blockID = getBlockID();
    ChunkInfo chunkInfo = getChunkInfo();
    long offset = 1024;
    ChunkInfo oldDatanodeChunkInfo = new ChunkInfo(chunkInfo.getChunkName(), offset, chunkInfo.getLen());
    File file = ContainerLayoutVersion.FILE_PER_CHUNK.getChunkFile(container.getContainerData(), blockID, chunkInfo);
    ChunkUtils.writeData(file, ChunkBuffer.wrap(getData()), offset, chunkInfo.getLen(), null, true);
    checkChunkFileCount(1);
    assertTrue(file.exists());
    assertEquals(offset + chunkInfo.getLen(), file.length());
    // WHEN
    chunkManager.deleteChunk(container, blockID, oldDatanodeChunkInfo);
    // THEN
    checkChunkFileCount(0);
    assertFalse(file.exists());
}
Also used:
ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo)
BlockID (org.apache.hadoop.hdds.client.BlockID)
File (java.io.File)
ChunkManager (org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager)
KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer)
Test (org.junit.Test)
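
The deletion succeeds because the file-per-chunk strategy tolerates both possible file lengths. A hypothetical helper illustrating the idea (this is not the actual FilePerChunkStrategy code):

// Hypothetical illustration: a chunk file written by a "new client" to an
// "old datanode" is offset + length bytes long, while the current layout
// writes exactly length bytes. A delete should accept either size.
static boolean hasExpectedChunkFileLength(File file, ChunkInfo chunkInfo) {
    long currentLayoutLength = chunkInfo.getLen();
    long legacyLayoutLength = chunkInfo.getOffset() + chunkInfo.getLen();
    return file.length() == currentLayoutLength || file.length() == legacyLayoutLength;
}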

Example 8 with KeyValueContainer

Use of org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer in project ozone by apache.

From the class TestContainerReader, method setup.

@Before
public void setup() throws Exception {
    File volumeDir = tempDir.newFolder();
    volumeSet = Mockito.mock(MutableVolumeSet.class);
    containerSet = new ContainerSet();
    conf = new OzoneConfiguration();
    datanodeId = UUID.randomUUID();
    hddsVolume = new HddsVolume.Builder(volumeDir.getAbsolutePath()).conf(conf).datanodeUuid(datanodeId.toString()).clusterID(clusterId).build();
    volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class);
    Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong())).thenReturn(hddsVolume);
    for (int i = 0; i < 2; i++) {
        KeyValueContainerData keyValueContainerData = new KeyValueContainerData(i, ContainerLayoutVersion.FILE_PER_BLOCK, (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), datanodeId.toString());
        KeyValueContainer keyValueContainer = new KeyValueContainer(keyValueContainerData, conf);
        keyValueContainer.create(volumeSet, volumeChoosingPolicy, clusterId);
        List<Long> blkNames;
        if (i % 2 == 0) {
            blkNames = addBlocks(keyValueContainer, true);
            markBlocksForDelete(keyValueContainer, true, blkNames, i);
        } else {
            blkNames = addBlocks(keyValueContainer, false);
            markBlocksForDelete(keyValueContainer, false, blkNames, i);
        }
        // Close the RocksDB instance for this container and remove from the cache
        // so it does not affect the ContainerReader, which avoids using the cache
        // at startup for performance reasons.
        BlockUtils.removeDB(keyValueContainerData, conf);
    }
}
Also used:
HddsVolume (org.apache.hadoop.ozone.container.common.volume.HddsVolume)
RoundRobinVolumeChoosingPolicy (org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy)
ContainerSet (org.apache.hadoop.ozone.container.common.impl.ContainerSet)
ArgumentMatchers.anyLong (org.mockito.ArgumentMatchers.anyLong)
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration)
MutableVolumeSet (org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet)
File (java.io.File)
KeyValueContainerData (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData)
KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer)
Before (org.junit.Before)
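
With the containers on disk and their DB handles evicted from the cache, the tests can run a ContainerReader (org.apache.hadoop.ozone.container.ozoneimpl.ContainerReader) over the volume to rebuild the ContainerSet from scratch. A hedged sketch of that follow-on step; the constructor arguments are an assumption based on this era of the Ozone codebase and may differ between versions:

// Assumed usage: scan the HDDS volume and repopulate containerSet.
ContainerReader containerReader = new ContainerReader(volumeSet, hddsVolume, containerSet, conf);
containerReader.run();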

Example 9 with KeyValueContainer

Use of org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer in project ozone by apache.

From the class AbstractTestChunkManager, method setUp.

@Before
public final void setUp() throws Exception {
    OzoneConfiguration config = new OzoneConfiguration();
    getStrategy().updateConfig(config);
    UUID datanodeId = UUID.randomUUID();
    hddsVolume = new HddsVolume.Builder(folder.getRoot().getAbsolutePath()).conf(config).datanodeUuid(datanodeId.toString()).build();
    VolumeSet volumeSet = mock(MutableVolumeSet.class);
    RoundRobinVolumeChoosingPolicy volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class);
    Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong())).thenReturn(hddsVolume);
    keyValueContainerData = new KeyValueContainerData(1L, ContainerLayoutVersion.getConfiguredVersion(config), (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), datanodeId.toString());
    keyValueContainer = new KeyValueContainer(keyValueContainerData, config);
    keyValueContainer.create(volumeSet, volumeChoosingPolicy, UUID.randomUUID().toString());
    header = "my header".getBytes(UTF_8);
    byte[] bytes = "testing write chunks".getBytes(UTF_8);
    data = ByteBuffer.allocate(header.length + bytes.length).put(header).put(bytes);
    rewindBufferToDataStart();
    // Create the block ID and chunk info used by the tests
    blockID = new BlockID(1L, 1L);
    chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID.getLocalID(), 0), 0, bytes.length);
}
Also used:
RoundRobinVolumeChoosingPolicy (org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy)
ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo)
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration)
BlockID (org.apache.hadoop.hdds.client.BlockID)
UUID (java.util.UUID)
MutableVolumeSet (org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet)
VolumeSet (org.apache.hadoop.ozone.container.common.volume.VolumeSet)
KeyValueContainerData (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData)
KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer)
Before (org.junit.Before)
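
The helper rewindBufferToDataStart() is not shown in this excerpt. One plausible implementation, assuming it simply skips the header bytes (the real helper may differ):

// Assumption, not the actual helper: position the buffer just past the
// header so subsequent writes see only the chunk payload.
private void rewindBufferToDataStart() {
    data.position(header.length);
}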

Example 10 with KeyValueContainer

Use of org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer in project ozone by apache.

From the class TestBlockManagerImpl, method setUp.

@Before
public void setUp() throws Exception {
    config = new OzoneConfiguration();
    UUID datanodeId = UUID.randomUUID();
    HddsVolume hddsVolume = new HddsVolume.Builder(folder.getRoot().getAbsolutePath()).conf(config).datanodeUuid(datanodeId.toString()).build();
    volumeSet = mock(MutableVolumeSet.class);
    volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class);
    Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong())).thenReturn(hddsVolume);
    keyValueContainerData = new KeyValueContainerData(1L, layout, (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), datanodeId.toString());
    keyValueContainer = new KeyValueContainer(keyValueContainerData, config);
    keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
    // Create the first BlockData entry with a single 1 KB chunk
    blockID = new BlockID(1L, 1L);
    blockData = new BlockData(blockID);
    blockData.addMetadata(OzoneConsts.VOLUME, OzoneConsts.OZONE);
    blockData.addMetadata(OzoneConsts.OWNER, OzoneConsts.OZONE_SIMPLE_HDFS_USER);
    List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
    ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID.getLocalID(), 0), 0, 1024);
    chunkList.add(info.getProtoBufMessage());
    blockData.setChunks(chunkList);
    // Create a second BlockData entry with its own chunk list
    blockID1 = new BlockID(1L, 2L);
    blockData1 = new BlockData(blockID1);
    blockData1.addMetadata(OzoneConsts.VOLUME, OzoneConsts.OZONE);
    blockData1.addMetadata(OzoneConsts.OWNER, OzoneConsts.OZONE_SIMPLE_HDFS_USER);
    List<ContainerProtos.ChunkInfo> chunkList1 = new ArrayList<>();
    ChunkInfo info1 = new ChunkInfo(String.format("%d.data.%d", blockID1.getLocalID(), 0), 0, 1024);
    chunkList1.add(info1.getProtoBufMessage());
    blockData1.setChunks(chunkList1);
    blockData1.setBlockCommitSequenceId(1);
    // Create the BlockManager under test
    blockManager = new BlockManagerImpl(config);
}
Also used:
RoundRobinVolumeChoosingPolicy (org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy)
ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo)
ArrayList (java.util.ArrayList)
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration)
KeyValueContainerData (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData)
HddsVolume (org.apache.hadoop.ozone.container.common.volume.HddsVolume)
BlockID (org.apache.hadoop.hdds.client.BlockID)
MutableVolumeSet (org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet)
UUID (java.util.UUID)
BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData)
KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer)
Before (org.junit.Before)
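
A typical follow-on step in these tests is to persist one of the prepared blocks and read it back. Sketched usage; the method names follow the Ozone BlockManager interface, but treat the exact signatures as an assumption:

// Store the block metadata in the container's database, then fetch it by ID.
blockManager.putBlock(keyValueContainer, blockData);
BlockData readBack = blockManager.getBlock(keyValueContainer, blockID);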

Aggregations

KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer): 39 uses
KeyValueContainerData (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData): 27 uses
Test (org.junit.Test): 22 uses
File (java.io.File): 13 uses
ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo): 13 uses
BlockID (org.apache.hadoop.hdds.client.BlockID): 12 uses
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration): 11 uses
StorageContainerException (org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException): 10 uses
HddsVolume (org.apache.hadoop.ozone.container.common.volume.HddsVolume): 10 uses
ChunkManager (org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager): 9 uses
RoundRobinVolumeChoosingPolicy (org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy): 8 uses
DispatcherContext (org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext): 7 uses
MutableVolumeSet (org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet): 7 uses
ContainerSet (org.apache.hadoop.ozone.container.common.impl.ContainerSet): 6 uses
ByteBuffer (java.nio.ByteBuffer): 5 uses
BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData): 5 uses
UUID (java.util.UUID): 4 uses
ChunkBuffer (org.apache.hadoop.ozone.common.ChunkBuffer): 4 uses
Container (org.apache.hadoop.ozone.container.common.interfaces.Container): 4 uses
IOException (java.io.IOException): 3 uses