Example 1 with VolumeSet

Use of org.apache.hadoop.ozone.container.common.volume.VolumeSet in project Apache Ozone.

From class ChunkManagerDiskWrite, method call().

@Override
public Void call() throws Exception {
    try {
        init();
        OzoneConfiguration ozoneConfiguration = createOzoneConfiguration();
        VolumeSet volumeSet = new MutableVolumeSet("dnid", "clusterid", ozoneConfiguration, null, StorageVolume.VolumeType.DATA_VOLUME, null);
        Random random = new Random();
        VolumeChoosingPolicy volumeChoicePolicy = new RoundRobinVolumeChoosingPolicy();
        final int threadCount = getThreadNo();
        // create a dedicated (NEW) container for each thread
        for (int i = 1; i <= threadCount; i++) {
            // clear the top four bits (including the sign bit) so the container id is always non-negative
            long containerId = random.nextLong() & 0x0F_FF_FF_FF_FF_FF_FF_FFL;
            KeyValueContainerData keyValueContainerData = new KeyValueContainerData(containerId, containerLayout, 1_000_000L, getPrefix(), "nodeid");
            KeyValueContainer keyValueContainer = new KeyValueContainer(keyValueContainerData, ozoneConfiguration);
            keyValueContainer.create(volumeSet, volumeChoicePolicy, "scmid");
            containersPerThread.put(i, keyValueContainer);
        }
        blockSize = chunkSize * chunksPerBlock;
        data = randomAscii(chunkSize).getBytes(UTF_8);
        chunkManager = ChunkManagerFactory.createChunkManager(ozoneConfiguration, null, null);
        timer = getMetrics().timer("chunk-write");
        LOG.info("Running chunk write test: threads={} chunkSize={} " + "chunksPerBlock={} layout={}", threadCount, chunkSize, chunksPerBlock, containerLayout);
        runTests(this::writeChunk);
    } finally {
        if (chunkManager != null) {
            chunkManager.shutdown();
        }
    }
    return null;
}
Also used: RoundRobinVolumeChoosingPolicy (org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy), Random (java.util.Random), VolumeChoosingPolicy (org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy), OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration), MutableVolumeSet (org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet), VolumeSet (org.apache.hadoop.ozone.container.common.volume.VolumeSet), KeyValueContainerData (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData), KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer)
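A detail worth isolating from this example is the container-id masking. Below is a minimal, dependency-free sketch (not from the Ozone codebase) showing why ANDing a random long with 0x0F_FF_FF_FF_FF_FF_FF_FFL always yields a non-negative id: the mask clears the top four bits, including the sign bit.

import java.util.Random;

public class NonNegativeIdSketch {
    public static void main(String[] args) {
        Random random = new Random();
        for (int i = 0; i < 5; i++) {
            // Clearing bits 60-63 (sign bit included) bounds the id to [0, 2^60 - 1].
            long containerId = random.nextLong() & 0x0F_FF_FF_FF_FF_FF_FF_FFL;
            System.out.println(containerId + " non-negative: " + (containerId >= 0));
        }
    }
}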

Example 2 with VolumeSet

Use of org.apache.hadoop.ozone.container.common.volume.VolumeSet in project Apache Ozone.

From class TestHddsDispatcher, method createDispatcher().

/**
 * Creates an HddsDispatcher instance with the given parameters.
 * @param dd datanode details.
 * @param scmId UUID of the SCM.
 * @param conf configuration to be used.
 * @return the HddsDispatcher instance.
 * @throws IOException in case of I/O errors while initializing volumes.
 */
private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, OzoneConfiguration conf) throws IOException {
    ContainerSet containerSet = new ContainerSet();
    VolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null);
    DatanodeStateMachine stateMachine = Mockito.mock(DatanodeStateMachine.class);
    StateContext context = Mockito.mock(StateContext.class);
    Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(dd);
    Mockito.when(context.getParent()).thenReturn(stateMachine);
    ContainerMetrics metrics = ContainerMetrics.create(conf);
    Map<ContainerType, Handler> handlers = Maps.newHashMap();
    for (ContainerType containerType : ContainerType.values()) {
        handlers.put(containerType, Handler.getHandlerForContainerType(containerType, conf, context.getParent().getDatanodeDetails().getUuidString(), containerSet, volumeSet, metrics, NO_OP_ICR_SENDER));
    }
    HddsDispatcher hddsDispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers, context, metrics, null);
    hddsDispatcher.setClusterId(scmId.toString());
    return hddsDispatcher;
}
Also used: ContainerType (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType), StateContext (org.apache.hadoop.ozone.container.common.statemachine.StateContext), Handler (org.apache.hadoop.ozone.container.common.interfaces.Handler), DatanodeStateMachine (org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine), MutableVolumeSet (org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet), ContainerMetrics (org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics), VolumeSet (org.apache.hadoop.ozone.container.common.volume.VolumeSet)
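The two-step Mockito stubbing above (the state machine returns the datanode details; the context returns the state machine) is a common wiring pattern. Here is a minimal, generic sketch of it, assuming only Mockito on the classpath; the Parent and Context interfaces are hypothetical stand-ins for DatanodeStateMachine and StateContext.

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class MockWiringSketch {
    interface Parent { String getId(); }          // stand-in for DatanodeStateMachine
    interface Context { Parent getParent(); }     // stand-in for StateContext

    public static void main(String[] args) {
        Parent parent = mock(Parent.class);
        Context context = mock(Context.class);
        when(parent.getId()).thenReturn("dn-1");
        when(context.getParent()).thenReturn(parent);
        // Code under test can now traverse the chain and see consistent test data:
        System.out.println(context.getParent().getId()); // prints "dn-1"
    }
}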

Example 3 with VolumeSet

Use of org.apache.hadoop.ozone.container.common.volume.VolumeSet in project Apache Ozone.

From class AbstractTestChunkManager, method setUp().

@Before
public final void setUp() throws Exception {
    OzoneConfiguration config = new OzoneConfiguration();
    getStrategy().updateConfig(config);
    UUID datanodeId = UUID.randomUUID();
    hddsVolume = new HddsVolume.Builder(folder.getRoot().getAbsolutePath())
            .conf(config)
            .datanodeUuid(datanodeId.toString())
            .build();
    VolumeSet volumeSet = mock(MutableVolumeSet.class);
    RoundRobinVolumeChoosingPolicy volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class);
    Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong())).thenReturn(hddsVolume);
    keyValueContainerData = new KeyValueContainerData(1L, ContainerLayoutVersion.getConfiguredVersion(config), (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), datanodeId.toString());
    keyValueContainer = new KeyValueContainer(keyValueContainerData, config);
    keyValueContainer.create(volumeSet, volumeChoosingPolicy, UUID.randomUUID().toString());
    header = "my header".getBytes(UTF_8);
    byte[] bytes = "testing write chunks".getBytes(UTF_8);
    data = ByteBuffer.allocate(header.length + bytes.length).put(header).put(bytes);
    rewindBufferToDataStart();
    // Creating BlockData
    blockID = new BlockID(1L, 1L);
    chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID.getLocalID(), 0), 0, bytes.length);
}
Also used: RoundRobinVolumeChoosingPolicy (org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy), ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo), OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration), BlockID (org.apache.hadoop.hdds.client.BlockID), UUID (java.util.UUID), MutableVolumeSet (org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet), VolumeSet (org.apache.hadoop.ozone.container.common.volume.VolumeSet), KeyValueContainerData (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData), KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer), Before (org.junit.Before)
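The buffer setup above writes a header followed by the payload, then rewinds so readers see only the payload. A minimal sketch of that ByteBuffer positioning, assuming rewindBufferToDataStart() in the test is equivalent to position(header.length):

import java.nio.ByteBuffer;
import static java.nio.charset.StandardCharsets.UTF_8;

public class BufferLayoutSketch {
    public static void main(String[] args) {
        byte[] header = "my header".getBytes(UTF_8);
        byte[] payload = "testing write chunks".getBytes(UTF_8);
        ByteBuffer data = ByteBuffer.allocate(header.length + payload.length)
                .put(header)
                .put(payload);
        data.position(header.length); // rewind to where the payload begins
        byte[] out = new byte[data.remaining()];
        data.get(out);
        System.out.println(new String(out, UTF_8)); // prints "testing write chunks"
    }
}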

Example 4 with VolumeSet

Use of org.apache.hadoop.ozone.container.common.volume.VolumeSet in project Apache Ozone.

From class TestKeyValueHandler, method testDeleteContainer().

@Test
public void testDeleteContainer() throws IOException {
    final String testDir = GenericTestUtils.getTempPath(TestKeyValueHandler.class.getSimpleName() + "-" + UUID.randomUUID().toString());
    try {
        final long containerID = 1L;
        final ConfigurationSource conf = new OzoneConfiguration();
        final ContainerSet containerSet = new ContainerSet();
        final VolumeSet volumeSet = Mockito.mock(VolumeSet.class);
        Mockito.when(volumeSet.getVolumesList()).thenReturn(Collections.singletonList(new HddsVolume.Builder(testDir).conf(conf).build()));
        final int[] interval = new int[1];
        interval[0] = 2;
        final ContainerMetrics metrics = new ContainerMetrics(interval);
        final AtomicInteger icrReceived = new AtomicInteger(0);
        final KeyValueHandler kvHandler = new KeyValueHandler(conf, UUID.randomUUID().toString(), containerSet, volumeSet, metrics, c -> icrReceived.incrementAndGet());
        kvHandler.setClusterID(UUID.randomUUID().toString());
        final ContainerCommandRequestProto createContainer = ContainerCommandRequestProto.newBuilder()
                .setCmdType(ContainerProtos.Type.CreateContainer)
                .setDatanodeUuid(UUID.randomUUID().toString())
                .setCreateContainer(ContainerProtos.CreateContainerRequestProto.newBuilder()
                        .setContainerType(ContainerType.KeyValueContainer)
                        .build())
                .setContainerID(containerID)
                .setPipelineID(UUID.randomUUID().toString())
                .build();
        kvHandler.handleCreateContainer(createContainer, null);
        Assert.assertEquals(1, icrReceived.get());
        Assert.assertNotNull(containerSet.getContainer(containerID));
        kvHandler.deleteContainer(containerSet.getContainer(containerID), true);
        Assert.assertEquals(2, icrReceived.get());
        Assert.assertNull(containerSet.getContainer(containerID));
    } finally {
        FileUtils.deleteDirectory(new File(testDir));
    }
}
Also used: ConfigurationSource (org.apache.hadoop.hdds.conf.ConfigurationSource), OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration), ContainerSet (org.apache.hadoop.ozone.container.common.impl.ContainerSet), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), ContainerCommandRequestProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto), ContainerMetrics (org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics), File (java.io.File), MutableVolumeSet (org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet), VolumeSet (org.apache.hadoop.ozone.container.common.volume.VolumeSet), Test (org.junit.Test)
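The icrReceived counter in this test shows a simple way to verify callback counts without a mocking framework: pass a lambda that increments an AtomicInteger. A minimal sketch, with Consumer<String> as a hypothetical stand-in for the real ICR-sender interface:

import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;

public class CallbackCountSketch {
    public static void main(String[] args) {
        AtomicInteger icrReceived = new AtomicInteger(0);
        // The handler under test would invoke this once per incremental container report.
        Consumer<String> icrSender = c -> icrReceived.incrementAndGet();
        icrSender.accept("create"); // simulates the report sent on container create
        icrSender.accept("delete"); // simulates the report sent on container delete
        System.out.println("ICR callbacks received: " + icrReceived.get()); // prints 2
    }
}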

Example 5 with VolumeSet

Use of org.apache.hadoop.ozone.container.common.volume.VolumeSet in project Apache Ozone.

From class TestContainerMetrics, method testContainerMetrics().

@Test
public void testContainerMetrics() throws Exception {
    XceiverServerGrpc server = null;
    XceiverClientGrpc client = null;
    long containerID = ContainerTestHelper.getTestContainerID();
    String path = GenericTestUtils.getRandomizedTempPath();
    try {
        final int interval = 1;
        Pipeline pipeline = MockPipeline.createSingleNodePipeline();
        OzoneConfiguration conf = new OzoneConfiguration();
        conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, pipeline.getFirstNode().getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
        conf.setInt(DFSConfigKeysLegacy.DFS_METRICS_PERCENTILES_INTERVALS_KEY, interval);
        DatanodeDetails datanodeDetails = randomDatanodeDetails();
        conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, path);
        conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, path);
        VolumeSet volumeSet = new MutableVolumeSet(datanodeDetails.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null);
        ContainerSet containerSet = new ContainerSet();
        DatanodeStateMachine stateMachine = Mockito.mock(DatanodeStateMachine.class);
        StateContext context = Mockito.mock(StateContext.class);
        Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails);
        Mockito.when(context.getParent()).thenReturn(stateMachine);
        ContainerMetrics metrics = ContainerMetrics.create(conf);
        Map<ContainerProtos.ContainerType, Handler> handlers = Maps.newHashMap();
        for (ContainerProtos.ContainerType containerType : ContainerProtos.ContainerType.values()) {
            handlers.put(containerType, Handler.getHandlerForContainerType(containerType, conf, context.getParent().getDatanodeDetails().getUuidString(), containerSet, volumeSet, metrics, c -> {
            }));
        }
        HddsDispatcher dispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers, context, metrics, null);
        dispatcher.setClusterId(UUID.randomUUID().toString());
        server = new XceiverServerGrpc(datanodeDetails, conf, dispatcher, null);
        client = new XceiverClientGrpc(pipeline, conf);
        server.start();
        client.connect();
        // Create container
        ContainerCommandRequestProto request = ContainerTestHelper.getCreateContainerRequest(containerID, pipeline);
        ContainerCommandResponseProto response = client.sendCommand(request);
        Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
        // Write Chunk
        BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
        ContainerProtos.ContainerCommandRequestProto writeChunkRequest = ContainerTestHelper.getWriteChunkRequest(pipeline, blockID, 1024, null);
        response = client.sendCommand(writeChunkRequest);
        Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
        // Read Chunk
        ContainerProtos.ContainerCommandRequestProto readChunkRequest = ContainerTestHelper.getReadChunkRequest(pipeline, writeChunkRequest.getWriteChunk());
        response = client.sendCommand(readChunkRequest);
        Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
        MetricsRecordBuilder containerMetrics = getMetrics("StorageContainerMetrics");
        assertCounter("NumOps", 3L, containerMetrics);
        assertCounter("numCreateContainer", 1L, containerMetrics);
        assertCounter("numWriteChunk", 1L, containerMetrics);
        assertCounter("numReadChunk", 1L, containerMetrics);
        assertCounter("bytesWriteChunk", 1024L, containerMetrics);
        assertCounter("bytesReadChunk", 1024L, containerMetrics);
        String sec = interval + "s";
        Thread.sleep((interval + 1) * 1000);
        assertQuantileGauges("WriteChunkNanos" + sec, containerMetrics);
        // Check VolumeIOStats metrics
        List<HddsVolume> volumes = StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList());
        HddsVolume hddsVolume = volumes.get(0);
        MetricsRecordBuilder volumeIOMetrics = getMetrics(hddsVolume.getVolumeIOStats().getMetricsSourceName());
        assertCounter("ReadBytes", 1024L, volumeIOMetrics);
        assertCounter("ReadOpCount", 1L, volumeIOMetrics);
        assertCounter("WriteBytes", 1024L, volumeIOMetrics);
        assertCounter("WriteOpCount", 1L, volumeIOMetrics);
    } finally {
        if (client != null) {
            client.close();
        }
        if (server != null) {
            server.stop();
        }
        // clean up volume dir
        File file = new File(path);
        if (file.exists()) {
            FileUtil.fullyDelete(file);
        }
    }
}
Also used: ScmConfigKeys (org.apache.hadoop.hdds.scm.ScmConfigKeys), HddsDispatcher (org.apache.hadoop.ozone.container.common.impl.HddsDispatcher), OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration), MockPipeline (org.apache.hadoop.hdds.scm.pipeline.MockPipeline), MockDatanodeDetails.randomDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails), BlockID (org.apache.hadoop.hdds.client.BlockID), StateContext (org.apache.hadoop.ozone.container.common.statemachine.StateContext), ContainerCommandResponseProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto), ContainerSet (org.apache.hadoop.ozone.container.common.impl.ContainerSet), ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos), MutableVolumeSet (org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet), MetricsAsserts.getMetrics (org.apache.hadoop.test.MetricsAsserts.getMetrics), HddsVolume (org.apache.hadoop.ozone.container.common.volume.HddsVolume), MetricsAsserts.assertCounter (org.apache.hadoop.test.MetricsAsserts.assertCounter), Handler (org.apache.hadoop.ozone.container.common.interfaces.Handler), MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder), Map (java.util.Map), Timeout (org.junit.rules.Timeout), DatanodeStateMachine (org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine), StorageVolumeUtil (org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil), FileUtil (org.apache.hadoop.fs.FileUtil), ContainerTestHelper (org.apache.hadoop.ozone.container.ContainerTestHelper), DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), ContainerMetrics (org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics), Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline), Test (org.junit.Test), UUID (java.util.UUID), XceiverServerGrpc (org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc), Maps (com.google.common.collect.Maps), File (java.io.File), XceiverClientGrpc (org.apache.hadoop.hdds.scm.XceiverClientGrpc), Mockito (org.mockito.Mockito), List (java.util.List), StorageVolume (org.apache.hadoop.ozone.container.common.volume.StorageVolume), VolumeSet (org.apache.hadoop.ozone.container.common.volume.VolumeSet), Rule (org.junit.Rule), MetricsAsserts.assertQuantileGauges (org.apache.hadoop.test.MetricsAsserts.assertQuantileGauges), OzoneConfigKeys (org.apache.hadoop.ozone.OzoneConfigKeys), ContainerCommandRequestProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto), Assert (org.junit.Assert), GenericTestUtils (org.apache.ozone.test.GenericTestUtils), DFSConfigKeysLegacy (org.apache.hadoop.hdds.DFSConfigKeysLegacy)
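One timing detail in this test deserves a note: quantile gauges such as WriteChunkNanos1s only publish after their rollover interval elapses, which is why the test sleeps for (interval + 1) seconds before asserting. A minimal sketch of that wait, assuming Hadoop's convention of suffixing the gauge name with the interval in seconds (as the test's "WriteChunkNanos" + sec usage suggests):

public class QuantileWaitSketch {
    public static void main(String[] args) throws InterruptedException {
        final int interval = 1;                                 // seconds, matching the test
        String gaugeName = "WriteChunkNanos" + interval + "s";  // "WriteChunkNanos1s"
        // Sleep one second past the interval so the quantile window has rolled at least once.
        Thread.sleep((interval + 1) * 1000L);
        System.out.println("Quantile gauges ready to assert: " + gaugeName);
    }
}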

Aggregations

MutableVolumeSet (org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet): 9
VolumeSet (org.apache.hadoop.ozone.container.common.volume.VolumeSet): 9
ContainerMetrics (org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics): 7
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration): 6
ContainerSet (org.apache.hadoop.ozone.container.common.impl.ContainerSet): 6
Test (org.junit.Test): 6
File (java.io.File): 4
IOException (java.io.IOException): 4
UUID (java.util.UUID): 4
ContainerCommandRequestProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto): 4
Handler (org.apache.hadoop.ozone.container.common.interfaces.Handler): 4
DatanodeStateMachine (org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine): 4
StateContext (org.apache.hadoop.ozone.container.common.statemachine.StateContext): 4
Maps (com.google.common.collect.Maps): 3
List (java.util.List): 3
Map (java.util.Map): 3
BlockID (org.apache.hadoop.hdds.client.BlockID): 3
DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails): 3
ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos): 3
ContainerCommandResponseProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto): 3