Search in sources :

Example 11 with XceiverClientGrpc

Usage of org.apache.hadoop.hdds.scm.XceiverClientGrpc in the Apache Ozone project.

Source: method testContainerMetrics of class TestContainerMetrics.

/**
 * Verifies container-level and volume-level metrics after a
 * create-container / write-chunk / read-chunk sequence against a
 * standalone gRPC XceiverServer backed by a mocked datanode state machine.
 *
 * Fix: removed a duplicated call to
 * {@code ContainerTestHelper.getWriteChunkRequest(...)} whose result was
 * discarded — dead code that built the same request twice.
 */
@Test
public void testContainerMetrics() throws Exception {
    XceiverServerGrpc server = null;
    XceiverClientGrpc client = null;
    long containerID = ContainerTestHelper.getTestContainerID();
    String path = GenericTestUtils.getRandomizedTempPath();
    try {
        // Percentile (quantile) metrics window, in seconds.
        final int interval = 1;
        Pipeline pipeline = MockPipeline.createSingleNodePipeline();
        OzoneConfiguration conf = new OzoneConfiguration();
        conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, pipeline.getFirstNode().getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
        conf.setInt(DFSConfigKeysLegacy.DFS_METRICS_PERCENTILES_INTERVALS_KEY, interval);
        DatanodeDetails datanodeDetails = randomDatanodeDetails();
        conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, path);
        conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, path);
        VolumeSet volumeSet = new MutableVolumeSet(datanodeDetails.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null);
        ContainerSet containerSet = new ContainerSet();
        // Mock the state machine so the dispatcher sees our datanode details
        // without running a real datanode.
        DatanodeStateMachine stateMachine = Mockito.mock(DatanodeStateMachine.class);
        StateContext context = Mockito.mock(StateContext.class);
        Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails);
        Mockito.when(context.getParent()).thenReturn(stateMachine);
        ContainerMetrics metrics = ContainerMetrics.create(conf);
        Map<ContainerProtos.ContainerType, Handler> handlers = Maps.newHashMap();
        for (ContainerProtos.ContainerType containerType : ContainerProtos.ContainerType.values()) {
            handlers.put(containerType, Handler.getHandlerForContainerType(containerType, conf, context.getParent().getDatanodeDetails().getUuidString(), containerSet, volumeSet, metrics, c -> {
            }));
        }
        HddsDispatcher dispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers, context, metrics, null);
        dispatcher.setClusterId(UUID.randomUUID().toString());
        server = new XceiverServerGrpc(datanodeDetails, conf, dispatcher, null);
        client = new XceiverClientGrpc(pipeline, conf);
        server.start();
        client.connect();
        // Create container
        ContainerCommandRequestProto request = ContainerTestHelper.getCreateContainerRequest(containerID, pipeline);
        ContainerCommandResponseProto response = client.sendCommand(request);
        Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
        // Write Chunk
        BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
        ContainerProtos.ContainerCommandRequestProto writeChunkRequest = ContainerTestHelper.getWriteChunkRequest(pipeline, blockID, 1024, null);
        response = client.sendCommand(writeChunkRequest);
        Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
        // Read Chunk
        ContainerProtos.ContainerCommandRequestProto readChunkRequest = ContainerTestHelper.getReadChunkRequest(pipeline, writeChunkRequest.getWriteChunk());
        response = client.sendCommand(readChunkRequest);
        Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
        // One create + one write + one read = 3 total operations.
        MetricsRecordBuilder containerMetrics = getMetrics("StorageContainerMetrics");
        assertCounter("NumOps", 3L, containerMetrics);
        assertCounter("numCreateContainer", 1L, containerMetrics);
        assertCounter("numWriteChunk", 1L, containerMetrics);
        assertCounter("numReadChunk", 1L, containerMetrics);
        assertCounter("bytesWriteChunk", 1024L, containerMetrics);
        assertCounter("bytesReadChunk", 1024L, containerMetrics);
        String sec = interval + "s";
        // Wait one interval past the quantile window so the latency
        // percentiles have been published before asserting on them.
        Thread.sleep((interval + 1) * 1000);
        assertQuantileGauges("WriteChunkNanos" + sec, containerMetrics);
        // Check VolumeIOStats metrics
        List<HddsVolume> volumes = StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList());
        HddsVolume hddsVolume = volumes.get(0);
        MetricsRecordBuilder volumeIOMetrics = getMetrics(hddsVolume.getVolumeIOStats().getMetricsSourceName());
        assertCounter("ReadBytes", 1024L, volumeIOMetrics);
        assertCounter("ReadOpCount", 1L, volumeIOMetrics);
        assertCounter("WriteBytes", 1024L, volumeIOMetrics);
        assertCounter("WriteOpCount", 1L, volumeIOMetrics);
    } finally {
        if (client != null) {
            client.close();
        }
        if (server != null) {
            server.stop();
        }
        // clean up volume dir
        File file = new File(path);
        if (file.exists()) {
            FileUtil.fullyDelete(file);
        }
    }
}
Also used : ScmConfigKeys(org.apache.hadoop.hdds.scm.ScmConfigKeys) HddsDispatcher(org.apache.hadoop.ozone.container.common.impl.HddsDispatcher) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) MockPipeline(org.apache.hadoop.hdds.scm.pipeline.MockPipeline) MockDatanodeDetails.randomDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails) BlockID(org.apache.hadoop.hdds.client.BlockID) StateContext(org.apache.hadoop.ozone.container.common.statemachine.StateContext) ContainerCommandResponseProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto) ContainerSet(org.apache.hadoop.ozone.container.common.impl.ContainerSet) ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) MutableVolumeSet(org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet) MetricsAsserts.getMetrics(org.apache.hadoop.test.MetricsAsserts.getMetrics) HddsVolume(org.apache.hadoop.ozone.container.common.volume.HddsVolume) MetricsAsserts.assertCounter(org.apache.hadoop.test.MetricsAsserts.assertCounter) Handler(org.apache.hadoop.ozone.container.common.interfaces.Handler) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder) Map(java.util.Map) Timeout(org.junit.rules.Timeout) DatanodeStateMachine(org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine) StorageVolumeUtil(org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil) FileUtil(org.apache.hadoop.fs.FileUtil) ContainerTestHelper(org.apache.hadoop.ozone.container.ContainerTestHelper) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) ContainerMetrics(org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) Test(org.junit.Test) UUID(java.util.UUID) XceiverServerGrpc(org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc) Maps(com.google.common.collect.Maps) File(java.io.File) 
XceiverClientGrpc(org.apache.hadoop.hdds.scm.XceiverClientGrpc) Mockito(org.mockito.Mockito) List(java.util.List) StorageVolume(org.apache.hadoop.ozone.container.common.volume.StorageVolume) VolumeSet(org.apache.hadoop.ozone.container.common.volume.VolumeSet) Rule(org.junit.Rule) MetricsAsserts.assertQuantileGauges(org.apache.hadoop.test.MetricsAsserts.assertQuantileGauges) OzoneConfigKeys(org.apache.hadoop.ozone.OzoneConfigKeys) ContainerCommandRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto) Assert(org.junit.Assert) GenericTestUtils(org.apache.ozone.test.GenericTestUtils) DFSConfigKeysLegacy(org.apache.hadoop.hdds.DFSConfigKeysLegacy) ContainerCommandRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto) HddsDispatcher(org.apache.hadoop.ozone.container.common.impl.HddsDispatcher) ContainerCommandResponseProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto) MockDatanodeDetails.randomDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) XceiverClientGrpc(org.apache.hadoop.hdds.scm.XceiverClientGrpc) ContainerCommandRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto) DatanodeStateMachine(org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine) MutableVolumeSet(org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet) MutableVolumeSet(org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet) VolumeSet(org.apache.hadoop.ozone.container.common.volume.VolumeSet) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder) ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) StateContext(org.apache.hadoop.ozone.container.common.statemachine.StateContext) 
OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) Handler(org.apache.hadoop.ozone.container.common.interfaces.Handler) XceiverServerGrpc(org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc) MockPipeline(org.apache.hadoop.hdds.scm.pipeline.MockPipeline) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) HddsVolume(org.apache.hadoop.ozone.container.common.volume.HddsVolume) ContainerSet(org.apache.hadoop.ozone.container.common.impl.ContainerSet) BlockID(org.apache.hadoop.hdds.client.BlockID) ContainerMetrics(org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics) File(java.io.File) Test(org.junit.Test)

Example 12 with XceiverClientGrpc

use of org.apache.hadoop.hdds.scm.XceiverClientGrpc in project ozone by apache.

Source: method testDeleteContainer of class TestOzoneContainer.

/**
 * Verifies container deletion semantics: an open container cannot be
 * deleted while the force flag is false, but is deleted once force is set.
 */
@Test
public void testDeleteContainer() throws Exception {
    MiniOzoneCluster cluster = null;
    XceiverClientGrpc client = null;
    try {
        OzoneConfiguration conf = newOzoneConfiguration();
        conf.set(OZONE_METADATA_DIRS, tempFolder.newFolder().getPath());
        conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.newFolder().getPath());
        cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build();
        cluster.waitForClusterToBeReady();
        client = createClientForTesting(cluster);
        client.connect();
        long containerID = ContainerTestHelper.getTestContainerID();
        createContainerForTesting(client, containerID);
        ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
            writeChunkForContainer(client, containerID, 1024);
        // Put a block into the container before attempting deletion.
        ContainerProtos.ContainerCommandRequestProto putBlockRequest =
            ContainerTestHelper.getPutBlockRequest(client.getPipeline(), writeChunkRequest.getWriteChunk());
        ContainerProtos.ContainerCommandResponseProto response = client.sendCommand(putBlockRequest);
        Assert.assertNotNull(response);
        Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
        // Deleting must be rejected while the container is still open and
        // force is false.
        ContainerProtos.ContainerCommandRequestProto deleteRequest =
            ContainerTestHelper.getDeleteContainer(client.getPipeline(), containerID, false);
        response = client.sendCommand(deleteRequest);
        Assert.assertNotNull(response);
        Assert.assertEquals(ContainerProtos.Result.DELETE_ON_OPEN_CONTAINER, response.getResult());
        // With force set, deletion succeeds even without closing first.
        deleteRequest = ContainerTestHelper.getDeleteContainer(client.getPipeline(), containerID, true);
        response = client.sendCommand(deleteRequest);
        Assert.assertNotNull(response);
        Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
    } finally {
        if (client != null) {
            client.close();
        }
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) MiniOzoneCluster(org.apache.hadoop.ozone.MiniOzoneCluster) XceiverClientGrpc(org.apache.hadoop.hdds.scm.XceiverClientGrpc) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) Test(org.junit.jupiter.api.Test)

Example 13 with XceiverClientGrpc

use of org.apache.hadoop.hdds.scm.XceiverClientGrpc in project ozone by apache.

the class TestOzoneContainer method testCreateOzoneContainer.

/**
 * Starts an OzoneContainer directly (without a datanode) and verifies a
 * container can be created through it over gRPC.
 *
 * Fix: the XceiverClientGrpc was created and connected but never closed,
 * leaking the gRPC channel on every run. The client is now hoisted out of
 * the try block and closed in {@code finally}, matching the pattern used
 * by the other tests in this class.
 */
@Test
public void testCreateOzoneContainer() throws Exception {
    long containerID = ContainerTestHelper.getTestContainerID();
    OzoneConfiguration conf = newOzoneConfiguration();
    OzoneContainer container = null;
    XceiverClientGrpc client = null;
    try {
        // We don't start Ozone Container via data node, we will do it
        // independently in our test path.
        Pipeline pipeline = MockPipeline.createSingleNodePipeline();
        conf.set(OZONE_METADATA_DIRS, tempFolder.newFolder().getPath());
        conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.newFolder().getPath());
        conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, pipeline.getFirstNode().getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
        DatanodeDetails datanodeDetails = randomDatanodeDetails();
        // Mocked state machine supplies the datanode identity only.
        StateContext context = Mockito.mock(StateContext.class);
        DatanodeStateMachine dsm = Mockito.mock(DatanodeStateMachine.class);
        Mockito.when(dsm.getDatanodeDetails()).thenReturn(datanodeDetails);
        Mockito.when(context.getParent()).thenReturn(dsm);
        container = new OzoneContainer(datanodeDetails, conf, context, null);
        // Set clusterId and manually start ozone container.
        container.start(UUID.randomUUID().toString());
        client = new XceiverClientGrpc(pipeline, conf);
        client.connect();
        createContainerForTesting(client, containerID);
    } finally {
        if (client != null) {
            client.close();
        }
        if (container != null) {
            container.stop();
        }
    }
}
Also used : MockDatanodeDetails.randomDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) XceiverClientGrpc(org.apache.hadoop.hdds.scm.XceiverClientGrpc) StateContext(org.apache.hadoop.ozone.container.common.statemachine.StateContext) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) DatanodeStateMachine(org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine) MockPipeline(org.apache.hadoop.hdds.scm.pipeline.MockPipeline) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) Test(org.junit.jupiter.api.Test)

Example 14 with XceiverClientGrpc

use of org.apache.hadoop.hdds.scm.XceiverClientGrpc in project ozone by apache.

the class TestOzoneContainer method testXcieverClientAsync.

/**
 * Exercises the asynchronous command path of XceiverClientGrpc against a
 * single-datanode MiniOzoneCluster via {@code runAsyncTests}.
 *
 * Fix: the client was connected by {@code createClientForTesting} but
 * never closed, leaking the gRPC channel; it is now closed in
 * {@code finally}, consistent with {@code testDeleteContainer}.
 */
@Test
public void testXcieverClientAsync() throws Exception {
    MiniOzoneCluster cluster = null;
    XceiverClientGrpc client = null;
    try {
        OzoneConfiguration conf = newOzoneConfiguration();
        conf.set(OZONE_METADATA_DIRS, tempFolder.newFolder().getPath());
        conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.newFolder().getPath());
        cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build();
        cluster.waitForClusterToBeReady();
        long containerID = ContainerTestHelper.getTestContainerID();
        client = createClientForTesting(cluster);
        runAsyncTests(containerID, client);
    } finally {
        if (client != null) {
            client.close();
        }
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : MiniOzoneCluster(org.apache.hadoop.ozone.MiniOzoneCluster) XceiverClientGrpc(org.apache.hadoop.hdds.scm.XceiverClientGrpc) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) Test(org.junit.jupiter.api.Test)

Example 15 with XceiverClientGrpc

use of org.apache.hadoop.hdds.scm.XceiverClientGrpc in project ozone by apache.

the class TestOzoneContainer method testOzoneContainerViaDataNode.

/**
 * Runs the standard container round-trip ({@code
 * runTestOzoneContainerViaDataNode}) against an ozone container started
 * the normal way, via a MiniOzoneCluster datanode.
 *
 * Fix: the client was a try-local variable and never closed, leaking the
 * gRPC channel; it is now hoisted out and closed in {@code finally},
 * consistent with {@code testDeleteContainer}.
 */
@Test
public void testOzoneContainerViaDataNode() throws Exception {
    MiniOzoneCluster cluster = null;
    XceiverClientGrpc client = null;
    try {
        long containerID = ContainerTestHelper.getTestContainerID();
        OzoneConfiguration conf = newOzoneConfiguration();
        // Start ozone container Via Datanode create.
        cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build();
        cluster.waitForClusterToBeReady();
        // This client talks to ozone container via datanode.
        client = createClientForTesting(cluster);
        runTestOzoneContainerViaDataNode(containerID, client);
    } finally {
        if (client != null) {
            client.close();
        }
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : MiniOzoneCluster(org.apache.hadoop.ozone.MiniOzoneCluster) XceiverClientGrpc(org.apache.hadoop.hdds.scm.XceiverClientGrpc) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) Test(org.junit.jupiter.api.Test)

Aggregations

XceiverClientGrpc (org.apache.hadoop.hdds.scm.XceiverClientGrpc)22 DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails)12 Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline)11 Test (org.junit.Test)11 OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration)10 Test (org.junit.jupiter.api.Test)9 MockDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails)6 MockPipeline (org.apache.hadoop.hdds.scm.pipeline.MockPipeline)6 MiniOzoneCluster (org.apache.hadoop.ozone.MiniOzoneCluster)6 MockDatanodeDetails.randomDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails)5 ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos)4 ContainerCommandResponseProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto)4 GenericTestUtils (org.apache.ozone.test.GenericTestUtils)4 IOException (java.io.IOException)3 ArrayList (java.util.ArrayList)3 ContainerCommandRequestProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto)3 HddsDispatcher (org.apache.hadoop.ozone.container.common.impl.HddsDispatcher)3 DatanodeStateMachine (org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine)3 StateContext (org.apache.hadoop.ozone.container.common.statemachine.StateContext)3 XceiverServerGrpc (org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc)3