Usage example of org.apache.hadoop.hdds.scm.XceiverClientGrpc in the Apache Ozone project.
From the class TestContainerMetrics, method testContainerMetrics.
@Test
public void testContainerMetrics() throws Exception {
  XceiverServerGrpc server = null;
  XceiverClientGrpc client = null;
  long containerID = ContainerTestHelper.getTestContainerID();
  String path = GenericTestUtils.getRandomizedTempPath();
  try {
    // Percentile window (in seconds) used for quantile gauges such as
    // "WriteChunkNanos<interval>s"; kept small so the test can wait it out.
    final int interval = 1;
    Pipeline pipeline = MockPipeline.createSingleNodePipeline();
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
        pipeline.getFirstNode()
            .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
    conf.setInt(DFSConfigKeysLegacy.DFS_METRICS_PERCENTILES_INTERVALS_KEY,
        interval);
    DatanodeDetails datanodeDetails = randomDatanodeDetails();
    conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, path);
    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, path);

    // Wire up a standalone dispatcher + gRPC server without a real datanode:
    // the state machine and context are mocked just enough for the handlers
    // to resolve the datanode UUID.
    VolumeSet volumeSet = new MutableVolumeSet(datanodeDetails.getUuidString(),
        conf, null, StorageVolume.VolumeType.DATA_VOLUME, null);
    ContainerSet containerSet = new ContainerSet();
    DatanodeStateMachine stateMachine = Mockito.mock(DatanodeStateMachine.class);
    StateContext context = Mockito.mock(StateContext.class);
    Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails);
    Mockito.when(context.getParent()).thenReturn(stateMachine);
    ContainerMetrics metrics = ContainerMetrics.create(conf);
    Map<ContainerProtos.ContainerType, Handler> handlers = Maps.newHashMap();
    for (ContainerProtos.ContainerType containerType
        : ContainerProtos.ContainerType.values()) {
      handlers.put(containerType,
          Handler.getHandlerForContainerType(containerType, conf,
              context.getParent().getDatanodeDetails().getUuidString(),
              containerSet, volumeSet, metrics, c -> {
              }));
    }
    HddsDispatcher dispatcher = new HddsDispatcher(conf, containerSet,
        volumeSet, handlers, context, metrics, null);
    dispatcher.setClusterId(UUID.randomUUID().toString());
    server = new XceiverServerGrpc(datanodeDetails, conf, dispatcher, null);
    client = new XceiverClientGrpc(pipeline, conf);
    server.start();
    client.connect();

    // Create container.
    ContainerCommandRequestProto request =
        ContainerTestHelper.getCreateContainerRequest(containerID, pipeline);
    ContainerCommandResponseProto response = client.sendCommand(request);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());

    // Write a 1 KB chunk. (A previously duplicated, unused
    // getWriteChunkRequest call was removed here — it only built a request
    // that was immediately discarded.)
    BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
    ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
        ContainerTestHelper.getWriteChunkRequest(pipeline, blockID, 1024, null);
    response = client.sendCommand(writeChunkRequest);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());

    // Read the same chunk back.
    ContainerProtos.ContainerCommandRequestProto readChunkRequest =
        ContainerTestHelper.getReadChunkRequest(pipeline,
            writeChunkRequest.getWriteChunk());
    response = client.sendCommand(readChunkRequest);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());

    // Verify aggregate container metrics: 3 ops total (create/write/read),
    // one of each kind, 1024 bytes written and read.
    MetricsRecordBuilder containerMetrics = getMetrics("StorageContainerMetrics");
    assertCounter("NumOps", 3L, containerMetrics);
    assertCounter("numCreateContainer", 1L, containerMetrics);
    assertCounter("numWriteChunk", 1L, containerMetrics);
    assertCounter("numReadChunk", 1L, containerMetrics);
    assertCounter("bytesWriteChunk", 1024L, containerMetrics);
    assertCounter("bytesReadChunk", 1024L, containerMetrics);

    // Wait one full percentile interval so the quantile gauges are published.
    String sec = interval + "s";
    Thread.sleep((interval + 1) * 1000);
    assertQuantileGauges("WriteChunkNanos" + sec, containerMetrics);

    // Check per-volume IO stats for the single data volume.
    List<HddsVolume> volumes =
        StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList());
    HddsVolume hddsVolume = volumes.get(0);
    MetricsRecordBuilder volumeIOMetrics =
        getMetrics(hddsVolume.getVolumeIOStats().getMetricsSourceName());
    assertCounter("ReadBytes", 1024L, volumeIOMetrics);
    assertCounter("ReadOpCount", 1L, volumeIOMetrics);
    assertCounter("WriteBytes", 1024L, volumeIOMetrics);
    assertCounter("WriteOpCount", 1L, volumeIOMetrics);
  } finally {
    if (client != null) {
      client.close();
    }
    if (server != null) {
      server.stop();
    }
    // Clean up the randomized volume/metadata directory.
    File file = new File(path);
    if (file.exists()) {
      FileUtil.fullyDelete(file);
    }
  }
}
Usage example of org.apache.hadoop.hdds.scm.XceiverClientGrpc in the Apache Ozone project.
From the class TestOzoneContainer, method testDeleteContainer.
@Test
public void testDeleteContainer() throws Exception {
  MiniOzoneCluster cluster = null;
  XceiverClientGrpc client = null;
  try {
    // Bring up a one-datanode cluster backed by temp directories.
    OzoneConfiguration conf = newOzoneConfiguration();
    conf.set(OZONE_METADATA_DIRS, tempFolder.newFolder().getPath());
    conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.newFolder().getPath());
    cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build();
    cluster.waitForClusterToBeReady();
    client = createClientForTesting(cluster);
    client.connect();

    // Create a container with one chunk and one block in it.
    long containerID = ContainerTestHelper.getTestContainerID();
    createContainerForTesting(client, containerID);
    ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
        writeChunkForContainer(client, containerID, 1024);
    ContainerProtos.ContainerCommandRequestProto putBlockRequest =
        ContainerTestHelper.getPutBlockRequest(client.getPipeline(),
            writeChunkRequest.getWriteChunk());
    ContainerProtos.ContainerCommandResponseProto response =
        client.sendCommand(putBlockRequest);
    Assert.assertNotNull(response);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());

    // A non-forced delete must be rejected while the container is open.
    ContainerProtos.ContainerCommandRequestProto request =
        ContainerTestHelper.getDeleteContainer(client.getPipeline(),
            containerID, false);
    response = client.sendCommand(request);
    Assert.assertNotNull(response);
    Assert.assertEquals(ContainerProtos.Result.DELETE_ON_OPEN_CONTAINER,
        response.getResult());

    // With the force flag set, the same open container is deletable.
    request = ContainerTestHelper.getDeleteContainer(client.getPipeline(),
        containerID, true);
    response = client.sendCommand(request);
    Assert.assertNotNull(response);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
  } finally {
    if (client != null) {
      client.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Usage example of org.apache.hadoop.hdds.scm.XceiverClientGrpc in the Apache Ozone project.
From the class TestOzoneContainer, method testCreateOzoneContainer.
@Test
public void testCreateOzoneContainer() throws Exception {
  long containerID = ContainerTestHelper.getTestContainerID();
  OzoneConfiguration conf = newOzoneConfiguration();
  OzoneContainer container = null;
  XceiverClientGrpc client = null;
  try {
    // We don't start the Ozone container via a datanode; we start it
    // independently on our test path.
    Pipeline pipeline = MockPipeline.createSingleNodePipeline();
    conf.set(OZONE_METADATA_DIRS, tempFolder.newFolder().getPath());
    conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.newFolder().getPath());
    conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
        pipeline.getFirstNode()
            .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
    DatanodeDetails datanodeDetails = randomDatanodeDetails();
    // Mock just enough of the state machine for OzoneContainer to start.
    StateContext context = Mockito.mock(StateContext.class);
    DatanodeStateMachine dsm = Mockito.mock(DatanodeStateMachine.class);
    Mockito.when(dsm.getDatanodeDetails()).thenReturn(datanodeDetails);
    Mockito.when(context.getParent()).thenReturn(dsm);
    container = new OzoneContainer(datanodeDetails, conf, context, null);
    // Set clusterId and manually start the ozone container.
    container.start(UUID.randomUUID().toString());
    client = new XceiverClientGrpc(pipeline, conf);
    client.connect();
    createContainerForTesting(client, containerID);
  } finally {
    // Fix: the client was previously connected but never closed, leaking
    // the gRPC channel. Close it before stopping the container.
    if (client != null) {
      client.close();
    }
    if (container != null) {
      container.stop();
    }
  }
}
Usage example of org.apache.hadoop.hdds.scm.XceiverClientGrpc in the Apache Ozone project.
From the class TestOzoneContainer, method testXcieverClientAsync.
@Test
public void testXcieverClientAsync() throws Exception {
  MiniOzoneCluster cluster = null;
  XceiverClientGrpc client = null;
  try {
    // Single-datanode cluster with temp-dir storage.
    OzoneConfiguration conf = newOzoneConfiguration();
    conf.set(OZONE_METADATA_DIRS, tempFolder.newFolder().getPath());
    conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.newFolder().getPath());
    cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build();
    cluster.waitForClusterToBeReady();
    long containerID = ContainerTestHelper.getTestContainerID();
    client = createClientForTesting(cluster);
    // NOTE(review): the client is not closed in this method — presumably
    // runAsyncTests owns/closes it; confirm, otherwise the channel leaks.
    runAsyncTests(containerID, client);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Usage example of org.apache.hadoop.hdds.scm.XceiverClientGrpc in the Apache Ozone project.
From the class TestOzoneContainer, method testOzoneContainerViaDataNode.
@Test
public void testOzoneContainerViaDataNode() throws Exception {
  MiniOzoneCluster cluster = null;
  try {
    long containerID = ContainerTestHelper.getTestContainerID();
    OzoneConfiguration conf = newOzoneConfiguration();
    // Start the ozone container via datanode creation (the cluster builder),
    // unlike testCreateOzoneContainer which starts it directly.
    cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build();
    cluster.waitForClusterToBeReady();
    // This client talks to the ozone container via the datanode.
    // NOTE(review): the client is not closed here — presumably
    // runTestOzoneContainerViaDataNode closes it internally; verify.
    XceiverClientGrpc client = createClientForTesting(cluster);
    runTestOzoneContainerViaDataNode(containerID, client);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Aggregations