use of org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet in project ozone by apache.
the class TestKeyValueBlockIterator method setUp.
@Before
public void setUp() throws Exception {
  testRoot = GenericTestUtils.getRandomizedTestDir();
  conf = new OzoneConfiguration();
  conf.set(HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath());
  conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testRoot.getAbsolutePath());
  volumeSet = new MutableVolumeSet(UUID.randomUUID().toString(), conf, null,
      StorageVolume.VolumeType.DATA_VOLUME, null);
  containerData = new KeyValueContainerData(105L, layout,
      (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(),
      UUID.randomUUID().toString());
  // Init the container.
  container = new KeyValueContainer(containerData, conf);
  container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(),
      UUID.randomUUID().toString());
  db = BlockUtils.getDB(containerData, conf);
}
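A test like this usually pairs the setUp above with a teardown that releases what it created. The following is a minimal sketch, not copied from the test class: it assumes the handle returned by BlockUtils.getDB is Closeable and relies on MutableVolumeSet.shutdown() and FileUtil.fullyDelete for cleanup.

@After
public void tearDown() throws Exception {
  // Release the DB handle obtained in setUp (assumed to be Closeable).
  db.close();
  // Stop the volume set so no background checkers keep the directory busy.
  volumeSet.shutdown();
  // Remove the randomized per-test HDDS data directory.
  FileUtil.fullyDelete(testRoot);
}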
use of org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet in project ozone by apache.
the class VersionEndpointTask method call.
/**
 * Computes a result, or throws an exception if unable to do so.
 *
 * @return computed result
 * @throws Exception if unable to compute a result
 */
@Override
public EndpointStateMachine.EndPointStates call() throws Exception {
  rpcEndPoint.lock();
  try {
    if (rpcEndPoint.getState().equals(
        EndpointStateMachine.EndPointStates.GETVERSION)) {
      SCMVersionResponseProto versionResponse =
          rpcEndPoint.getEndPoint().getVersion(null);
      VersionResponse response =
          VersionResponse.getFromProtobuf(versionResponse);
      rpcEndPoint.setVersion(response);
      if (!rpcEndPoint.isPassive()) {
        // If the endpoint is passive, the datanode does not need to check volumes.
        String scmId = response.getValue(OzoneConsts.SCM_ID);
        String clusterId = response.getValue(OzoneConsts.CLUSTER_ID);
        // Check volumes
        MutableVolumeSet volumeSet = ozoneContainer.getVolumeSet();
        volumeSet.writeLock();
        try {
          Map<String, StorageVolume> volumeMap = volumeSet.getVolumeMap();
          Preconditions.checkNotNull(scmId,
              "Reply from SCM: scmId cannot be null");
          Preconditions.checkNotNull(clusterId,
              "Reply from SCM: clusterId cannot be null");
          // Create the version file if needed and set the SCM ID / cluster ID.
          for (Map.Entry<String, StorageVolume> entry : volumeMap.entrySet()) {
            StorageVolume volume = entry.getValue();
            boolean result = HddsVolumeUtil.checkVolume((HddsVolume) volume,
                scmId, clusterId, configuration, LOG);
            if (!result) {
              volumeSet.failVolume(volume.getStorageDir().getPath());
            }
          }
          if (volumeSet.getVolumesList().size() == 0) {
            // All volumes are in an inconsistent state.
            throw new DiskOutOfSpaceException(
                "All configured Volumes are in Inconsistent State");
          }
        } finally {
          volumeSet.writeUnlock();
        }
        // Start the container services after getting the version information.
        ozoneContainer.start(clusterId);
      }
      EndpointStateMachine.EndPointStates nextState =
          rpcEndPoint.getState().getNextState();
      rpcEndPoint.setState(nextState);
      rpcEndPoint.zeroMissedCount();
    } else {
      LOG.debug("Cannot execute GetVersion task as endpoint state machine "
          + "is in {} state", rpcEndPoint.getState());
    }
  } catch (DiskOutOfSpaceException ex) {
    rpcEndPoint.setState(EndpointStateMachine.EndPointStates.SHUTDOWN);
  } catch (IOException ex) {
    rpcEndPoint.logIfNeeded(ex);
  } finally {
    rpcEndPoint.unlock();
  }
  return rpcEndPoint.getState();
}
use of org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet in project ozone by apache.
the class TestContainerMetrics method testContainerMetrics.
@Test
public void testContainerMetrics() throws Exception {
  XceiverServerGrpc server = null;
  XceiverClientGrpc client = null;
  long containerID = ContainerTestHelper.getTestContainerID();
  String path = GenericTestUtils.getRandomizedTempPath();
  try {
    final int interval = 1;
    Pipeline pipeline = MockPipeline.createSingleNodePipeline();
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
        pipeline.getFirstNode().getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
    conf.setInt(DFSConfigKeysLegacy.DFS_METRICS_PERCENTILES_INTERVALS_KEY, interval);
    DatanodeDetails datanodeDetails = randomDatanodeDetails();
    conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, path);
    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, path);
    VolumeSet volumeSet = new MutableVolumeSet(datanodeDetails.getUuidString(), conf, null,
        StorageVolume.VolumeType.DATA_VOLUME, null);
    ContainerSet containerSet = new ContainerSet();
    DatanodeStateMachine stateMachine = Mockito.mock(DatanodeStateMachine.class);
    StateContext context = Mockito.mock(StateContext.class);
    Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails);
    Mockito.when(context.getParent()).thenReturn(stateMachine);
    ContainerMetrics metrics = ContainerMetrics.create(conf);
    Map<ContainerProtos.ContainerType, Handler> handlers = Maps.newHashMap();
    for (ContainerProtos.ContainerType containerType : ContainerProtos.ContainerType.values()) {
      handlers.put(containerType, Handler.getHandlerForContainerType(containerType, conf,
          context.getParent().getDatanodeDetails().getUuidString(), containerSet, volumeSet,
          metrics, c -> {
          }));
    }
    HddsDispatcher dispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers,
        context, metrics, null);
    dispatcher.setClusterId(UUID.randomUUID().toString());
    server = new XceiverServerGrpc(datanodeDetails, conf, dispatcher, null);
    client = new XceiverClientGrpc(pipeline, conf);
    server.start();
    client.connect();
    // Create container
    ContainerCommandRequestProto request =
        ContainerTestHelper.getCreateContainerRequest(containerID, pipeline);
    ContainerCommandResponseProto response = client.sendCommand(request);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
    // Write Chunk
    BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
    ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
        ContainerTestHelper.getWriteChunkRequest(pipeline, blockID, 1024, null);
    response = client.sendCommand(writeChunkRequest);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
    // Read Chunk
    ContainerProtos.ContainerCommandRequestProto readChunkRequest =
        ContainerTestHelper.getReadChunkRequest(pipeline, writeChunkRequest.getWriteChunk());
    response = client.sendCommand(readChunkRequest);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
    MetricsRecordBuilder containerMetrics = getMetrics("StorageContainerMetrics");
    assertCounter("NumOps", 3L, containerMetrics);
    assertCounter("numCreateContainer", 1L, containerMetrics);
    assertCounter("numWriteChunk", 1L, containerMetrics);
    assertCounter("numReadChunk", 1L, containerMetrics);
    assertCounter("bytesWriteChunk", 1024L, containerMetrics);
    assertCounter("bytesReadChunk", 1024L, containerMetrics);
    String sec = interval + "s";
    Thread.sleep((interval + 1) * 1000);
    assertQuantileGauges("WriteChunkNanos" + sec, containerMetrics);
    // Check VolumeIOStats metrics
    List<HddsVolume> volumes =
        StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList());
    HddsVolume hddsVolume = volumes.get(0);
    MetricsRecordBuilder volumeIOMetrics =
        getMetrics(hddsVolume.getVolumeIOStats().getMetricsSourceName());
    assertCounter("ReadBytes", 1024L, volumeIOMetrics);
    assertCounter("ReadOpCount", 1L, volumeIOMetrics);
    assertCounter("WriteBytes", 1024L, volumeIOMetrics);
    assertCounter("WriteOpCount", 1L, volumeIOMetrics);
  } finally {
    if (client != null) {
      client.close();
    }
    if (server != null) {
      server.stop();
    }
    // Clean up the volume directory.
    File file = new File(path);
    if (file.exists()) {
      FileUtil.fullyDelete(file);
    }
  }
}
use of org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet in project ozone by apache.
the class TestSecureContainerServer method createDispatcher.
private static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId,
    OzoneConfiguration conf) throws IOException {
  ContainerSet containerSet = new ContainerSet();
  conf.set(HDDS_DATANODE_DIR_KEY,
      Paths.get(TEST_DIR, "dfs", "data", "hdds", RandomStringUtils.randomAlphabetic(4)).toString());
  conf.set(OZONE_METADATA_DIRS, TEST_DIR);
  VolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null,
      StorageVolume.VolumeType.DATA_VOLUME, null);
  DatanodeStateMachine stateMachine = Mockito.mock(DatanodeStateMachine.class);
  StateContext context = Mockito.mock(StateContext.class);
  Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(dd);
  Mockito.when(context.getParent()).thenReturn(stateMachine);
  ContainerMetrics metrics = ContainerMetrics.create(conf);
  Map<ContainerProtos.ContainerType, Handler> handlers = Maps.newHashMap();
  for (ContainerProtos.ContainerType containerType : ContainerProtos.ContainerType.values()) {
    handlers.put(containerType, Handler.getHandlerForContainerType(containerType, conf,
        dd.getUuid().toString(), containerSet, volumeSet, metrics, c -> {
        }));
  }
  HddsDispatcher hddsDispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers,
      context, metrics, TokenVerifier.create(new SecurityConfig(conf), caClient));
  hddsDispatcher.setClusterId(scmId.toString());
  return hddsDispatcher;
}
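The examples above all build their MutableVolumeSet the same way: point HDDS_DATANODE_DIR_KEY (and OZONE_METADATA_DIRS) at a scratch directory, then pass a datanode UUID, the configuration, a null StateContext, VolumeType.DATA_VOLUME, and a null volume checker. A consolidated sketch of that pattern follows; the helper name is ours, not Ozone's.

// Sketch only: builds a DATA_VOLUME MutableVolumeSet rooted at the given
// scratch directory, mirroring the constructor calls shown above.
private static MutableVolumeSet newScratchVolumeSet(File scratchDir) throws IOException {
  OzoneConfiguration conf = new OzoneConfiguration();
  conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, scratchDir.getAbsolutePath());
  conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, scratchDir.getAbsolutePath());
  return new MutableVolumeSet(UUID.randomUUID().toString(), conf,
      null /* StateContext */, StorageVolume.VolumeType.DATA_VOLUME,
      null /* volume checker */);
}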
use of org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet in project ozone by apache.
the class TestDatanodeHddsVolumeFailureToleration method testDNCorrectlyHandlesVolumeFailureOnStartup.
@Test
public void testDNCorrectlyHandlesVolumeFailureOnStartup() throws Exception {
  HddsDatanodeService dn = datanodes.get(0);
  OzoneContainer oc = dn.getDatanodeStateMachine().getContainer();
  MutableVolumeSet volSet = oc.getVolumeSet();
  StorageVolume vol0 = volSet.getVolumesList().get(0);
  StorageVolume vol1 = volSet.getVolumesList().get(1);
  File volRootDir0 = vol0.getStorageDir();
  File volRootDir1 = vol1.getStorageDir();
  // Simulate a number of bad volumes that is still within the tolerated limit.
  DatanodeTestUtils.simulateBadRootDir(volRootDir0);
  // Restart the datanode; no exception means the failure was tolerated.
  cluster.restartHddsDatanode(0, true);
  // Fail a second volume, which exceeds the tolerated limit.
  DatanodeTestUtils.simulateBadRootDir(volRootDir1);
  // Restarting now must fail.
  try {
    cluster.restartHddsDatanode(0, true);
    Assert.fail();
  } catch (RuntimeException e) {
    Assert.assertTrue(e.getMessage().contains("Can't start the HDDS datanode plugin"));
  }
  // Restore the bad volumes for subsequent tests.
  DatanodeTestUtils.restoreBadRootDir(volRootDir0);
  DatanodeTestUtils.restoreBadRootDir(volRootDir1);
}
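The test depends on DatanodeTestUtils.simulateBadRootDir and restoreBadRootDir to make a volume root unusable and later usable again. As an assumption (the real utility may work differently), one simple way to get the same effect is to toggle the directory's permissions:

// Hypothetical stand-in for DatanodeTestUtils: revoke and restore access to a
// volume root directory so HDDS volume health checks fail, then pass again.
static void makeRootUnusable(File volRootDir) {
  if (!volRootDir.setReadable(false) || !volRootDir.setWritable(false)) {
    throw new IllegalStateException("Could not change permissions on " + volRootDir);
  }
}

static void restoreRoot(File volRootDir) {
  if (!volRootDir.setReadable(true) || !volRootDir.setWritable(true)) {
    throw new IllegalStateException("Could not restore permissions on " + volRootDir);
  }
}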