Usage example of org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet in the Apache Ozone project:
the createDispatcher method of the TestHddsDispatcher class.
/**
 * Builds an {@link HddsDispatcher} backed by a fresh container set, a
 * mutable data-volume set, and a mocked datanode state machine/context.
 *
 * @param dd datanode detail info.
 * @param scmId UUID of the SCM; used as the dispatcher's cluster id.
 * @param conf configuration to be used.
 * @return a fully initialized HddsDispatcher instance.
 * @throws IOException if the volume set cannot be created.
 */
private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId,
    OzoneConfiguration conf) throws IOException {
  ContainerSet containerSet = new ContainerSet();
  VolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null,
      StorageVolume.VolumeType.DATA_VOLUME, null);

  // Mock the state machine / context pair so the handlers can resolve the
  // datanode details without a running datanode.
  DatanodeStateMachine stateMachine = Mockito.mock(DatanodeStateMachine.class);
  StateContext context = Mockito.mock(StateContext.class);
  Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(dd);
  Mockito.when(context.getParent()).thenReturn(stateMachine);

  ContainerMetrics metrics = ContainerMetrics.create(conf);
  Map<ContainerType, Handler> handlers = Maps.newHashMap();
  for (ContainerType containerType : ContainerType.values()) {
    Handler handler = Handler.getHandlerForContainerType(containerType, conf,
        context.getParent().getDatanodeDetails().getUuidString(),
        containerSet, volumeSet, metrics, NO_OP_ICR_SENDER);
    handlers.put(containerType, handler);
  }

  HddsDispatcher dispatcher = new HddsDispatcher(conf, containerSet,
      volumeSet, handlers, context, metrics, null);
  dispatcher.setClusterId(scmId.toString());
  return dispatcher;
}
Usage example of org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet in the Apache Ozone project:
the setUp method of the TestOzoneContainer class.
/**
 * Initializes the per-test configuration: points both the datanode data
 * directory and the Ozone metadata directory at JUnit-managed temp folders,
 * and creates a fresh volume set and round-robin volume choosing policy.
 *
 * @throws Exception if the temp folder or volume set cannot be created.
 */
@Before
public void setUp() throws Exception {
  conf = new OzoneConfiguration();
  conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY,
      folder.getRoot().getAbsolutePath());
  conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
      folder.newFolder().getAbsolutePath());
  // Diamond operator: the type arguments are inferred from the field type.
  commitSpaceMap = new HashMap<>();
  volumeSet = new MutableVolumeSet(datanodeDetails.getUuidString(), conf,
      null, StorageVolume.VolumeType.DATA_VOLUME, null);
  volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy();
}
Usage example of org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet in the Apache Ozone project:
the setUp method of the TestKeyValueContainerIntegrityChecks class.
/**
 * Prepares the per-test fixture for the current container layout under
 * test: a randomized test root directory used for both data and metadata,
 * a fresh data-volume set, and a layout-specific chunk manager.
 *
 * @throws Exception if the configuration or chunk manager setup fails.
 */
@Before
public void setUp() throws Exception {
  LOG.info("Testing layout:{}", containerLayoutTestInfo.getLayout());
  this.testRoot = GenericTestUtils.getRandomizedTestDir();
  String rootPath = testRoot.getAbsolutePath();

  conf = new OzoneConfiguration();
  // Data and metadata share the same randomized test root.
  conf.set(HDDS_DATANODE_DIR_KEY, rootPath);
  conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, rootPath);
  containerLayoutTestInfo.updateConfig(conf);

  String datanodeId = UUID.randomUUID().toString();
  volumeSet = new MutableVolumeSet(datanodeId, conf, null,
      StorageVolume.VolumeType.DATA_VOLUME, null);
  chunkManager = containerLayoutTestInfo.createChunkManager(true, null);
}
Usage example of org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet in the Apache Ozone project:
the testVolumeSetInKeyValueHandler method of the TestKeyValueHandler class.
/**
 * Verifies that KeyValueHandler picks RoundRobinVolumeChoosingPolicy by
 * default, and rejects a configured volume choosing policy class that does
 * not implement VolumeChoosingPolicy.
 */
@Test
public void testVolumeSetInKeyValueHandler() throws Exception {
  File path = GenericTestUtils.getRandomizedTestDir();
  OzoneConfiguration conf = new OzoneConfiguration();
  conf.set(HDDS_DATANODE_DIR_KEY, path.getAbsolutePath());
  conf.set(OZONE_METADATA_DIRS, path.getAbsolutePath());
  MutableVolumeSet volumeSet = new MutableVolumeSet(
      UUID.randomUUID().toString(), conf, null,
      StorageVolume.VolumeType.DATA_VOLUME, null);
  try {
    ContainerSet cset = new ContainerSet();
    // Single metrics interval of 2; array literal replaces the former
    // two-step "new int[1]; interval[0] = 2" initialization.
    int[] interval = new int[] {2};
    ContainerMetrics metrics = new ContainerMetrics(interval);
    DatanodeDetails datanodeDetails = Mockito.mock(DatanodeDetails.class);
    DatanodeStateMachine stateMachine =
        Mockito.mock(DatanodeStateMachine.class);
    StateContext context = Mockito.mock(StateContext.class);
    Mockito.when(stateMachine.getDatanodeDetails())
        .thenReturn(datanodeDetails);
    Mockito.when(context.getParent()).thenReturn(stateMachine);

    KeyValueHandler keyValueHandler = new KeyValueHandler(conf,
        context.getParent().getDatanodeDetails().getUuidString(), cset,
        volumeSet, metrics, c -> {
        });
    assertEquals("org.apache.hadoop.ozone.container.common"
        + ".volume.RoundRobinVolumeChoosingPolicy",
        keyValueHandler.getVolumeChoosingPolicyForTesting()
            .getClass().getName());

    // Set a class which is not of sub class of VolumeChoosingPolicy
    conf.set(HDDS_DATANODE_VOLUME_CHOOSING_POLICY,
        "org.apache.hadoop.ozone.container.common.impl.HddsDispatcher");
    try {
      new KeyValueHandler(conf,
          context.getParent().getDatanodeDetails().getUuidString(), cset,
          volumeSet, metrics, c -> {
          });
      // Bug fix: previously, if the constructor did NOT throw, the test
      // passed silently. AssertionError is not a RuntimeException, so it
      // propagates past the catch below and fails the test.
      throw new AssertionError("KeyValueHandler construction should fail "
          + "for a volume choosing policy class that does not implement "
          + "VolumeChoosingPolicy");
    } catch (RuntimeException ex) {
      GenericTestUtils.assertExceptionContains("class org.apache.hadoop"
          + ".ozone.container.common.impl.HddsDispatcher not org.apache"
          + ".hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy",
          ex);
    }
  } finally {
    volumeSet.shutdown();
    FileUtil.fullyDelete(path);
  }
}
Usage example of org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet in the Apache Ozone project:
the execute method of the ScmHAFinalizeUpgradeActionDatanode class.
/**
 * Upgrades the on-disk layout of every HDDS data volume on this datanode
 * for SCM HA support. A volume whose upgrade does not succeed is marked
 * as failed in the volume set so it will not be used.
 *
 * @param dsm the datanode state machine whose container volume set is
 *            upgraded.
 * @throws Exception propagated from the per-volume upgrade.
 */
@Override
public void execute(DatanodeStateMachine dsm) throws Exception {
LOG.info("Upgrading Datanode volume layout for SCM HA support.");
MutableVolumeSet volumeSet = dsm.getContainer().getVolumeSet();
// NOTE(review): failVolume() is called while iterating getVolumesList();
// this assumes the returned list is a snapshot that is safe to iterate
// while the volume set is mutated — TODO confirm.
for (StorageVolume volume : volumeSet.getVolumesList()) {
// The write lock is taken per volume (inside the loop), so other users of
// the volume set are only blocked for one volume's upgrade at a time.
volumeSet.writeLock();
try {
// Only HDDS data volumes carry the layout being upgraded here; other
// StorageVolume kinds are skipped.
if (volume instanceof HddsVolume) {
HddsVolume hddsVolume = (HddsVolume) volume;
// upgradeVolume returning false means the layout upgrade did not
// succeed; mark the volume failed so it is taken out of service.
if (!upgradeVolume(hddsVolume, hddsVolume.getClusterID())) {
volumeSet.failVolume(volume.getStorageDir().getAbsolutePath());
}
}
} finally {
// Always release the lock, even if upgradeVolume throws.
volumeSet.writeUnlock();
}
}
}
Aggregations