Usage of org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics in the Apache Ozone project.
Snippet: class ContainerCommands, method loadContainersFromVolumes.
/**
 * Loads all container metadata from the datanode's data volumes and
 * initializes {@code volumeSet} and {@code controller} for subsequent
 * commands.
 *
 * @throws IOException if the storage directories cannot be read.
 */
public void loadContainersFromVolumes() throws IOException {
  OzoneConfiguration conf = parent.getOzoneConf();
  ContainerSet containerSet = new ContainerSet();
  ContainerMetrics metrics = ContainerMetrics.create(conf);
  String firstStorageDir = getFirstStorageDir(conf);
  String datanodeUuid = getDatanodeUUID(firstStorageDir, conf);
  String clusterId = getClusterId(firstStorageDir);
  volumeSet = new MutableVolumeSet(datanodeUuid, conf, null, StorageVolume.VolumeType.DATA_VOLUME, null);
  // One handler per container type; the empty ICR consumer discards
  // incremental container reports since no SCM is involved here.
  Map<ContainerProtos.ContainerType, Handler> handlers = new HashMap<>();
  for (ContainerProtos.ContainerType containerType : ContainerProtos.ContainerType.values()) {
    final Handler handler = Handler.getHandlerForContainerType(containerType, conf, datanodeUuid, containerSet, volumeSet, metrics, containerReplicaProto -> {
    });
    handler.setClusterID(clusterId);
    handlers.put(containerType, handler);
  }
  controller = new ContainerController(containerSet, handlers);
  List<HddsVolume> volumes = StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList());
  LOG.info("Starting to read all the container metadata");
  // Enhanced-for replaces the explicit Iterator; parameterized logging
  // avoids eager string concatenation (SLF4J idiom).
  for (HddsVolume volume : volumes) {
    LOG.info("Loading container metadata from volume {}", volume);
    final ContainerReader reader = new ContainerReader(volumeSet, volume, containerSet, conf);
    reader.run();
  }
  LOG.info("All the container metadata is loaded.");
}
Usage of org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics in the Apache Ozone project.
Snippet: class TestHddsDispatcher, method testContainerCloseActionWhenFull.
// Verifies that a write chunk on a nearly-full container raises exactly
// one container-close action on the state context, while a write on a
// mostly-empty container raises none.
@Test
public void testContainerCloseActionWhenFull() throws IOException {
  String testDir = GenericTestUtils.getTempPath(TestHddsDispatcher.class.getSimpleName());
  OzoneConfiguration conf = new OzoneConfiguration();
  conf.set(HDDS_DATANODE_DIR_KEY, testDir);
  conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir);
  DatanodeDetails dd = randomDatanodeDetails();
  MutableVolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null);
  try {
    UUID scmId = UUID.randomUUID();
    ContainerSet containerSet = new ContainerSet();
    // Mocked state machine/context: the dispatcher reports close actions
    // to the context, which is what this test verifies below.
    DatanodeStateMachine stateMachine = Mockito.mock(DatanodeStateMachine.class);
    StateContext context = Mockito.mock(StateContext.class);
    Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(dd);
    Mockito.when(context.getParent()).thenReturn(stateMachine);
    // A 1 GB container; the 950 MB usage set later presumably crosses the
    // dispatcher's close threshold — TODO confirm the threshold value.
    KeyValueContainerData containerData = new KeyValueContainerData(1L, layout, (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(), dd.getUuidString());
    Container container = new KeyValueContainer(containerData, conf);
    container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), scmId.toString());
    containerSet.addContainer(container);
    ContainerMetrics metrics = ContainerMetrics.create(conf);
    Map<ContainerType, Handler> handlers = Maps.newHashMap();
    for (ContainerType containerType : ContainerType.values()) {
      handlers.put(containerType, Handler.getHandlerForContainerType(containerType, conf, context.getParent().getDatanodeDetails().getUuidString(), containerSet, volumeSet, metrics, NO_OP_ICR_SENDER));
    }
    HddsDispatcher hddsDispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers, context, metrics, null);
    hddsDispatcher.setClusterId(scmId.toString());
    // First write: container far from full, so no close action expected.
    ContainerCommandResponseProto responseOne = hddsDispatcher.dispatch(getWriteChunkRequest(dd.getUuidString(), 1L, 1L), null);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS, responseOne.getResult());
    verify(context, times(0)).addContainerActionIfAbsent(Mockito.any(ContainerAction.class));
    // Simulate near-full usage (950 MB of 1 GB) before the second write.
    containerData.setBytesUsed(Double.valueOf(StorageUnit.MB.toBytes(950)).longValue());
    ContainerCommandResponseProto responseTwo = hddsDispatcher.dispatch(getWriteChunkRequest(dd.getUuidString(), 1L, 2L), null);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS, responseTwo.getResult());
    // Second write should have raised exactly one close action.
    verify(context, times(1)).addContainerActionIfAbsent(Mockito.any(ContainerAction.class));
  } finally {
    // Clean up shared state: singleton metrics and the temp directory.
    volumeSet.shutdown();
    ContainerMetrics.remove();
    FileUtils.deleteDirectory(new File(testDir));
  }
}
Usage of org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics in the Apache Ozone project.
Snippet: class TestHddsDispatcher, method createDispatcher.
/**
 * Builds an {@link HddsDispatcher} backed by a mocked state machine, a
 * fresh container set, a real data volume set, and one handler per
 * container type.
 *
 * @param dd datanode details used for volume ownership and handler identity.
 * @param scmId cluster/SCM identifier applied to the dispatcher.
 * @param conf configuration backing the volume set and metrics.
 * @return a dispatcher ready to receive commands.
 * @throws IOException if the volume set cannot be created.
 */
private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, OzoneConfiguration conf) throws IOException {
  // Mock the state machine chain so handlers can resolve the datanode UUID.
  DatanodeStateMachine mockStateMachine = Mockito.mock(DatanodeStateMachine.class);
  StateContext mockContext = Mockito.mock(StateContext.class);
  Mockito.when(mockStateMachine.getDatanodeDetails()).thenReturn(dd);
  Mockito.when(mockContext.getParent()).thenReturn(mockStateMachine);
  ContainerSet containers = new ContainerSet();
  VolumeSet dataVolumes = new MutableVolumeSet(dd.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null);
  ContainerMetrics containerMetrics = ContainerMetrics.create(conf);
  Map<ContainerType, Handler> handlerByType = Maps.newHashMap();
  for (ContainerType type : ContainerType.values()) {
    Handler typeHandler = Handler.getHandlerForContainerType(type, conf, mockContext.getParent().getDatanodeDetails().getUuidString(), containers, dataVolumes, containerMetrics, NO_OP_ICR_SENDER);
    handlerByType.put(type, typeHandler);
  }
  HddsDispatcher dispatcher = new HddsDispatcher(conf, containers, dataVolumes, handlerByType, mockContext, containerMetrics, null);
  dispatcher.setClusterId(scmId.toString());
  return dispatcher;
}
Usage of org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics in the Apache Ozone project.
Snippet: class TestHandler, method setup.
// Builds the shared test fixture: a dispatcher over mocked container set
// and volume set, with one real handler per container type.
@Before
public void setup() throws Exception {
  this.conf = new OzoneConfiguration();
  this.containerSet = Mockito.mock(ContainerSet.class);
  this.volumeSet = Mockito.mock(MutableVolumeSet.class);
  // Mock the state machine chain so handlers can resolve the datanode UUID.
  DatanodeDetails dnDetails = Mockito.mock(DatanodeDetails.class);
  DatanodeStateMachine dnStateMachine = Mockito.mock(DatanodeStateMachine.class);
  StateContext stateContext = Mockito.mock(StateContext.class);
  Mockito.when(dnStateMachine.getDatanodeDetails()).thenReturn(dnDetails);
  Mockito.when(stateContext.getParent()).thenReturn(dnStateMachine);
  ContainerMetrics containerMetrics = ContainerMetrics.create(conf);
  Map<ContainerProtos.ContainerType, Handler> typeHandlers = Maps.newHashMap();
  for (ContainerProtos.ContainerType type : ContainerProtos.ContainerType.values()) {
    Handler typeHandler = Handler.getHandlerForContainerType(type, conf, stateContext.getParent().getDatanodeDetails().getUuidString(), containerSet, volumeSet, containerMetrics, TestHddsDispatcher.NO_OP_ICR_SENDER);
    typeHandlers.put(type, typeHandler);
  }
  this.dispatcher = new HddsDispatcher(conf, containerSet, volumeSet, typeHandlers, null, containerMetrics, null);
}
Usage of org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics in the Apache Ozone project.
Snippet: class TestKeyValueHandler, method testDeleteContainer.
// Verifies that creating and then deleting a container each emit one
// incremental container report (ICR), and that the deleted container is
// removed from the container set.
@Test
public void testDeleteContainer() throws IOException {
  final String testDir = GenericTestUtils.getTempPath(TestKeyValueHandler.class.getSimpleName() + "-" + UUID.randomUUID().toString());
  try {
    final long containerID = 1L;
    final ConfigurationSource conf = new OzoneConfiguration();
    final ContainerSet containerSet = new ContainerSet();
    final VolumeSet volumeSet = Mockito.mock(VolumeSet.class);
    Mockito.when(volumeSet.getVolumesList()).thenReturn(Collections.singletonList(new HddsVolume.Builder(testDir).conf(conf).build()));
    // Array-initializer replaces the two-statement new int[1]/assignment.
    final int[] interval = {2};
    final ContainerMetrics metrics = new ContainerMetrics(interval);
    // Count ICRs delivered by the handler instead of sending them anywhere.
    final AtomicInteger icrReceived = new AtomicInteger(0);
    final KeyValueHandler kvHandler = new KeyValueHandler(conf, UUID.randomUUID().toString(), containerSet, volumeSet, metrics, c -> icrReceived.incrementAndGet());
    kvHandler.setClusterID(UUID.randomUUID().toString());
    final ContainerCommandRequestProto createContainer = ContainerCommandRequestProto.newBuilder().setCmdType(ContainerProtos.Type.CreateContainer).setDatanodeUuid(UUID.randomUUID().toString()).setCreateContainer(ContainerProtos.CreateContainerRequestProto.newBuilder().setContainerType(ContainerType.KeyValueContainer).build()).setContainerID(containerID).setPipelineID(UUID.randomUUID().toString()).build();
    // Create: one ICR expected, and the container must be registered.
    kvHandler.handleCreateContainer(createContainer, null);
    Assert.assertEquals(1, icrReceived.get());
    Assert.assertNotNull(containerSet.getContainer(containerID));
    // Delete (force=true): a second ICR, and the container must be gone.
    kvHandler.deleteContainer(containerSet.getContainer(containerID), true);
    Assert.assertEquals(2, icrReceived.get());
    Assert.assertNull(containerSet.getContainer(containerID));
  } finally {
    FileUtils.deleteDirectory(new File(testDir));
  }
}
Aggregations