Search in sources:

Example 1 with ContainerMetrics

use of org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics in project ozone by apache.

the class ContainerCommands method loadContainersFromVolumes.

public void loadContainersFromVolumes() throws IOException {
    OzoneConfiguration conf = parent.getOzoneConf();
    ContainerSet containerSet = new ContainerSet();
    ContainerMetrics metrics = ContainerMetrics.create(conf);
    String firstStorageDir = getFirstStorageDir(conf);
    String datanodeUuid = getDatanodeUUID(firstStorageDir, conf);
    String clusterId = getClusterId(firstStorageDir);
    volumeSet = new MutableVolumeSet(datanodeUuid, conf, null, StorageVolume.VolumeType.DATA_VOLUME, null);
    Map<ContainerProtos.ContainerType, Handler> handlers = new HashMap<>();
    for (ContainerProtos.ContainerType containerType : ContainerProtos.ContainerType.values()) {
        final Handler handler = Handler.getHandlerForContainerType(containerType, conf, datanodeUuid, containerSet, volumeSet, metrics, containerReplicaProto -> {
        });
        handler.setClusterID(clusterId);
        handlers.put(containerType, handler);
    }
    controller = new ContainerController(containerSet, handlers);
    List<HddsVolume> volumes = StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList());
    Iterator<HddsVolume> volumeSetIterator = volumes.iterator();
    LOG.info("Starting the read all the container metadata");
    while (volumeSetIterator.hasNext()) {
        HddsVolume volume = volumeSetIterator.next();
        LOG.info("Loading container metadata from volume " + volume.toString());
        final ContainerReader reader = new ContainerReader(volumeSet, volume, containerSet, conf);
        reader.run();
    }
    LOG.info("All the container metadata is loaded.");
}
Also used : ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) HashMap(java.util.HashMap) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) Handler(org.apache.hadoop.ozone.container.common.interfaces.Handler) ContainerController(org.apache.hadoop.ozone.container.ozoneimpl.ContainerController) HddsVolume(org.apache.hadoop.ozone.container.common.volume.HddsVolume) ContainerSet(org.apache.hadoop.ozone.container.common.impl.ContainerSet) MutableVolumeSet(org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet) ContainerMetrics(org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics) ContainerReader(org.apache.hadoop.ozone.container.ozoneimpl.ContainerReader)
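
The wiring above is the pattern repeated throughout these examples: a single ContainerMetrics instance created from the configuration is shared by every Handler, so all container types report into the same metrics source. Below is a minimal sketch of just that wiring, using only the calls shown in the example; the class and method names (HandlerWiringSketch, buildHandlers) are illustrative and not part of Ozone.

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
import org.apache.hadoop.ozone.container.common.interfaces.Handler;
import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;

// Illustrative helper; not part of the Ozone codebase.
final class HandlerWiringSketch {
    static Map<ContainerProtos.ContainerType, Handler> buildHandlers(OzoneConfiguration conf, String datanodeUuid, String clusterId, ContainerSet containerSet, MutableVolumeSet volumeSet) {
        // One shared metrics instance; every handler records into it.
        ContainerMetrics metrics = ContainerMetrics.create(conf);
        Map<ContainerProtos.ContainerType, Handler> handlers = new HashMap<>();
        for (ContainerProtos.ContainerType type : ContainerProtos.ContainerType.values()) {
            Handler handler = Handler.getHandlerForContainerType(type, conf, datanodeUuid, containerSet, volumeSet, metrics, containerReplicaProto -> {
                // no-op incremental container report sender
            });
            handler.setClusterID(clusterId);
            handlers.put(type, handler);
        }
        return handlers;
    }
}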

Example 2 with ContainerMetrics

use of org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics in project ozone by apache.

the class TestHddsDispatcher method testContainerCloseActionWhenFull.

@Test
public void testContainerCloseActionWhenFull() throws IOException {
    String testDir = GenericTestUtils.getTempPath(TestHddsDispatcher.class.getSimpleName());
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set(HDDS_DATANODE_DIR_KEY, testDir);
    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir);
    DatanodeDetails dd = randomDatanodeDetails();
    MutableVolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null);
    try {
        UUID scmId = UUID.randomUUID();
        ContainerSet containerSet = new ContainerSet();
        DatanodeStateMachine stateMachine = Mockito.mock(DatanodeStateMachine.class);
        StateContext context = Mockito.mock(StateContext.class);
        Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(dd);
        Mockito.when(context.getParent()).thenReturn(stateMachine);
        KeyValueContainerData containerData = new KeyValueContainerData(1L, layout, (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(), dd.getUuidString());
        Container container = new KeyValueContainer(containerData, conf);
        container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), scmId.toString());
        containerSet.addContainer(container);
        ContainerMetrics metrics = ContainerMetrics.create(conf);
        Map<ContainerType, Handler> handlers = Maps.newHashMap();
        for (ContainerType containerType : ContainerType.values()) {
            handlers.put(containerType, Handler.getHandlerForContainerType(containerType, conf, context.getParent().getDatanodeDetails().getUuidString(), containerSet, volumeSet, metrics, NO_OP_ICR_SENDER));
        }
        HddsDispatcher hddsDispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers, context, metrics, null);
        hddsDispatcher.setClusterId(scmId.toString());
        ContainerCommandResponseProto responseOne = hddsDispatcher.dispatch(getWriteChunkRequest(dd.getUuidString(), 1L, 1L), null);
        Assert.assertEquals(ContainerProtos.Result.SUCCESS, responseOne.getResult());
        verify(context, times(0)).addContainerActionIfAbsent(Mockito.any(ContainerAction.class));
        containerData.setBytesUsed(Double.valueOf(StorageUnit.MB.toBytes(950)).longValue());
        ContainerCommandResponseProto responseTwo = hddsDispatcher.dispatch(getWriteChunkRequest(dd.getUuidString(), 1L, 2L), null);
        Assert.assertEquals(ContainerProtos.Result.SUCCESS, responseTwo.getResult());
        verify(context, times(1)).addContainerActionIfAbsent(Mockito.any(ContainerAction.class));
    } finally {
        volumeSet.shutdown();
        ContainerMetrics.remove();
        FileUtils.deleteDirectory(new File(testDir));
    }
}
Also used : RoundRobinVolumeChoosingPolicy(org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy) ContainerType(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType) StateContext(org.apache.hadoop.ozone.container.common.statemachine.StateContext) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) Handler(org.apache.hadoop.ozone.container.common.interfaces.Handler) ByteString(org.apache.ratis.thirdparty.com.google.protobuf.ByteString) KeyValueContainerData(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData) ContainerCommandResponseProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto) KeyValueContainer(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer) Container(org.apache.hadoop.ozone.container.common.interfaces.Container) ContainerAction(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerAction) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) DatanodeStateMachine(org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine) MutableVolumeSet(org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet) UUID(java.util.UUID) ContainerMetrics(org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics) File(java.io.File) KeyValueContainer(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer) Test(org.junit.Test)
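
Note the cleanup in the finally block: ContainerMetrics.create(conf) registers a shared metrics source, which is why the test balances it with ContainerMetrics.remove() (and shuts down the volume set) so that later tests can call create(conf) again. A minimal sketch of that lifecycle, under the assumption that create and remove must be paired per test:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;

// Sketch of the create/remove lifecycle used by the tests on this page.
public final class ContainerMetricsLifecycleSketch {
    public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        ContainerMetrics metrics = ContainerMetrics.create(conf);
        try {
            // ... hand `metrics` to Handler.getHandlerForContainerType(...) and to HddsDispatcher(...)
        } finally {
            // Unregister the shared metrics source so a later create(conf) starts clean.
            ContainerMetrics.remove();
        }
    }
}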

Example 3 with ContainerMetrics

use of org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics in project ozone by apache.

the class TestHddsDispatcher method createDispatcher.

/**
 * Creates an HddsDispatcher instance with the given parameters.
 * @param dd datanode details.
 * @param scmId UUID of the SCM.
 * @param conf configuration to be used.
 * @return the created HddsDispatcher instance.
 * @throws IOException in case of an I/O error.
 */
private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, OzoneConfiguration conf) throws IOException {
    ContainerSet containerSet = new ContainerSet();
    VolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null);
    DatanodeStateMachine stateMachine = Mockito.mock(DatanodeStateMachine.class);
    StateContext context = Mockito.mock(StateContext.class);
    Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(dd);
    Mockito.when(context.getParent()).thenReturn(stateMachine);
    ContainerMetrics metrics = ContainerMetrics.create(conf);
    Map<ContainerType, Handler> handlers = Maps.newHashMap();
    for (ContainerType containerType : ContainerType.values()) {
        handlers.put(containerType, Handler.getHandlerForContainerType(containerType, conf, context.getParent().getDatanodeDetails().getUuidString(), containerSet, volumeSet, metrics, NO_OP_ICR_SENDER));
    }
    HddsDispatcher hddsDispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers, context, metrics, null);
    hddsDispatcher.setClusterId(scmId.toString());
    return hddsDispatcher;
}
Also used : ContainerType(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType) StateContext(org.apache.hadoop.ozone.container.common.statemachine.StateContext) Handler(org.apache.hadoop.ozone.container.common.interfaces.Handler) DatanodeStateMachine(org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine) MutableVolumeSet(org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet) ContainerMetrics(org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics) MutableVolumeSet(org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet) VolumeSet(org.apache.hadoop.ozone.container.common.volume.VolumeSet)
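
A hedged sketch of how a test might drive this helper, reusing the configuration keys and the getWriteChunkRequest/randomDatanodeDetails test helpers that appear in Example 2; the test method name and the flow are illustrative, not taken from TestHddsDispatcher itself.

@Test
public void sketchDispatchThroughCreatedDispatcher() throws IOException {
    String testDir = GenericTestUtils.getTempPath(TestHddsDispatcher.class.getSimpleName());
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set(HDDS_DATANODE_DIR_KEY, testDir);
    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir);
    DatanodeDetails dd = randomDatanodeDetails();
    HddsDispatcher dispatcher = createDispatcher(dd, UUID.randomUUID(), conf);
    try {
        ContainerCommandResponseProto response = dispatcher.dispatch(getWriteChunkRequest(dd.getUuidString(), 1L, 1L), null);
        // Example 2 asserts ContainerProtos.Result.SUCCESS after pre-creating the container;
        // here we only assert that a response was produced.
        Assert.assertNotNull(response);
    } finally {
        ContainerMetrics.remove();
        FileUtils.deleteDirectory(new File(testDir));
    }
}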

Example 4 with ContainerMetrics

use of org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics in project ozone by apache.

the class TestHandler method setup.

@Before
public void setup() throws Exception {
    this.conf = new OzoneConfiguration();
    this.containerSet = Mockito.mock(ContainerSet.class);
    this.volumeSet = Mockito.mock(MutableVolumeSet.class);
    DatanodeDetails datanodeDetails = Mockito.mock(DatanodeDetails.class);
    DatanodeStateMachine stateMachine = Mockito.mock(DatanodeStateMachine.class);
    StateContext context = Mockito.mock(StateContext.class);
    Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails);
    Mockito.when(context.getParent()).thenReturn(stateMachine);
    ContainerMetrics metrics = ContainerMetrics.create(conf);
    Map<ContainerProtos.ContainerType, Handler> handlers = Maps.newHashMap();
    for (ContainerProtos.ContainerType containerType : ContainerProtos.ContainerType.values()) {
        handlers.put(containerType, Handler.getHandlerForContainerType(containerType, conf, context.getParent().getDatanodeDetails().getUuidString(), containerSet, volumeSet, metrics, TestHddsDispatcher.NO_OP_ICR_SENDER));
    }
    this.dispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers, null, metrics, null);
}
Also used : ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) StateContext(org.apache.hadoop.ozone.container.common.statemachine.StateContext) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) KeyValueHandler(org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler) HddsDispatcher(org.apache.hadoop.ozone.container.common.impl.HddsDispatcher) TestHddsDispatcher(org.apache.hadoop.ozone.container.common.impl.TestHddsDispatcher) ContainerSet(org.apache.hadoop.ozone.container.common.impl.ContainerSet) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) DatanodeStateMachine(org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine) MutableVolumeSet(org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet) ContainerMetrics(org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics) Before(org.junit.Before)
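
In the full test class this setup is presumably followed by assertions like the one sketched below, checking that the handler registered for KeyValueContainer containers is a KeyValueHandler. The getHandler accessor on HddsDispatcher is an assumption here; it does not appear in the snippets on this page.

// Hedged sketch; dispatcher.getHandler(ContainerType) is assumed to exist on HddsDispatcher.
@Test
public void sketchKeyValueHandlerLookup() {
    Handler handler = dispatcher.getHandler(ContainerProtos.ContainerType.KeyValueContainer);
    Assert.assertTrue(handler instanceof KeyValueHandler);
}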

Example 5 with ContainerMetrics

use of org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics in project ozone by apache.

the class TestKeyValueHandler method testDeleteContainer.

@Test
public void testDeleteContainer() throws IOException {
    final String testDir = GenericTestUtils.getTempPath(TestKeyValueHandler.class.getSimpleName() + "-" + UUID.randomUUID().toString());
    try {
        final long containerID = 1L;
        final ConfigurationSource conf = new OzoneConfiguration();
        final ContainerSet containerSet = new ContainerSet();
        final VolumeSet volumeSet = Mockito.mock(VolumeSet.class);
        Mockito.when(volumeSet.getVolumesList()).thenReturn(Collections.singletonList(new HddsVolume.Builder(testDir).conf(conf).build()));
        final int[] interval = new int[1];
        interval[0] = 2;
        final ContainerMetrics metrics = new ContainerMetrics(interval);
        final AtomicInteger icrReceived = new AtomicInteger(0);
        final KeyValueHandler kvHandler = new KeyValueHandler(conf, UUID.randomUUID().toString(), containerSet, volumeSet, metrics, c -> icrReceived.incrementAndGet());
        kvHandler.setClusterID(UUID.randomUUID().toString());
        final ContainerCommandRequestProto createContainer = ContainerCommandRequestProto.newBuilder().setCmdType(ContainerProtos.Type.CreateContainer).setDatanodeUuid(UUID.randomUUID().toString()).setCreateContainer(ContainerProtos.CreateContainerRequestProto.newBuilder().setContainerType(ContainerType.KeyValueContainer).build()).setContainerID(containerID).setPipelineID(UUID.randomUUID().toString()).build();
        kvHandler.handleCreateContainer(createContainer, null);
        Assert.assertEquals(1, icrReceived.get());
        Assert.assertNotNull(containerSet.getContainer(containerID));
        kvHandler.deleteContainer(containerSet.getContainer(containerID), true);
        Assert.assertEquals(2, icrReceived.get());
        Assert.assertNull(containerSet.getContainer(containerID));
    } finally {
        FileUtils.deleteDirectory(new File(testDir));
    }
}
Also used : ConfigurationSource(org.apache.hadoop.hdds.conf.ConfigurationSource) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) ContainerSet(org.apache.hadoop.ozone.container.common.impl.ContainerSet) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ContainerCommandRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto) ContainerMetrics(org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics) File(java.io.File) MutableVolumeSet(org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet) VolumeSet(org.apache.hadoop.ozone.container.common.volume.VolumeSet) Test(org.junit.Test)
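
Example 5 is the only snippet on this page that constructs ContainerMetrics directly rather than through create(conf); the int[] argument presumably carries the percentile reporting intervals (in seconds) for the latency metrics. A short sketch contrasting the two construction paths seen above:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;

// Sketch contrasting the two ways ContainerMetrics is obtained on this page.
public final class ContainerMetricsConstructionSketch {
    public static void main(String[] args) {
        // 1. From configuration: sets up the shared metrics source; tests pair this
        //    with ContainerMetrics.remove() (see Example 2).
        ContainerMetrics fromConf = ContainerMetrics.create(new OzoneConfiguration());
        ContainerMetrics.remove();

        // 2. Direct construction with explicit intervals, as in Example 5
        //    (interval values assumed to be in seconds).
        ContainerMetrics direct = new ContainerMetrics(new int[] { 2 });
    }
}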

Aggregations

ContainerMetrics (org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics): 17 usages
ContainerSet (org.apache.hadoop.ozone.container.common.impl.ContainerSet): 15 usages
Test (org.junit.Test): 13 usages
MutableVolumeSet (org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet): 12 usages
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration): 8 usages
KeyValueHandler (org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler): 8 usages
Handler (org.apache.hadoop.ozone.container.common.interfaces.Handler): 7 usages
DatanodeStateMachine (org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine): 7 usages
StateContext (org.apache.hadoop.ozone.container.common.statemachine.StateContext): 7 usages
VolumeSet (org.apache.hadoop.ozone.container.common.volume.VolumeSet): 7 usages
File (java.io.File): 6 usages
DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails): 6 usages
IOException (java.io.IOException): 5 usages
ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos): 5 usages
KeyValueContainerData (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData): 5 usages
UUID (java.util.UUID): 4 usages
ContainerCommandRequestProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto): 4 usages
ContainerCommandResponseProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto): 4 usages
HddsDispatcher (org.apache.hadoop.ozone.container.common.impl.HddsDispatcher): 4 usages
DatanodeConfiguration (org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration): 4 usages