Search in sources:

Example 6 with OzoneConfiguration

Use of org.apache.hadoop.hdds.conf.OzoneConfiguration in the Apache Ozone project.

The following shows the class TestSCMContainerPlacementRackAware, method setup.

@Before
public void setup() {
    // Builds a 3-rack topology (NODE_PER_RACK datanodes per rack), registers
    // every node with a mocked NodeManager, and creates the two rack-aware
    // placement policies under test (with and without fallback).
    conf = new OzoneConfiguration();
    // We are using small units here
    conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, 1, StorageUnit.BYTES);
    NodeSchema[] schemas = new NodeSchema[] { ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA };
    NodeSchemaManager.getInstance().init(schemas, true);
    cluster = new NetworkTopologyImpl(NodeSchemaManager.getInstance());
    // build datanodes, and network topology
    String rack = "/rack";
    String hostname = "node";
    for (int i = 0; i < datanodeCount; i++) {
        // Totally 3 racks, each has 5 datanodes
        DatanodeDetails datanodeDetails = MockDatanodeDetails.createDatanodeDetails(hostname + i, rack + (i / NODE_PER_RACK));
        DatanodeInfo datanodeInfo = new DatanodeInfo(datanodeDetails, NodeStatus.inServiceHealthy(), UpgradeUtils.defaultLayoutVersionProto());
        StorageReportProto storage1 = HddsTestUtils.createStorageReport(datanodeInfo.getUuid(), "/data1-" + datanodeInfo.getUuidString(), STORAGE_CAPACITY, 0, 100L, null);
        MetadataStorageReportProto metaStorage1 = HddsTestUtils.createMetadataStorageReport("/metadata1-" + datanodeInfo.getUuidString(), STORAGE_CAPACITY, 0, 100L, null);
        datanodeInfo.updateStorageReports(new ArrayList<>(Arrays.asList(storage1)));
        datanodeInfo.updateMetaDataStorageReports(new ArrayList<>(Arrays.asList(metaStorage1)));
        datanodes.add(datanodeDetails);
        cluster.add(datanodeDetails);
        dnInfos.add(datanodeInfo);
    }
    // Pre-fill a few datanodes so the policy sees varying free space.
    // Each row is { datanode index, scmUsed, remaining }; which rows apply
    // depends on how many datanodes the test instance was created with.
    // (Previously the first row read the UUID via datanodes.get(2) while all
    // sibling lines used dnInfos.get(i) — same value, now made consistent.)
    final long[][] usageOverrides;
    if (datanodeCount > 4) {
        usageOverrides = new long[][] { { 2, 90L, 10L }, { 3, 80L, 20L }, { 4, 70L, 30L } };
    } else if (datanodeCount > 3) {
        usageOverrides = new long[][] { { 2, 90L, 10L }, { 3, 80L, 20L } };
    } else if (datanodeCount > 2) {
        usageOverrides = new long[][] { { 2, 84L, 16L } };
    } else {
        usageOverrides = new long[0][];
    }
    for (long[] row : usageOverrides) {
        int idx = (int) row[0];
        StorageReportProto report = HddsTestUtils.createStorageReport(
            dnInfos.get(idx).getUuid(), "/data1-" + dnInfos.get(idx).getUuidString(),
            STORAGE_CAPACITY, row[1], row[2], null);
        dnInfos.get(idx).updateStorageReports(new ArrayList<>(Arrays.asList(report)));
    }
    // create mock node manager
    nodeManager = Mockito.mock(NodeManager.class);
    when(nodeManager.getNodes(NodeStatus.inServiceHealthy())).thenReturn(new ArrayList<>(datanodes));
    for (DatanodeInfo dn : dnInfos) {
        when(nodeManager.getNodeByUuid(dn.getUuidString())).thenReturn(dn);
    }
    when(nodeManager.getClusterNetworkTopologyMap()).thenReturn(cluster);
    // create placement policy instances
    metrics = SCMContainerPlacementMetrics.create();
    policy = new SCMContainerPlacementRackAware(nodeManager, conf, cluster, true, metrics);
    policyNoFallback = new SCMContainerPlacementRackAware(nodeManager, conf, cluster, false, metrics);
}
Also used : DatanodeInfo(org.apache.hadoop.hdds.scm.node.DatanodeInfo) ArrayList(java.util.ArrayList) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) MetadataStorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto) StorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto) NetworkTopologyImpl(org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl) NodeManager(org.apache.hadoop.hdds.scm.node.NodeManager) MockDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) NodeSchema(org.apache.hadoop.hdds.scm.net.NodeSchema) MetadataStorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto) Before(org.junit.Before)

Example 7 with OzoneConfiguration

Use of org.apache.hadoop.hdds.conf.OzoneConfiguration in the Apache Ozone project.

The following shows the class TestContainerPersistence, method init.

@BeforeClass
public static void init() {
    conf = new OzoneConfiguration();
    hddsPath = GenericTestUtils.getTempPath(TestContainerPersistence.class.getSimpleName());
    conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, hddsPath);
    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, hddsPath);
    volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy();
}
Also used : RoundRobinVolumeChoosingPolicy(org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) BeforeClass(org.junit.BeforeClass)

Example 8 with OzoneConfiguration

Use of org.apache.hadoop.hdds.conf.OzoneConfiguration in the Apache Ozone project.

The following shows the class TestContainerSet, method createContainerSet.

private ContainerSet createContainerSet() throws StorageContainerException {
    // Populates a fresh ContainerSet with 10 key-value containers whose IDs
    // start at FIRST_ID; even IDs are CLOSED, odd IDs are OPEN.
    ContainerSet set = new ContainerSet();
    for (int id = FIRST_ID; id < FIRST_ID + 10; id++) {
        KeyValueContainerData data = new KeyValueContainerData(id, layout,
            (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(),
            UUID.randomUUID().toString());
        // Alternate container state by ID parity.
        data.setState(id % 2 == 0
            ? ContainerProtos.ContainerDataProto.State.CLOSED
            : ContainerProtos.ContainerDataProto.State.OPEN);
        set.addContainer(new KeyValueContainer(data, new OzoneConfiguration()));
    }
    return set;
}
Also used : OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) KeyValueContainerData(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData) KeyValueContainer(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer)

Example 9 with OzoneConfiguration

Use of org.apache.hadoop.hdds.conf.OzoneConfiguration in the Apache Ozone project.

The following shows the class TestContainerSet, method testAddGetRemoveContainer.

@Test
public void testAddGetRemoveContainer() throws StorageContainerException {
    ContainerSet containerSet = new ContainerSet();
    long containerId = 100L;
    ContainerProtos.ContainerDataProto.State state = ContainerProtos.ContainerDataProto.State.CLOSED;
    KeyValueContainerData kvData = new KeyValueContainerData(containerId, layout, (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), UUID.randomUUID().toString());
    kvData.setState(state);
    KeyValueContainer keyValueContainer = new KeyValueContainer(kvData, new OzoneConfiguration());
    // addContainer
    boolean result = containerSet.addContainer(keyValueContainer);
    assertTrue(result);
    try {
        containerSet.addContainer(keyValueContainer);
        fail("Adding same container ID twice should fail.");
    } catch (StorageContainerException ex) {
        GenericTestUtils.assertExceptionContains("Container already exists with" + " container Id " + containerId, ex);
    }
    // getContainer
    KeyValueContainer container = (KeyValueContainer) containerSet.getContainer(containerId);
    KeyValueContainerData keyValueContainerData = container.getContainerData();
    assertEquals(containerId, keyValueContainerData.getContainerID());
    assertEquals(state, keyValueContainerData.getState());
    assertNull(containerSet.getContainer(1000L));
    // removeContainer
    assertTrue(containerSet.removeContainer(containerId));
    assertFalse(containerSet.removeContainer(1000L));
}
Also used : OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) StorageContainerException(org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException) KeyValueContainerData(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData) KeyValueContainer(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer) Test(org.junit.Test)

Example 10 with OzoneConfiguration

Use of org.apache.hadoop.hdds.conf.OzoneConfiguration in the Apache Ozone project.

The following shows the class TestHddsDispatcher, method testContainerCloseActionWhenFull.

@Test
public void testContainerCloseActionWhenFull() throws IOException {
    String testDir = GenericTestUtils.getTempPath(TestHddsDispatcher.class.getSimpleName());
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set(HDDS_DATANODE_DIR_KEY, testDir);
    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir);
    DatanodeDetails dd = randomDatanodeDetails();
    MutableVolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null);
    try {
        UUID scmId = UUID.randomUUID();
        ContainerSet containerSet = new ContainerSet();
        DatanodeStateMachine stateMachine = Mockito.mock(DatanodeStateMachine.class);
        StateContext context = Mockito.mock(StateContext.class);
        Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(dd);
        Mockito.when(context.getParent()).thenReturn(stateMachine);
        KeyValueContainerData containerData = new KeyValueContainerData(1L, layout, (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(), dd.getUuidString());
        Container container = new KeyValueContainer(containerData, conf);
        container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), scmId.toString());
        containerSet.addContainer(container);
        ContainerMetrics metrics = ContainerMetrics.create(conf);
        Map<ContainerType, Handler> handlers = Maps.newHashMap();
        for (ContainerType containerType : ContainerType.values()) {
            handlers.put(containerType, Handler.getHandlerForContainerType(containerType, conf, context.getParent().getDatanodeDetails().getUuidString(), containerSet, volumeSet, metrics, NO_OP_ICR_SENDER));
        }
        HddsDispatcher hddsDispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers, context, metrics, null);
        hddsDispatcher.setClusterId(scmId.toString());
        ContainerCommandResponseProto responseOne = hddsDispatcher.dispatch(getWriteChunkRequest(dd.getUuidString(), 1L, 1L), null);
        Assert.assertEquals(ContainerProtos.Result.SUCCESS, responseOne.getResult());
        verify(context, times(0)).addContainerActionIfAbsent(Mockito.any(ContainerAction.class));
        containerData.setBytesUsed(Double.valueOf(StorageUnit.MB.toBytes(950)).longValue());
        ContainerCommandResponseProto responseTwo = hddsDispatcher.dispatch(getWriteChunkRequest(dd.getUuidString(), 1L, 2L), null);
        Assert.assertEquals(ContainerProtos.Result.SUCCESS, responseTwo.getResult());
        verify(context, times(1)).addContainerActionIfAbsent(Mockito.any(ContainerAction.class));
    } finally {
        volumeSet.shutdown();
        ContainerMetrics.remove();
        FileUtils.deleteDirectory(new File(testDir));
    }
}
Also used : RoundRobinVolumeChoosingPolicy(org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy) ContainerType(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType) StateContext(org.apache.hadoop.ozone.container.common.statemachine.StateContext) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) Handler(org.apache.hadoop.ozone.container.common.interfaces.Handler) ByteString(org.apache.ratis.thirdparty.com.google.protobuf.ByteString) KeyValueContainerData(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData) ContainerCommandResponseProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto) KeyValueContainer(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer) Container(org.apache.hadoop.ozone.container.common.interfaces.Container) ContainerAction(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerAction) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) DatanodeStateMachine(org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine) MutableVolumeSet(org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet) UUID(java.util.UUID) ContainerMetrics(org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics) File(java.io.File) KeyValueContainer(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer) Test(org.junit.Test)

Aggregations

OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration)691 Test (org.junit.Test)269 Before (org.junit.Before)116 File (java.io.File)80 BeforeClass (org.junit.BeforeClass)75 DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails)70 IOException (java.io.IOException)56 Test (org.junit.jupiter.api.Test)50 ArrayList (java.util.ArrayList)49 OmMetadataManagerImpl (org.apache.hadoop.ozone.om.OmMetadataManagerImpl)35 MockDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails)34 BeforeEach (org.junit.jupiter.api.BeforeEach)34 MockDatanodeDetails.randomDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails)32 UUID (java.util.UUID)29 InetSocketAddress (java.net.InetSocketAddress)27 ECReplicationConfig (org.apache.hadoop.hdds.client.ECReplicationConfig)25 ReplicationConfig (org.apache.hadoop.hdds.client.ReplicationConfig)23 Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline)23 MockDatanodeDetails.createDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.createDatanodeDetails)22 HDDSLayoutVersionManager (org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager)22