Use of org.apache.hadoop.hdds.conf.OzoneConfiguration in project ozone by apache.
The setup method of the class TestSCMContainerPlacementRackAware.
@Before
public void setup() {
  // Initialize the network topology instance.
  conf = new OzoneConfiguration();
  // We are using small units here.
  conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN,
      1, StorageUnit.BYTES);
  NodeSchema[] schemas =
      new NodeSchema[] {ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA};
  NodeSchemaManager.getInstance().init(schemas, true);
  cluster = new NetworkTopologyImpl(NodeSchemaManager.getInstance());

  // Build the datanodes and the network topology:
  // 3 racks in total, each with 5 datanodes.
  String rack = "/rack";
  String hostname = "node";
  for (int i = 0; i < datanodeCount; i++) {
    DatanodeDetails datanodeDetails =
        MockDatanodeDetails.createDatanodeDetails(
            hostname + i, rack + (i / NODE_PER_RACK));
    DatanodeInfo datanodeInfo = new DatanodeInfo(datanodeDetails,
        NodeStatus.inServiceHealthy(),
        UpgradeUtils.defaultLayoutVersionProto());
    StorageReportProto storage1 = HddsTestUtils.createStorageReport(
        datanodeInfo.getUuid(),
        "/data1-" + datanodeInfo.getUuidString(),
        STORAGE_CAPACITY, 0, 100L, null);
    MetadataStorageReportProto metaStorage1 =
        HddsTestUtils.createMetadataStorageReport(
            "/metadata1-" + datanodeInfo.getUuidString(),
            STORAGE_CAPACITY, 0, 100L, null);
    datanodeInfo.updateStorageReports(
        new ArrayList<>(Arrays.asList(storage1)));
    datanodeInfo.updateMetaDataStorageReports(
        new ArrayList<>(Arrays.asList(metaStorage1)));
    datanodes.add(datanodeDetails);
    cluster.add(datanodeDetails);
    dnInfos.add(datanodeInfo);
  }

  // Overwrite the reports of a few nodes so they show varying
  // used/remaining space.
  if (datanodeCount > 4) {
    StorageReportProto storage2 = HddsTestUtils.createStorageReport(
        dnInfos.get(2).getUuid(),
        "/data1-" + datanodes.get(2).getUuidString(),
        STORAGE_CAPACITY, 90L, 10L, null);
    dnInfos.get(2).updateStorageReports(
        new ArrayList<>(Arrays.asList(storage2)));
    StorageReportProto storage3 = HddsTestUtils.createStorageReport(
        dnInfos.get(3).getUuid(),
        "/data1-" + dnInfos.get(3).getUuidString(),
        STORAGE_CAPACITY, 80L, 20L, null);
    dnInfos.get(3).updateStorageReports(
        new ArrayList<>(Arrays.asList(storage3)));
    StorageReportProto storage4 = HddsTestUtils.createStorageReport(
        dnInfos.get(4).getUuid(),
        "/data1-" + dnInfos.get(4).getUuidString(),
        STORAGE_CAPACITY, 70L, 30L, null);
    dnInfos.get(4).updateStorageReports(
        new ArrayList<>(Arrays.asList(storage4)));
  } else if (datanodeCount > 3) {
    StorageReportProto storage2 = HddsTestUtils.createStorageReport(
        dnInfos.get(2).getUuid(),
        "/data1-" + dnInfos.get(2).getUuidString(),
        STORAGE_CAPACITY, 90L, 10L, null);
    dnInfos.get(2).updateStorageReports(
        new ArrayList<>(Arrays.asList(storage2)));
    StorageReportProto storage3 = HddsTestUtils.createStorageReport(
        dnInfos.get(3).getUuid(),
        "/data1-" + dnInfos.get(3).getUuidString(),
        STORAGE_CAPACITY, 80L, 20L, null);
    dnInfos.get(3).updateStorageReports(
        new ArrayList<>(Arrays.asList(storage3)));
  } else if (datanodeCount > 2) {
    StorageReportProto storage2 = HddsTestUtils.createStorageReport(
        dnInfos.get(2).getUuid(),
        "/data1-" + dnInfos.get(2).getUuidString(),
        STORAGE_CAPACITY, 84L, 16L, null);
    dnInfos.get(2).updateStorageReports(
        new ArrayList<>(Arrays.asList(storage2)));
  }

  // Create a mock node manager that serves the nodes built above.
  nodeManager = Mockito.mock(NodeManager.class);
  when(nodeManager.getNodes(NodeStatus.inServiceHealthy()))
      .thenReturn(new ArrayList<>(datanodes));
  for (DatanodeInfo dn : dnInfos) {
    when(nodeManager.getNodeByUuid(dn.getUuidString())).thenReturn(dn);
  }
  when(nodeManager.getClusterNetworkTopologyMap()).thenReturn(cluster);

  // Create the placement policy instances under test: one with
  // fallback enabled, one without.
  metrics = SCMContainerPlacementMetrics.create();
  policy = new SCMContainerPlacementRackAware(
      nodeManager, conf, cluster, true, metrics);
  policyNoFallback = new SCMContainerPlacementRackAware(
      nodeManager, conf, cluster, false, metrics);
}
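With the topology and mocks in place, a test can ask the rack-aware policy for placement targets. A minimal usage sketch; note that the exact chooseDatanodes argument list is an assumption here (it has changed across Ozone releases, and older versions take a single sizeRequired argument):

  // Hypothetical call; the argument list is an assumption about the Ozone version.
  int nodesRequired = 3;
  List<DatanodeDetails> chosen = policy.chooseDatanodes(
      new ArrayList<>(),  // excluded nodes
      null,               // favored nodes
      nodesRequired,
      15,                 // metadata size required, in bytes
      15);                // data size required, in bytes
  // Rack-aware placement spreads three replicas over at least two racks.
  Assert.assertEquals(nodesRequired, chosen.size());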
Use of org.apache.hadoop.hdds.conf.OzoneConfiguration in project ozone by apache.
The init method of the class TestContainerPersistence.
@BeforeClass
public static void init() {
  conf = new OzoneConfiguration();
  hddsPath = GenericTestUtils.getTempPath(
      TestContainerPersistence.class.getSimpleName());
  conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, hddsPath);
  conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, hddsPath);
  volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy();
}
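This init only points the datanode and metadata directories at a temp path; a matching teardown (a sketch, not part of this excerpt) would delete that directory so repeated runs start clean, using the same commons-io FileUtils that the dispatcher test below relies on:

  @AfterClass
  public static void cleanup() throws IOException {
    // Remove the temp directory created in init().
    FileUtils.deleteDirectory(new File(hddsPath));
  }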
Use of org.apache.hadoop.hdds.conf.OzoneConfiguration in project ozone by apache.
The createContainerSet method of the class TestContainerSet.
private ContainerSet createContainerSet() throws StorageContainerException {
  ContainerSet containerSet = new ContainerSet();
  for (int i = FIRST_ID; i < FIRST_ID + 10; i++) {
    KeyValueContainerData kvData = new KeyValueContainerData(i, layout,
        (long) StorageUnit.GB.toBytes(5),
        UUID.randomUUID().toString(), UUID.randomUUID().toString());
    // Even container IDs are CLOSED, odd IDs are OPEN.
    if (i % 2 == 0) {
      kvData.setState(ContainerProtos.ContainerDataProto.State.CLOSED);
    } else {
      kvData.setState(ContainerProtos.ContainerDataProto.State.OPEN);
    }
    KeyValueContainer kv =
        new KeyValueContainer(kvData, new OzoneConfiguration());
    containerSet.addContainer(kv);
  }
  return containerSet;
}
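Because the helper alternates container states, it is convenient for iteration tests. A short sketch of consuming the result, assuming the getContainerIterator accessor that other Ozone code uses on ContainerSet:

  // Count open vs. closed containers produced by createContainerSet().
  ContainerSet set = createContainerSet();
  int open = 0;
  int closed = 0;
  Iterator<Container<?>> it = set.getContainerIterator();
  while (it.hasNext()) {
    ContainerData data = it.next().getContainerData();
    if (data.getState() == ContainerProtos.ContainerDataProto.State.OPEN) {
      open++;
    } else {
      closed++;
    }
  }
  // Even IDs were CLOSED and odd IDs OPEN, so the ten containers split 5/5.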
Use of org.apache.hadoop.hdds.conf.OzoneConfiguration in project ozone by apache.
The testAddGetRemoveContainer method of the class TestContainerSet.
@Test
public void testAddGetRemoveContainer() throws StorageContainerException {
  ContainerSet containerSet = new ContainerSet();
  long containerId = 100L;
  ContainerProtos.ContainerDataProto.State state =
      ContainerProtos.ContainerDataProto.State.CLOSED;
  KeyValueContainerData kvData = new KeyValueContainerData(containerId,
      layout, (long) StorageUnit.GB.toBytes(5),
      UUID.randomUUID().toString(), UUID.randomUUID().toString());
  kvData.setState(state);
  KeyValueContainer keyValueContainer =
      new KeyValueContainer(kvData, new OzoneConfiguration());

  // addContainer
  boolean result = containerSet.addContainer(keyValueContainer);
  assertTrue(result);
  try {
    containerSet.addContainer(keyValueContainer);
    fail("Adding same container ID twice should fail.");
  } catch (StorageContainerException ex) {
    GenericTestUtils.assertExceptionContains(
        "Container already exists with container Id " + containerId, ex);
  }

  // getContainer
  KeyValueContainer container =
      (KeyValueContainer) containerSet.getContainer(containerId);
  KeyValueContainerData keyValueContainerData = container.getContainerData();
  assertEquals(containerId, keyValueContainerData.getContainerID());
  assertEquals(state, keyValueContainerData.getState());
  assertNull(containerSet.getContainer(1000L));

  // removeContainer
  assertTrue(containerSet.removeContainer(containerId));
  assertFalse(containerSet.removeContainer(1000L));
}
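The try/fail idiom in the duplicate-add check can be tightened with assertThrows, assuming the test runs on JUnit 4.13 or later:

  StorageContainerException ex = Assert.assertThrows(
      StorageContainerException.class,
      () -> containerSet.addContainer(keyValueContainer));
  GenericTestUtils.assertExceptionContains(
      "Container already exists with container Id " + containerId, ex);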
Use of org.apache.hadoop.hdds.conf.OzoneConfiguration in project ozone by apache.
The testContainerCloseActionWhenFull method of the class TestHddsDispatcher.
@Test
public void testContainerCloseActionWhenFull() throws IOException {
  String testDir = GenericTestUtils.getTempPath(
      TestHddsDispatcher.class.getSimpleName());
  OzoneConfiguration conf = new OzoneConfiguration();
  conf.set(HDDS_DATANODE_DIR_KEY, testDir);
  conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir);
  DatanodeDetails dd = randomDatanodeDetails();
  MutableVolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf,
      null, StorageVolume.VolumeType.DATA_VOLUME, null);
  try {
    UUID scmId = UUID.randomUUID();
    ContainerSet containerSet = new ContainerSet();
    DatanodeStateMachine stateMachine =
        Mockito.mock(DatanodeStateMachine.class);
    StateContext context = Mockito.mock(StateContext.class);
    Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(dd);
    Mockito.when(context.getParent()).thenReturn(stateMachine);

    // Create a 1 GB container and register it with the container set.
    KeyValueContainerData containerData = new KeyValueContainerData(1L,
        layout, (long) StorageUnit.GB.toBytes(1),
        UUID.randomUUID().toString(), dd.getUuidString());
    Container container = new KeyValueContainer(containerData, conf);
    container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(),
        scmId.toString());
    containerSet.addContainer(container);

    ContainerMetrics metrics = ContainerMetrics.create(conf);
    Map<ContainerType, Handler> handlers = Maps.newHashMap();
    for (ContainerType containerType : ContainerType.values()) {
      handlers.put(containerType, Handler.getHandlerForContainerType(
          containerType, conf,
          context.getParent().getDatanodeDetails().getUuidString(),
          containerSet, volumeSet, metrics, NO_OP_ICR_SENDER));
    }
    HddsDispatcher hddsDispatcher = new HddsDispatcher(conf, containerSet,
        volumeSet, handlers, context, metrics, null);
    hddsDispatcher.setClusterId(scmId.toString());

    // A write to the mostly empty container must not queue a close action.
    ContainerCommandResponseProto responseOne = hddsDispatcher.dispatch(
        getWriteChunkRequest(dd.getUuidString(), 1L, 1L), null);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS,
        responseOne.getResult());
    verify(context, times(0))
        .addContainerActionIfAbsent(Mockito.any(ContainerAction.class));

    // Mark the container as nearly full; the next write must queue
    // exactly one close action.
    containerData.setBytesUsed(
        Double.valueOf(StorageUnit.MB.toBytes(950)).longValue());
    ContainerCommandResponseProto responseTwo = hddsDispatcher.dispatch(
        getWriteChunkRequest(dd.getUuidString(), 1L, 2L), null);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS,
        responseTwo.getResult());
    verify(context, times(1))
        .addContainerActionIfAbsent(Mockito.any(ContainerAction.class));
  } finally {
    volumeSet.shutdown();
    ContainerMetrics.remove();
    FileUtils.deleteDirectory(new File(testDir));
  }
}
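The second dispatch queues a close action because 950 MB against the 1 GB maximum crosses the dispatcher's container-close utilisation threshold. A sketch of the arithmetic the test relies on; the 0.9f default and the hdds.container.close.threshold property name are assumptions about the Ozone version in use:

  // Sketch of the full-container check made on the write path.
  float closeThreshold = 0.9f;  // assumed default of hdds.container.close.threshold
  long maxSize = (long) StorageUnit.GB.toBytes(1);      // container max size
  long bytesUsed = (long) StorageUnit.MB.toBytes(950);  // set by the test
  boolean shouldClose = (double) bytesUsed / maxSize >= closeThreshold;
  // 950 MB / 1024 MB ≈ 0.93 >= 0.9, so a close ContainerAction is queued.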