Use of org.apache.hadoop.ozone.container.common.impl.ContainerSet in project ozone by apache.
From the class ContainerCommands, method loadContainersFromVolumes:
public void loadContainersFromVolumes() throws IOException {
  OzoneConfiguration conf = parent.getOzoneConf();
  ContainerSet containerSet = new ContainerSet();
  ContainerMetrics metrics = ContainerMetrics.create(conf);
  String firstStorageDir = getFirstStorageDir(conf);
  String datanodeUuid = getDatanodeUUID(firstStorageDir, conf);
  String clusterId = getClusterId(firstStorageDir);
  volumeSet = new MutableVolumeSet(datanodeUuid, conf, null,
      StorageVolume.VolumeType.DATA_VOLUME, null);
  Map<ContainerProtos.ContainerType, Handler> handlers = new HashMap<>();
  for (ContainerProtos.ContainerType containerType
      : ContainerProtos.ContainerType.values()) {
    final Handler handler = Handler.getHandlerForContainerType(
        containerType, conf, datanodeUuid, containerSet, volumeSet, metrics,
        containerReplicaProto -> {
        });
    handler.setClusterID(clusterId);
    handlers.put(containerType, handler);
  }
  controller = new ContainerController(containerSet, handlers);
  List<HddsVolume> volumes =
      StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList());
  Iterator<HddsVolume> volumeSetIterator = volumes.iterator();
  LOG.info("Starting the read all the container metadata");
  while (volumeSetIterator.hasNext()) {
    HddsVolume volume = volumeSetIterator.next();
    LOG.info("Loading container metadata from volume " + volume.toString());
    final ContainerReader reader =
        new ContainerReader(volumeSet, volume, containerSet, conf);
    reader.run();
  }
  LOG.info("All the container metadata is loaded.");
}
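Once loadContainersFromVolumes() has run, the populated ContainerSet can be inspected before the controller is used. A minimal sketch of such a check, assuming ContainerSet exposes containerCount() and getContainerIterator() and that Container#getContainerData() carries the container id and state (these accessor names are assumptions, not taken from the snippet above):
// Hypothetical helper: walk the containers that the ContainerReaders registered.
private void logLoadedContainers(ContainerSet containerSet) {
  LOG.info("Loaded {} containers", containerSet.containerCount());
  Iterator<Container<?>> iterator = containerSet.getContainerIterator();
  while (iterator.hasNext()) {
    ContainerData data = iterator.next().getContainerData();
    LOG.info("Container {} is in state {}", data.getContainerID(), data.getState());
  }
}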
Use of org.apache.hadoop.ozone.container.common.impl.ContainerSet in project ozone by apache.
From the class TestCSMMetrics, method newXceiverServerRatis:
static XceiverServerRatis newXceiverServerRatis(DatanodeDetails dn,
    OzoneConfiguration conf) throws IOException {
  conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT,
      dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue());
  final String dir = TEST_DIR + dn.getUuid();
  conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir);
  final ContainerDispatcher dispatcher = new TestContainerDispatcher();
  return XceiverServerRatis.newXceiverServerRatis(dn, conf, dispatcher,
      new ContainerController(new ContainerSet(), Maps.newHashMap()),
      null, null);
}
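A test that uses this factory method typically starts the returned server, exercises the dispatcher, and stops it again. A minimal usage sketch, assuming XceiverServerRatis follows the usual start()/stop() lifecycle of the XceiverServerSpi interface (treat the lifecycle method names as assumptions):
XceiverServerRatis server = newXceiverServerRatis(dn, conf);
try {
  server.start();
  // ... send requests through the dispatcher and read the CSM metrics here ...
} finally {
  server.stop();
}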
Use of org.apache.hadoop.ozone.container.common.impl.ContainerSet in project ozone by apache.
From the class TestHandler, method setup:
@Before
public void setup() throws Exception {
  this.conf = new OzoneConfiguration();
  this.containerSet = Mockito.mock(ContainerSet.class);
  this.volumeSet = Mockito.mock(MutableVolumeSet.class);
  DatanodeDetails datanodeDetails = Mockito.mock(DatanodeDetails.class);
  DatanodeStateMachine stateMachine = Mockito.mock(DatanodeStateMachine.class);
  StateContext context = Mockito.mock(StateContext.class);
  Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails);
  Mockito.when(context.getParent()).thenReturn(stateMachine);
  ContainerMetrics metrics = ContainerMetrics.create(conf);
  Map<ContainerProtos.ContainerType, Handler> handlers = Maps.newHashMap();
  for (ContainerProtos.ContainerType containerType
      : ContainerProtos.ContainerType.values()) {
    handlers.put(containerType,
        Handler.getHandlerForContainerType(containerType, conf,
            context.getParent().getDatanodeDetails().getUuidString(),
            containerSet, volumeSet, metrics,
            TestHddsDispatcher.NO_OP_ICR_SENDER));
  }
  this.dispatcher = new HddsDispatcher(
      conf, containerSet, volumeSet, handlers, null, metrics, null);
}
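With the dispatcher wired up, a test can look up the handler registered for a container type and check its concrete class. A minimal sketch, assuming HddsDispatcher exposes a getHandler(ContainerType) accessor and that the KeyValueContainer type is served by KeyValueHandler (both assumptions):
@Test
public void testGetKeyValueHandler() throws Exception {
  // getHandler(...) is assumed to return the Handler registered in setup().
  Handler kvHandler =
      dispatcher.getHandler(ContainerProtos.ContainerType.KeyValueContainer);
  Assert.assertTrue("KeyValueContainer type should map to KeyValueHandler",
      kvHandler instanceof KeyValueHandler);
}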
Use of org.apache.hadoop.ozone.container.common.impl.ContainerSet in project ozone by apache.
From the class TestContainerReader, method setup:
@Before
public void setup() throws Exception {
  File volumeDir = tempDir.newFolder();
  volumeSet = Mockito.mock(MutableVolumeSet.class);
  containerSet = new ContainerSet();
  conf = new OzoneConfiguration();
  datanodeId = UUID.randomUUID();
  hddsVolume = new HddsVolume.Builder(volumeDir.getAbsolutePath())
      .conf(conf)
      .datanodeUuid(datanodeId.toString())
      .clusterID(clusterId)
      .build();
  volumeSet = mock(MutableVolumeSet.class);
  volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class);
  Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
      .thenReturn(hddsVolume);
  for (int i = 0; i < 2; i++) {
    KeyValueContainerData keyValueContainerData = new KeyValueContainerData(i,
        ContainerLayoutVersion.FILE_PER_BLOCK,
        (long) StorageUnit.GB.toBytes(5),
        UUID.randomUUID().toString(), datanodeId.toString());
    KeyValueContainer keyValueContainer =
        new KeyValueContainer(keyValueContainerData, conf);
    keyValueContainer.create(volumeSet, volumeChoosingPolicy, clusterId);
    List<Long> blkNames;
    if (i % 2 == 0) {
      blkNames = addBlocks(keyValueContainer, true);
      markBlocksForDelete(keyValueContainer, true, blkNames, i);
    } else {
      blkNames = addBlocks(keyValueContainer, false);
      markBlocksForDelete(keyValueContainer, false, blkNames, i);
    }
    // Close the RocksDB instance for this container and remove it from the
    // cache so it does not affect the ContainerReader, which avoids using
    // the cache at startup for performance reasons.
    BlockUtils.removeDB(keyValueContainerData, conf);
  }
}
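A test built on this setup can then run a ContainerReader over the volume and verify that both containers created above are loaded into the ContainerSet. A minimal sketch, assuming containerCount() and getContainer(long) accessors on ContainerSet (accessor names are assumptions):
@Test
public void testContainerReader() throws Exception {
  ContainerReader containerReader =
      new ContainerReader(volumeSet, hddsVolume, containerSet, conf);
  containerReader.run();
  // Both containers written in setup() should now be visible in memory.
  Assert.assertEquals(2, containerSet.containerCount());
  Assert.assertNotNull(containerSet.getContainer(0));
  Assert.assertNotNull(containerSet.getContainer(1));
}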
Use of org.apache.hadoop.ozone.container.common.impl.ContainerSet in project ozone by apache.
From the class ReplicationSupervisorScheduling, method test:
@Test
public void test() throws InterruptedException {
  List<DatanodeDetails> datanodes = new ArrayList<>();
  datanodes.add(MockDatanodeDetails.randomDatanodeDetails());
  datanodes.add(MockDatanodeDetails.randomDatanodeDetails());

  // Locks representing the limited resources of remote and local disks:
  // datanode -> disk -> lock object (remote resources)
  Map<UUID, Map<Integer, Object>> volumeLocks = new HashMap<>();
  // disk -> lock (local resources)
  Map<Integer, Object> destinationLocks = new HashMap<>();

  // Init the locks.
  for (DatanodeDetails datanode : datanodes) {
    volumeLocks.put(datanode.getUuid(), new HashMap<>());
    for (int i = 0; i < 10; i++) {
      volumeLocks.get(datanode.getUuid()).put(i, new Object());
    }
  }
  for (int i = 0; i < 10; i++) {
    destinationLocks.put(i, new Object());
  }

  ContainerSet cs = new ContainerSet();
  ReplicationSupervisor rs = new ReplicationSupervisor(cs,
      task -> {
        // Download, limited by the number of source datanodes.
        final DatanodeDetails sourceDatanode =
            task.getSources().get(random.nextInt(task.getSources().size()));
        final Map<Integer, Object> volumes =
            volumeLocks.get(sourceDatanode.getUuid());
        Object volumeLock = volumes.get(random.nextInt(volumes.size()));
        synchronized (volumeLock) {
          System.out.println("Downloading " + task.getContainerId()
              + " from " + sourceDatanode.getUuid());
          try {
            volumeLock.wait(1000);
          } catch (InterruptedException ex) {
            ex.printStackTrace();
          }
        }
        // Import, limited by the destination datanode.
        final int volumeIndex = random.nextInt(destinationLocks.size());
        Object destinationLock = destinationLocks.get(volumeIndex);
        synchronized (destinationLock) {
          System.out.println("Importing " + task.getContainerId()
              + " to disk " + volumeIndex);
          try {
            destinationLock.wait(1000);
          } catch (InterruptedException ex) {
            ex.printStackTrace();
          }
        }
      }, 10);

  final long start = System.currentTimeMillis();
  // Schedule 100 container replications.
  for (int i = 0; i < 100; i++) {
    List<DatanodeDetails> sources = new ArrayList<>();
    sources.add(datanodes.get(random.nextInt(datanodes.size())));
    rs.addTask(new ReplicationTask(i, sources));
  }
  rs.shutdownAfterFinish();

  final long executionTime = System.currentTimeMillis() - start;
  System.out.println(executionTime);
  Assert.assertTrue("Execution was too slow: " + executionTime + " ms",
      executionTime < 100_000);
}
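After shutdownAfterFinish() returns, the supervisor's counters could additionally be checked to confirm that all scheduled tasks were processed. A minimal sketch, assuming ReplicationSupervisor exposes getReplicationRequestCount() and getReplicationSuccessCount() (the counter accessor names are assumptions):
// Assumed accessors: every one of the 100 scheduled replications should have
// been requested and completed successfully by the mock replicator above.
Assert.assertEquals(100, rs.getReplicationRequestCount());
Assert.assertEquals(100, rs.getReplicationSuccessCount());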