Use of org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet in project ozone by apache.
The call method of the ChunkManagerDiskWrite class.
@Override
public Void call() throws Exception {
  try {
    init();
    OzoneConfiguration ozoneConfiguration = createOzoneConfiguration();
    VolumeSet volumeSet = new MutableVolumeSet("dnid", "clusterid",
        ozoneConfiguration, null, StorageVolume.VolumeType.DATA_VOLUME, null);
    Random random = new Random();
    VolumeChoosingPolicy volumeChoicePolicy = new RoundRobinVolumeChoosingPolicy();
    final int threadCount = getThreadNo();
    // create a dedicated (NEW) container for each thread
    for (int i = 1; i <= threadCount; i++) {
      // use a non-negative container id
      long containerId = random.nextLong() & 0x0F_FF_FF_FF_FF_FF_FF_FFL;
      KeyValueContainerData keyValueContainerData =
          new KeyValueContainerData(containerId, containerLayout, 1_000_000L,
              getPrefix(), "nodeid");
      KeyValueContainer keyValueContainer =
          new KeyValueContainer(keyValueContainerData, ozoneConfiguration);
      keyValueContainer.create(volumeSet, volumeChoicePolicy, "scmid");
      containersPerThread.put(i, keyValueContainer);
    }
    blockSize = chunkSize * chunksPerBlock;
    data = randomAscii(chunkSize).getBytes(UTF_8);
    chunkManager = ChunkManagerFactory.createChunkManager(ozoneConfiguration, null, null);
    timer = getMetrics().timer("chunk-write");
    LOG.info("Running chunk write test: threads={} chunkSize={} chunksPerBlock={} layout={}",
        threadCount, chunkSize, chunksPerBlock, containerLayout);
    runTests(this::writeChunk);
  } finally {
    if (chunkManager != null) {
      chunkManager.shutdown();
    }
  }
  return null;
}
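A side note on the container id above: Math.abs(random.nextLong()) would not be safe here, because Math.abs(Long.MIN_VALUE) is itself negative; masking the high bits avoids that edge case entirely. A minimal illustration (a sketch, not part of the Ozone source):

// Clearing the top four bits keeps the id in [0, 2^60), so it is never negative.
long containerId = new Random().nextLong() & 0x0F_FF_FF_FF_FF_FF_FF_FFL;
assert containerId >= 0;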
Use of org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet in project ozone by apache.
The loadContainersFromVolumes method of the ContainerCommands class.
public void loadContainersFromVolumes() throws IOException {
  OzoneConfiguration conf = parent.getOzoneConf();
  ContainerSet containerSet = new ContainerSet();
  ContainerMetrics metrics = ContainerMetrics.create(conf);
  String firstStorageDir = getFirstStorageDir(conf);
  String datanodeUuid = getDatanodeUUID(firstStorageDir, conf);
  String clusterId = getClusterId(firstStorageDir);
  volumeSet = new MutableVolumeSet(datanodeUuid, conf, null,
      StorageVolume.VolumeType.DATA_VOLUME, null);
  Map<ContainerProtos.ContainerType, Handler> handlers = new HashMap<>();
  for (ContainerProtos.ContainerType containerType
      : ContainerProtos.ContainerType.values()) {
    final Handler handler = Handler.getHandlerForContainerType(
        containerType, conf, datanodeUuid, containerSet, volumeSet, metrics,
        containerReplicaProto -> {
        });
    handler.setClusterID(clusterId);
    handlers.put(containerType, handler);
  }
  controller = new ContainerController(containerSet, handlers);
  List<HddsVolume> volumes =
      StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList());
  Iterator<HddsVolume> volumeSetIterator = volumes.iterator();
  LOG.info("Starting to read all the container metadata");
  while (volumeSetIterator.hasNext()) {
    HddsVolume volume = volumeSetIterator.next();
    LOG.info("Loading container metadata from volume {}", volume);
    final ContainerReader reader =
        new ContainerReader(volumeSet, volume, containerSet, conf);
    reader.run();
  }
  LOG.info("All the container metadata is loaded.");
}
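Two details worth noting. The empty lambda passed to Handler.getHandlerForContainerType acts as a no-op report sender, which makes sense for an offline inspection tool that has no SCM to report to. And since nothing mutates the volume list during the scan, the explicit Iterator could equally be an enhanced for loop; a behavior-equivalent sketch (per-volume logging elided):

for (HddsVolume volume : StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList())) {
  new ContainerReader(volumeSet, volume, containerSet, conf).run();
}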
Use of org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet in project ozone by apache.
The call method of the GeneratorDatanode class.
@Override
public Void call() throws Exception {
  init();
  config = createOzoneConfiguration();
  BlockManager blockManager = new BlockManagerImpl(config);
  chunkManager = ChunkManagerFactory.createChunkManager(config, blockManager, null);
  final Collection<String> storageDirs =
      HddsServerUtil.getDatanodeStorageDirs(config);
  String firstStorageDir =
      StorageLocation.parse(storageDirs.iterator().next()).getUri().getPath();
  final Path hddsDir = Paths.get(firstStorageDir, "hdds");
  if (!Files.exists(hddsDir)) {
    throw new NoSuchFileException(hddsDir
        + " doesn't exist. Please start a real cluster to initialize the"
        + " VERSION descriptors, and re-start this generator after the files"
        + " are created (but after the cluster is stopped).");
  }
  scmId = getScmIdFromStoragePath(hddsDir);
  final File versionFile = new File(firstStorageDir, "hdds/VERSION");
  Properties props = DatanodeVersionFile.readFrom(versionFile);
  if (props.isEmpty()) {
    throw new InconsistentStorageStateException(
        "Version file " + versionFile + " is missing");
  }
  String clusterId = HddsVolumeUtil.getProperty(props, OzoneConsts.CLUSTER_ID, versionFile);
  datanodeId = HddsVolumeUtil.getProperty(props, OzoneConsts.DATANODE_UUID, versionFile);
  volumeSet = new MutableVolumeSet(datanodeId, clusterId, config, null,
      StorageVolume.VolumeType.DATA_VOLUME, null);
  volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy();
  final OzoneClientConfig ozoneClientConfig = config.getObject(OzoneClientConfig.class);
  checksum = new Checksum(ozoneClientConfig.getChecksumType(),
      ozoneClientConfig.getBytesPerChecksum());
  timer = getMetrics().timer("datanode-generator");
  runTests(this::generateData);
  return null;
}
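The VERSION handling above goes through the DatanodeVersionFile and HddsVolumeUtil helpers. As a rough approximation of what the lookup boils down to, here is a sketch using plain java.util.Properties (an illustration only, not the Ozone helpers):

Properties props = new Properties();
try (InputStream in = Files.newInputStream(versionFile.toPath())) {
  props.load(in);
}
// Unlike the HddsVolumeUtil.getProperty call above, getProperty returns null
// for a missing key, so a caller of this sketch must null-check explicitly.
String clusterId = props.getProperty(OzoneConsts.CLUSTER_ID);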
Use of org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet in project ozone by apache.
The setupPaths method of the TestContainerPersistence class.
@Before
public void setupPaths() throws IOException {
  containerSet = new ContainerSet();
  volumeSet = new MutableVolumeSet(DATANODE_UUID, conf, null,
      StorageVolume.VolumeType.DATA_VOLUME, null);
  blockManager = new BlockManagerImpl(conf);
  chunkManager = ChunkManagerFactory.createChunkManager(conf, blockManager, null);
  for (String dir : conf.getStrings(ScmConfigKeys.HDDS_DATANODE_DIR_KEY)) {
    StorageLocation location = StorageLocation.parse(dir);
    FileUtils.forceMkdir(new File(location.getNormalizedUri()));
  }
}
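The excerpt shows only the setup half of the fixture. A matching teardown (an assumption, not shown in the source) would shut the volume set down and delete the directories so no state leaks between tests:

@After
public void cleanupPaths() throws IOException {
  if (volumeSet != null) {
    volumeSet.shutdown();
  }
  for (String dir : conf.getStrings(ScmConfigKeys.HDDS_DATANODE_DIR_KEY)) {
    StorageLocation location = StorageLocation.parse(dir);
    FileUtils.deleteDirectory(new File(location.getNormalizedUri()));
  }
}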
Use of org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet in project ozone by apache.
The testContainerCloseActionWhenFull method of the TestHddsDispatcher class.
@Test
public void testContainerCloseActionWhenFull() throws IOException {
  String testDir = GenericTestUtils.getTempPath(
      TestHddsDispatcher.class.getSimpleName());
  OzoneConfiguration conf = new OzoneConfiguration();
  conf.set(HDDS_DATANODE_DIR_KEY, testDir);
  conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir);
  DatanodeDetails dd = randomDatanodeDetails();
  MutableVolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf,
      null, StorageVolume.VolumeType.DATA_VOLUME, null);
  try {
    UUID scmId = UUID.randomUUID();
    ContainerSet containerSet = new ContainerSet();
    DatanodeStateMachine stateMachine = Mockito.mock(DatanodeStateMachine.class);
    StateContext context = Mockito.mock(StateContext.class);
    Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(dd);
    Mockito.when(context.getParent()).thenReturn(stateMachine);
    KeyValueContainerData containerData = new KeyValueContainerData(1L, layout,
        (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(),
        dd.getUuidString());
    Container container = new KeyValueContainer(containerData, conf);
    container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(),
        scmId.toString());
    containerSet.addContainer(container);
    ContainerMetrics metrics = ContainerMetrics.create(conf);
    Map<ContainerType, Handler> handlers = Maps.newHashMap();
    for (ContainerType containerType : ContainerType.values()) {
      handlers.put(containerType, Handler.getHandlerForContainerType(
          containerType, conf,
          context.getParent().getDatanodeDetails().getUuidString(),
          containerSet, volumeSet, metrics, NO_OP_ICR_SENDER));
    }
    HddsDispatcher hddsDispatcher = new HddsDispatcher(conf, containerSet,
        volumeSet, handlers, context, metrics, null);
    hddsDispatcher.setClusterId(scmId.toString());
    // First write: the container is far from full, so no close action is expected.
    ContainerCommandResponseProto responseOne = hddsDispatcher.dispatch(
        getWriteChunkRequest(dd.getUuidString(), 1L, 1L), null);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS, responseOne.getResult());
    verify(context, times(0))
        .addContainerActionIfAbsent(Mockito.any(ContainerAction.class));
    // Pretend 950 MB of the 1 GB container is used, then write again:
    // the dispatcher should now queue a close action.
    containerData.setBytesUsed(Double.valueOf(StorageUnit.MB.toBytes(950)).longValue());
    ContainerCommandResponseProto responseTwo = hddsDispatcher.dispatch(
        getWriteChunkRequest(dd.getUuidString(), 1L, 2L), null);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS, responseTwo.getResult());
    verify(context, times(1))
        .addContainerActionIfAbsent(Mockito.any(ContainerAction.class));
  } finally {
    volumeSet.shutdown();
    ContainerMetrics.remove();
    FileUtils.deleteDirectory(new File(testDir));
  }
}
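Why the second write triggers the close action: the test fakes a nearly full container, and assuming the dispatcher's default close threshold of 0.9 (the threshold is configurable; its key is not shown in this excerpt), the numbers work out as follows:

// Sketch: StorageUnit uses binary units, so 1 GB = 1024 MB.
double used = StorageUnit.MB.toBytes(950); // 996,147,200 bytes
double max = StorageUnit.GB.toBytes(1);    // 1,073,741,824 bytes
// used / max is about 0.928 > 0.9, so a container close action is queued.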