Search in sources:

Example 1 with MutableVolumeSet

use of org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet in project ozone by apache.

Taken from the call method of the class ChunkManagerDiskWrite.

@Override
public Void call() throws Exception {
    try {
        init();
        OzoneConfiguration conf = createOzoneConfiguration();
        VolumeSet volumes = new MutableVolumeSet("dnid", "clusterid", conf, null,
                StorageVolume.VolumeType.DATA_VOLUME, null);
        Random rng = new Random();
        VolumeChoosingPolicy choosingPolicy = new RoundRobinVolumeChoosingPolicy();
        final int threadCount = getThreadNo();
        // Give every worker thread its own brand-new container to write into.
        for (int thread = 1; thread <= threadCount; thread++) {
            // Mask off the top nibble so the container id is never negative.
            long containerId = rng.nextLong() & 0x0F_FF_FF_FF_FF_FF_FF_FFL;
            KeyValueContainerData containerData = new KeyValueContainerData(
                    containerId, containerLayout, 1_000_000L, getPrefix(), "nodeid");
            KeyValueContainer container = new KeyValueContainer(containerData, conf);
            container.create(volumes, choosingPolicy, "scmid");
            containersPerThread.put(thread, container);
        }
        blockSize = chunkSize * chunksPerBlock;
        // Fixed random-ASCII payload reused for every chunk write.
        data = randomAscii(chunkSize).getBytes(UTF_8);
        chunkManager = ChunkManagerFactory.createChunkManager(conf, null, null);
        timer = getMetrics().timer("chunk-write");
        LOG.info("Running chunk write test: threads={} chunkSize={} chunksPerBlock={} layout={}",
                threadCount, chunkSize, chunksPerBlock, containerLayout);
        runTests(this::writeChunk);
    } finally {
        // Release chunk-manager resources even if setup or the test run failed.
        if (chunkManager != null) {
            chunkManager.shutdown();
        }
    }
    return null;
}
Also used : RoundRobinVolumeChoosingPolicy(org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy) Random(java.util.Random) VolumeChoosingPolicy(org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy) RoundRobinVolumeChoosingPolicy(org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) MutableVolumeSet(org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet) MutableVolumeSet(org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet) VolumeSet(org.apache.hadoop.ozone.container.common.volume.VolumeSet) KeyValueContainerData(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData) KeyValueContainer(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer)

Example 2 with MutableVolumeSet

use of org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet in project ozone by apache.

Taken from the loadContainersFromVolumes method of the class ContainerCommands.

/**
 * Builds the volume set, container handlers and controller from the local
 * configuration, then scans every HDDS data volume and loads the container
 * metadata found there into the container set.
 *
 * @throws IOException if reading the storage directories or container
 *         metadata fails
 */
public void loadContainersFromVolumes() throws IOException {
    OzoneConfiguration conf = parent.getOzoneConf();
    ContainerSet containerSet = new ContainerSet();
    ContainerMetrics metrics = ContainerMetrics.create(conf);
    // Datanode identity comes from the VERSION descriptor of the first
    // configured storage directory.
    String firstStorageDir = getFirstStorageDir(conf);
    String datanodeUuid = getDatanodeUUID(firstStorageDir, conf);
    String clusterId = getClusterId(firstStorageDir);
    volumeSet = new MutableVolumeSet(datanodeUuid, conf, null,
        StorageVolume.VolumeType.DATA_VOLUME, null);
    // One handler per container type; the ICR consumer is a no-op here.
    Map<ContainerProtos.ContainerType, Handler> handlers = new HashMap<>();
    for (ContainerProtos.ContainerType containerType
        : ContainerProtos.ContainerType.values()) {
        final Handler handler = Handler.getHandlerForContainerType(
            containerType, conf, datanodeUuid, containerSet, volumeSet, metrics,
            containerReplicaProto -> {
            });
        handler.setClusterID(clusterId);
        handlers.put(containerType, handler);
    }
    controller = new ContainerController(containerSet, handlers);
    List<HddsVolume> volumes =
        StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList());
    LOG.info("Starting to read all container metadata");
    // Enhanced for-loop replaces the explicit Iterator; traversal order is
    // identical. Parameterized logging avoids eager string concatenation.
    for (HddsVolume volume : volumes) {
        LOG.info("Loading container metadata from volume {}", volume);
        final ContainerReader reader =
            new ContainerReader(volumeSet, volume, containerSet, conf);
        reader.run();
    }
    LOG.info("All the container metadata is loaded.");
}
Also used : ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) HashMap(java.util.HashMap) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) Handler(org.apache.hadoop.ozone.container.common.interfaces.Handler) ContainerController(org.apache.hadoop.ozone.container.ozoneimpl.ContainerController) HddsVolume(org.apache.hadoop.ozone.container.common.volume.HddsVolume) ContainerSet(org.apache.hadoop.ozone.container.common.impl.ContainerSet) MutableVolumeSet(org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet) ContainerMetrics(org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics) ContainerReader(org.apache.hadoop.ozone.container.ozoneimpl.ContainerReader)

Example 3 with MutableVolumeSet

use of org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet in project ozone by apache.

Taken from the call method of the class GeneratorDatanode.

@Override
public Void call() throws Exception {
    init();
    config = createOzoneConfiguration();
    // Chunk manager backed by a block manager built from the same config.
    BlockManager blockManager = new BlockManagerImpl(config);
    chunkManager = ChunkManagerFactory.createChunkManager(config, blockManager, null);
    // Resolve the first configured datanode storage directory and require an
    // already-initialized hdds layout underneath it.
    final Collection<String> storageDirectories = HddsServerUtil.getDatanodeStorageDirs(config);
    String primaryStorageDir = StorageLocation.parse(storageDirectories.iterator().next()).getUri().getPath();
    final Path hddsPath = Paths.get(primaryStorageDir, "hdds");
    if (!Files.exists(hddsPath)) {
        throw new NoSuchFileException(hddsPath + " doesn't exist. Please start a real cluster to initialize the " + "VERSION descriptors, and re-start this generator after the files" + " are created (but after cluster is stopped).");
    }
    scmId = getScmIdFromStoragePath(hddsPath);
    // Identity (cluster id, datanode uuid) is read from the VERSION file.
    final File versionFile = new File(primaryStorageDir, "hdds/VERSION");
    Properties versionProps = DatanodeVersionFile.readFrom(versionFile);
    if (versionProps.isEmpty()) {
        throw new InconsistentStorageStateException("Version file " + versionFile + " is missing");
    }
    String clusterId = HddsVolumeUtil.getProperty(versionProps, OzoneConsts.CLUSTER_ID, versionFile);
    datanodeId = HddsVolumeUtil.getProperty(versionProps, OzoneConsts.DATANODE_UUID, versionFile);
    volumeSet = new MutableVolumeSet(datanodeId, clusterId, config, null, StorageVolume.VolumeType.DATA_VOLUME, null);
    volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy();
    // Checksum settings come from the client-side Ozone configuration object.
    final OzoneClientConfig clientConfig = config.getObject(OzoneClientConfig.class);
    checksum = new Checksum(clientConfig.getChecksumType(), clientConfig.getBytesPerChecksum());
    timer = getMetrics().timer("datanode-generator");
    runTests(this::generateData);
    return null;
}
Also used : Path(java.nio.file.Path) RoundRobinVolumeChoosingPolicy(org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy) NoSuchFileException(java.nio.file.NoSuchFileException) OzoneClientConfig(org.apache.hadoop.hdds.scm.OzoneClientConfig) Properties(java.util.Properties) BlockManager(org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager) Checksum(org.apache.hadoop.ozone.common.Checksum) MutableVolumeSet(org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet) DatanodeVersionFile(org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile) File(java.io.File) BlockManagerImpl(org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl) InconsistentStorageStateException(org.apache.hadoop.ozone.common.InconsistentStorageStateException)

Example 4 with MutableVolumeSet

use of org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet in project ozone by apache.

Taken from the setupPaths method of the class TestContainerPersistence.

@Before
public void setupPaths() throws IOException {
    // Fresh container set, volume set and managers before each test.
    containerSet = new ContainerSet();
    volumeSet = new MutableVolumeSet(DATANODE_UUID, conf, null,
        StorageVolume.VolumeType.DATA_VOLUME, null);
    blockManager = new BlockManagerImpl(conf);
    chunkManager = ChunkManagerFactory.createChunkManager(conf, blockManager, null);
    // Make sure every configured datanode storage directory exists on disk.
    for (String rawLocation : conf.getStrings(ScmConfigKeys.HDDS_DATANODE_DIR_KEY)) {
        File storageDir = new File(StorageLocation.parse(rawLocation).getNormalizedUri());
        FileUtils.forceMkdir(storageDir);
    }
}
Also used : MutableVolumeSet(org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet) StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation) File(java.io.File) BlockManagerImpl(org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl) Before(org.junit.Before)

Example 5 with MutableVolumeSet

use of org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet in project ozone by apache.

Taken from the testContainerCloseActionWhenFull method of the class TestHddsDispatcher.

@Test
public void testContainerCloseActionWhenFull() throws IOException {
    // Verifies that the dispatcher raises a container action (presumably a
    // close request — confirm against HddsDispatcher) exactly once, after the
    // container's reported usage grows to 950 MB of its 1 GB capacity.
    String testDir = GenericTestUtils.getTempPath(TestHddsDispatcher.class.getSimpleName());
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set(HDDS_DATANODE_DIR_KEY, testDir);
    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir);
    DatanodeDetails dd = randomDatanodeDetails();
    MutableVolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null);
    try {
        UUID scmId = UUID.randomUUID();
        ContainerSet containerSet = new ContainerSet();
        // Mocked state machine/context: supplies datanode details to the
        // dispatcher and lets us verify addContainerActionIfAbsent calls.
        DatanodeStateMachine stateMachine = Mockito.mock(DatanodeStateMachine.class);
        StateContext context = Mockito.mock(StateContext.class);
        Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(dd);
        Mockito.when(context.getParent()).thenReturn(stateMachine);
        // Pre-create container id 1 with a 1 GB capacity so the write chunk
        // requests below have a target.
        KeyValueContainerData containerData = new KeyValueContainerData(1L, layout, (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(), dd.getUuidString());
        Container container = new KeyValueContainer(containerData, conf);
        container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), scmId.toString());
        containerSet.addContainer(container);
        ContainerMetrics metrics = ContainerMetrics.create(conf);
        // One handler per container type, with a no-op ICR sender.
        Map<ContainerType, Handler> handlers = Maps.newHashMap();
        for (ContainerType containerType : ContainerType.values()) {
            handlers.put(containerType, Handler.getHandlerForContainerType(containerType, conf, context.getParent().getDatanodeDetails().getUuidString(), containerSet, volumeSet, metrics, NO_OP_ICR_SENDER));
        }
        HddsDispatcher hddsDispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers, context, metrics, null);
        hddsDispatcher.setClusterId(scmId.toString());
        // First write: container is nearly empty — no container action expected.
        ContainerCommandResponseProto responseOne = hddsDispatcher.dispatch(getWriteChunkRequest(dd.getUuidString(), 1L, 1L), null);
        Assert.assertEquals(ContainerProtos.Result.SUCCESS, responseOne.getResult());
        verify(context, times(0)).addContainerActionIfAbsent(Mockito.any(ContainerAction.class));
        // Simulate a nearly-full container (950 MB used of 1 GB), then write
        // again: the dispatcher should now record a container action, once.
        containerData.setBytesUsed(Double.valueOf(StorageUnit.MB.toBytes(950)).longValue());
        ContainerCommandResponseProto responseTwo = hddsDispatcher.dispatch(getWriteChunkRequest(dd.getUuidString(), 1L, 2L), null);
        Assert.assertEquals(ContainerProtos.Result.SUCCESS, responseTwo.getResult());
        verify(context, times(1)).addContainerActionIfAbsent(Mockito.any(ContainerAction.class));
    } finally {
        // Always release volumes/metrics and clean up the temp directory.
        volumeSet.shutdown();
        ContainerMetrics.remove();
        FileUtils.deleteDirectory(new File(testDir));
    }
}
Also used : RoundRobinVolumeChoosingPolicy(org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy) ContainerType(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType) StateContext(org.apache.hadoop.ozone.container.common.statemachine.StateContext) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) Handler(org.apache.hadoop.ozone.container.common.interfaces.Handler) ByteString(org.apache.ratis.thirdparty.com.google.protobuf.ByteString) KeyValueContainerData(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData) ContainerCommandResponseProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto) KeyValueContainer(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer) Container(org.apache.hadoop.ozone.container.common.interfaces.Container) ContainerAction(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerAction) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) DatanodeStateMachine(org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine) MutableVolumeSet(org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet) UUID(java.util.UUID) ContainerMetrics(org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics) File(java.io.File) KeyValueContainer(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer) Test(org.junit.Test)

Aggregations

MutableVolumeSet (org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet)25 File (java.io.File)12 Test (org.junit.Test)12 OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration)10 StorageVolume (org.apache.hadoop.ozone.container.common.volume.StorageVolume)10 ContainerMetrics (org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics)9 ContainerSet (org.apache.hadoop.ozone.container.common.impl.ContainerSet)8 IOException (java.io.IOException)7 HddsVolume (org.apache.hadoop.ozone.container.common.volume.HddsVolume)7 RoundRobinVolumeChoosingPolicy (org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy)7 Handler (org.apache.hadoop.ozone.container.common.interfaces.Handler)6 VolumeSet (org.apache.hadoop.ozone.container.common.volume.VolumeSet)6 DatanodeStateMachine (org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine)5 StateContext (org.apache.hadoop.ozone.container.common.statemachine.StateContext)5 Map (java.util.Map)4 UUID (java.util.UUID)4 DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails)4 HddsDatanodeService (org.apache.hadoop.ozone.HddsDatanodeService)4 KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer)4 KeyValueContainerData (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData)4