Search in sources :

Example 1 with HdfsDirectoryWithQuotaFeature

Use of io.hops.hopsworks.persistence.entity.hdfs.HdfsDirectoryWithQuotaFeature in the hopsworks project by logicalclocks.

From the class ProjectController, method getQuotasInternal:

/**
 * Collects all quota information for a project: the YARN compute quota, the
 * HDFS space/namespace quota of the project directory and — when such datasets
 * exist — of the Hive database and Feature Store directories, plus the Kafka
 * topic quota.
 *
 * @param project the project whose quotas are collected; its inode and dataset
 *                collection are read (assumed loaded — TODO confirm lazy-loading
 *                behavior at call sites)
 * @return a QuotasDTO where every HDFS value that could not be resolved is -1
 */
public QuotasDTO getQuotasInternal(Project project) {
    float yarnRemainingQuota = 0f, yarnTotalQuota = 0f;
    // Yarn Quota
    YarnProjectsQuota yarnQuota = yarnProjectsQuotaFacade.findByProjectName(project.getName());
    if (yarnQuota == null) {
        LOGGER.log(Level.SEVERE, "Cannot find YARN quota information for project: " + project.getName());
    } else {
        yarnRemainingQuota = yarnQuota.getQuotaRemaining();
        yarnTotalQuota = yarnQuota.getTotal();
    }
    // Each HDFS quota quadruple is carried as long[4]:
    // {space quota, space used, namespace quota, namespace count}; -1 = unknown.
    long[] projectHdfs = noQuotaInfo();
    long[] dbHdfs = noQuotaInfo();
    long[] fsHdfs = noQuotaInfo();
    // HDFS project directory quota
    Optional<HdfsDirectoryWithQuotaFeature> projectAttrs =
        hdfsDirectoryWithQuotaFeatureFacade.getByInodeId(project.getInode().getId());
    if (projectAttrs.isPresent()) {
        projectHdfs = toQuotaValues(projectAttrs.get());
    }
    // If the Hive service is enabled, get the quota information for the db directory;
    // Feature Store dataset directories are handled identically. As in the original
    // code, when several datasets of the same type exist the last one found wins,
    // and an absent quota feature leaves the previous values untouched.
    List<Dataset> datasets = (List<Dataset>) project.getDatasetCollection();
    for (Dataset ds : datasets) {
        if (ds.getDsType() == DatasetType.HIVEDB) {
            Optional<HdfsDirectoryWithQuotaFeature> attrs =
                hdfsDirectoryWithQuotaFeatureFacade.getByInodeId(ds.getInodeId());
            if (attrs.isPresent()) {
                dbHdfs = toQuotaValues(attrs.get());
            }
        } else if (ds.getDsType() == DatasetType.FEATURESTORE) {
            Optional<HdfsDirectoryWithQuotaFeature> attrs =
                hdfsDirectoryWithQuotaFeatureFacade.getByInodeId(ds.getInodeId());
            if (attrs.isPresent()) {
                fsHdfs = toQuotaValues(attrs.get());
            }
        }
    }
    Integer kafkaQuota = project.getKafkaMaxNumTopics();
    return new QuotasDTO(yarnRemainingQuota, yarnTotalQuota,
        projectHdfs[0], projectHdfs[1], projectHdfs[2], projectHdfs[3],
        dbHdfs[0], dbHdfs[1], dbHdfs[2], dbHdfs[3],
        fsHdfs[0], fsHdfs[1], fsHdfs[2], fsHdfs[3],
        kafkaQuota);
}

/** Returns a fresh sentinel quadruple meaning "no quota information available". */
private static long[] noQuotaInfo() {
    return new long[] {-1L, -1L, -1L, -1L};
}

/**
 * Extracts {space quota, storage space used, namespace quota, namespace count}
 * from an inode's quota feature, widening each value to long.
 */
private static long[] toQuotaValues(HdfsDirectoryWithQuotaFeature attrs) {
    return new long[] {
        attrs.getSsquota().longValue(),
        attrs.getStorageSpace().longValue(),
        attrs.getNsquota().longValue(),
        attrs.getNscount().longValue()
    };
}
Also used : Optional(java.util.Optional) Dataset(io.hops.hopsworks.persistence.entity.dataset.Dataset) ArrayList(java.util.ArrayList) List(java.util.List) YarnProjectsQuota(io.hops.hopsworks.persistence.entity.jobs.quota.YarnProjectsQuota) HdfsDirectoryWithQuotaFeature(io.hops.hopsworks.persistence.entity.hdfs.HdfsDirectoryWithQuotaFeature)

Aggregations

Dataset (io.hops.hopsworks.persistence.entity.dataset.Dataset)1 HdfsDirectoryWithQuotaFeature (io.hops.hopsworks.persistence.entity.hdfs.HdfsDirectoryWithQuotaFeature)1 YarnProjectsQuota (io.hops.hopsworks.persistence.entity.jobs.quota.YarnProjectsQuota)1 ArrayList (java.util.ArrayList)1 List (java.util.List)1 Optional (java.util.Optional)1