Example 1 with HdfsUsers

Use of io.hops.hopsworks.persistence.entity.hdfs.user.HdfsUsers in project hopsworks by logicalclocks.

The class ModelsBuilder, method buildFilter:

private Pair<ProvStateParamBuilder, ModelRegistryDTO> buildFilter(Project project, Project modelRegistryProject, Set<? extends AbstractFacade.FilterBy> filters) throws GenericException, ProvenanceException, DatasetException {
    ProvStateParamBuilder provFilesParamBuilder = new ProvStateParamBuilder();
    if (filters != null) {
        Users filterUser = null;
        Project filterUserProject = project;
        for (AbstractFacade.FilterBy filterBy : filters) {
            if (filterBy.getParam().compareToIgnoreCase(Filters.NAME_EQ.name()) == 0) {
                provFilesParamBuilder.filterByXAttr(MODEL_SUMMARY_XATTR_NAME + ".name", filterBy.getValue());
            } else if (filterBy.getParam().compareToIgnoreCase(Filters.NAME_LIKE.name()) == 0) {
                provFilesParamBuilder.filterLikeXAttr(MODEL_SUMMARY_XATTR_NAME + ".name", filterBy.getValue());
            } else if (filterBy.getParam().compareToIgnoreCase(Filters.VERSION.name()) == 0) {
                provFilesParamBuilder.filterByXAttr(MODEL_SUMMARY_XATTR_NAME + ".version", filterBy.getValue());
            } else if (filterBy.getParam().compareToIgnoreCase(Filters.ID_EQ.name()) == 0) {
                provFilesParamBuilder.filterByXAttr(MODEL_SUMMARY_XATTR_NAME + ".id", filterBy.getValue());
            } else if (filterBy.getParam().compareToIgnoreCase(Filters.USER.name()) == 0) {
                try {
                    filterUser = userFacade.find(Integer.parseInt(filterBy.getValue()));
                } catch (NumberFormatException e) {
                    throw new GenericException(RESTCodes.GenericErrorCode.ILLEGAL_ARGUMENT, Level.INFO, "expected int user id, found: " + filterBy.getValue());
                }
            } else if (filterBy.getParam().compareToIgnoreCase(Filters.USER_PROJECT.name()) == 0) {
                try {
                    filterUserProject = projectFacade.find(Integer.parseInt(filterBy.getValue()));
                } catch (NumberFormatException e) {
                    throw new GenericException(RESTCodes.GenericErrorCode.ILLEGAL_ARGUMENT, Level.INFO, "expected int user project id, found: " + filterBy.getValue());
                }
            } else {
                throw new GenericException(RESTCodes.GenericErrorCode.ILLEGAL_ARGUMENT, Level.INFO, "Filter by - found: " + filterBy.getParam() + " expected:" + EnumSet.allOf(Filters.class));
            }
        }
        if (filterUser != null) {
            ProjectTeam member = projectTeamFacade.findByPrimaryKey(filterUserProject, filterUser);
            if (member == null) {
                throw new GenericException(RESTCodes.GenericErrorCode.ILLEGAL_ARGUMENT, Level.INFO, "Selected user: " + filterUser.getUid() + " is not part of project:" + filterUserProject.getId());
            }
            String hdfsUserStr = hdfsUsersController.getHdfsUserName(filterUserProject, filterUser);
            HdfsUsers hdfsUsers = hdfsUsersFacade.findByName(hdfsUserStr);
            provFilesParamBuilder.filterByField(ProvStateParser.FieldsP.USER_ID, hdfsUsers.getId().toString());
        }
    }
    ModelRegistryDTO modelRegistryDTO = modelsController.getModelRegistry(modelRegistryProject);
    provFilesParamBuilder.filterByField(ProvStateParser.FieldsP.PROJECT_I_ID, modelRegistryDTO.getParentProject().getInode().getId()).filterByField(ProvStateParser.FieldsP.DATASET_I_ID, modelRegistryDTO.getDatasetInodeId());
    return Pair.with(provFilesParamBuilder, modelRegistryDTO);
}
Also used : ProvStateParamBuilder(io.hops.hopsworks.common.provenance.state.ProvStateParamBuilder) Project(io.hops.hopsworks.persistence.entity.project.Project) ProjectTeam(io.hops.hopsworks.persistence.entity.project.team.ProjectTeam) AbstractFacade(io.hops.hopsworks.common.dao.AbstractFacade) HdfsUsers(io.hops.hopsworks.persistence.entity.hdfs.user.HdfsUsers) Users(io.hops.hopsworks.persistence.entity.user.Users) ModelRegistryDTO(io.hops.hopsworks.api.modelregistry.dto.ModelRegistryDTO) GenericException(io.hops.hopsworks.exceptions.GenericException)
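
Both this builder and ExperimentsBuilder (Example 4 below) resolve the provenance USER_ID filter the same way: map the (project, user) pair to an HDFS username, then load the HdfsUsers row and use its numeric id. Below is a minimal, null-safe sketch of that lookup. The class and method names are hypothetical, the facade calls are injected as functions so the snippet stands alone, and the Optional guard is an addition of the sketch (the quoted code would throw a NullPointerException if findByName returned no row).

import io.hops.hopsworks.persistence.entity.hdfs.user.HdfsUsers;
import io.hops.hopsworks.persistence.entity.project.Project;
import io.hops.hopsworks.persistence.entity.user.Users;
import java.util.Optional;
import java.util.function.BiFunction;
import java.util.function.Function;

public class HdfsUserIdResolver {

    // Resolves the value passed to ProvStateParser.FieldsP.USER_ID above, but returns
    // Optional.empty() instead of failing when no HdfsUsers row matches the HDFS username.
    // The functional parameters are stand-ins so the sketch compiles without the EJB beans;
    // in Hopsworks they would be hdfsUsersController::getHdfsUserName and hdfsUsersFacade::findByName.
    public static Optional<String> resolveHdfsUserId(BiFunction<Project, Users, String> hdfsUserNameOf,
                                                     Function<String, HdfsUsers> findHdfsUserByName,
                                                     Project project, Users user) {
        String hdfsUserName = hdfsUserNameOf.apply(project, user);
        HdfsUsers hdfsUser = findHdfsUserByName.apply(hdfsUserName);
        return Optional.ofNullable(hdfsUser).map(u -> u.getId().toString());
    }
}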

Example 2 with HdfsUsers

Use of io.hops.hopsworks.persistence.entity.hdfs.user.HdfsUsers in project hopsworks by logicalclocks.

The class JupyterService, method startNotebookServer:

@POST
@Path("/start")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
@AllowedProjectRoles({ AllowedProjectRoles.DATA_OWNER, AllowedProjectRoles.DATA_SCIENTIST })
@JWTRequired(acceptedTokens = { Audience.API }, allowedUserRoles = { "HOPS_ADMIN", "HOPS_USER" })
public Response startNotebookServer(JupyterSettings jupyterSettings, @Context HttpServletRequest req, @Context SecurityContext sc, @Context UriInfo uriInfo) throws ProjectException, HopsSecurityException, ServiceException, GenericException, JobException {
    Users hopsworksUser = jWTHelper.getUserPrincipal(sc);
    String hdfsUser = hdfsUsersController.getHdfsUserName(project, hopsworksUser);
    // The users field may not be set by the front-end, so default it to the requesting user
    if (jupyterSettings.getUsers() == null) {
        jupyterSettings.setUsers(hopsworksUser);
    }
    if (project.getPaymentType().equals(PaymentType.PREPAID)) {
        YarnProjectsQuota projectQuota = yarnProjectsQuotaFacade.findByProjectName(project.getName());
        if (projectQuota == null || projectQuota.getQuotaRemaining() <= 0) {
            throw new ProjectException(RESTCodes.ProjectErrorCode.PROJECT_QUOTA_ERROR, Level.FINE);
        }
    }
    if (project.getPythonEnvironment() == null) {
        throw new ProjectException(RESTCodes.ProjectErrorCode.ANACONDA_NOT_ENABLED, Level.FINE);
    }
    if (jupyterSettings.getMode() == null) {
        // set default mode for jupyter if mode is null
        jupyterSettings.setMode(JupyterMode.JUPYTER_LAB);
    }
    // Jupyter Git works only for JupyterLab
    if (jupyterSettings.isGitBackend() && jupyterSettings.getMode().equals(JupyterMode.JUPYTER_CLASSIC)) {
        throw new ServiceException(RESTCodes.ServiceErrorCode.JUPYTER_START_ERROR, Level.FINE, "Git support available only in JupyterLab");
    }
    // Do not allow auto push on shutdown if api key is missing
    GitConfig gitConfig = jupyterSettings.getGitConfig();
    if (jupyterSettings.isGitBackend() && gitConfig.getShutdownAutoPush() && Strings.isNullOrEmpty(gitConfig.getApiKeyName())) {
        throw new ServiceException(RESTCodes.ServiceErrorCode.JUPYTER_START_ERROR, Level.FINE, "Auto push not supported if api key is not configured.");
    }
    // Verify that the API token has write access on the repo if ShutdownAutoPush is enabled
    if (jupyterSettings.isGitBackend() && gitConfig.getShutdownAutoPush() && !jupyterNbVCSController.hasWriteAccess(hopsworksUser, gitConfig.getApiKeyName(), gitConfig.getRemoteGitURL(), gitConfig.getGitBackend())) {
        throw new ServiceException(RESTCodes.ServiceErrorCode.JUPYTER_START_ERROR, Level.FINE, "API token " + gitConfig.getApiKeyName() + " does not have write access on " + gitConfig.getRemoteGitURL());
    }
    JupyterProject jp = jupyterFacade.findByUser(hdfsUser);
    if (jp == null) {
        HdfsUsers user = hdfsUsersFacade.findByName(hdfsUser);
        String configSecret = DigestUtils.sha256Hex(Integer.toString(ThreadLocalRandom.current().nextInt()));
        JupyterDTO dto = null;
        DistributedFileSystemOps dfso = dfsService.getDfsOps();
        String allowOriginHost = uriInfo.getBaseUri().getHost();
        int allowOriginPort = uriInfo.getBaseUri().getPort();
        String allowOriginPortStr = allowOriginPort != -1 ? ":" + allowOriginPort : "";
        String allowOrigin = settings.getJupyterOriginScheme() + "://" + allowOriginHost + allowOriginPortStr;
        try {
            jupyterSettingsFacade.update(jupyterSettings);
            // Inspect dependencies
            sparkController.inspectDependencies(project, hopsworksUser, (SparkJobConfiguration) jupyterSettings.getJobConfig());
            dto = jupyterManager.startJupyterServer(project, configSecret, hdfsUser, hopsworksUser, jupyterSettings, allowOrigin);
            jupyterJWTManager.materializeJWT(hopsworksUser, project, jupyterSettings, dto.getCid(), dto.getPort(), JUPYTER_JWT_AUD);
            HopsUtils.materializeCertificatesForUserCustomDir(project.getName(), user.getUsername(), settings.getHdfsTmpCertDir(), dfso, certificateMaterializer, settings, dto.getCertificatesDir());
            jupyterManager.waitForStartup(project, hopsworksUser);
        } catch (ServiceException | TimeoutException ex) {
            if (dto != null) {
                jupyterController.shutdownQuietly(project, hdfsUser, hopsworksUser, configSecret, dto.getCid(), dto.getPort());
            }
            throw new ServiceException(RESTCodes.ServiceErrorCode.JUPYTER_START_ERROR, Level.SEVERE, ex.getMessage(), null, ex);
        } catch (IOException ex) {
            if (dto != null) {
                jupyterController.shutdownQuietly(project, hdfsUser, hopsworksUser, configSecret, dto.getCid(), dto.getPort());
            }
            throw new HopsSecurityException(RESTCodes.SecurityErrorCode.CERT_MATERIALIZATION_ERROR, Level.SEVERE, ex.getMessage(), null, ex);
        } finally {
            if (dfso != null) {
                dfsService.closeDfsClient(dfso);
            }
        }
        String externalIp = Ip.getHost(req.getRequestURL().toString());
        try {
            Date expirationDate = new Date();
            Calendar cal = Calendar.getInstance();
            cal.setTime(expirationDate);
            cal.add(Calendar.HOUR_OF_DAY, jupyterSettings.getShutdownLevel());
            expirationDate = cal.getTime();
            jp = jupyterFacade.saveServer(externalIp, project, configSecret, dto.getPort(), user.getId(), dto.getToken(), dto.getCid(), expirationDate, jupyterSettings.isNoLimit());
            // set minutes left until notebook server is killed
            Duration durationLeft = Duration.between(new Date().toInstant(), jp.getExpires().toInstant());
            jp.setMinutesUntilExpiration(durationLeft.toMinutes());
        } catch (Exception e) {
            LOGGER.log(Level.SEVERE, "Failed to save Jupyter notebook settings", e);
            jupyterController.shutdownQuietly(project, hdfsUser, hopsworksUser, configSecret, dto.getCid(), dto.getPort());
        }
        if (jp == null) {
            throw new ServiceException(RESTCodes.ServiceErrorCode.JUPYTER_SAVE_SETTINGS_ERROR, Level.SEVERE);
        }
        if (jupyterSettings.isGitBackend()) {
            try {
                // Init is idempotent, calling it on an already initialized repo won't affect it
                jupyterNbVCSController.init(jp, jupyterSettings);
                if (jupyterSettings.getGitConfig().getStartupAutoPull()) {
                    jupyterNbVCSController.pull(jp, jupyterSettings);
                }
            } catch (ServiceException ex) {
                jupyterController.shutdownQuietly(project, hdfsUser, hopsworksUser, configSecret, dto.getCid(), dto.getPort());
                throw ex;
            }
        }
    } else {
        throw new ServiceException(RESTCodes.ServiceErrorCode.JUPYTER_SERVER_ALREADY_RUNNING, Level.FINE);
    }
    return noCacheResponse.getNoCacheResponseBuilder(Response.Status.OK).entity(jp).build();
}
Also used : DistributedFileSystemOps(io.hops.hopsworks.common.hdfs.DistributedFileSystemOps) Calendar(java.util.Calendar) JupyterProject(io.hops.hopsworks.persistence.entity.jupyter.JupyterProject) Duration(java.time.Duration) HdfsUsers(io.hops.hopsworks.persistence.entity.hdfs.user.HdfsUsers) Users(io.hops.hopsworks.persistence.entity.user.Users) IOException(java.io.IOException) Date(java.util.Date) TimeoutException(java.util.concurrent.TimeoutException) ProjectException(io.hops.hopsworks.exceptions.ProjectException) JobException(io.hops.hopsworks.exceptions.JobException) GenericException(io.hops.hopsworks.exceptions.GenericException) HopsSecurityException(io.hops.hopsworks.exceptions.HopsSecurityException) ElasticException(io.hops.hopsworks.exceptions.ElasticException) ServiceException(io.hops.hopsworks.exceptions.ServiceException) GitConfig(io.hops.hopsworks.persistence.entity.jupyter.config.GitConfig) YarnProjectsQuota(io.hops.hopsworks.persistence.entity.jobs.quota.YarnProjectsQuota) JupyterDTO(io.hops.hopsworks.common.dao.jupyter.config.JupyterDTO) Path(javax.ws.rs.Path) POST(javax.ws.rs.POST) Consumes(javax.ws.rs.Consumes) Produces(javax.ws.rs.Produces) JWTRequired(io.hops.hopsworks.jwt.annotation.JWTRequired) AllowedProjectRoles(io.hops.hopsworks.api.filter.AllowedProjectRoles)
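
The expiration handling above builds the expiry Date with Calendar and then derives the minutes remaining with Duration. Below is a compact java.time sketch of the same arithmetic, assuming shutdownLevel is an hour count as in the quoted call to cal.add(Calendar.HOUR_OF_DAY, ...); the class and method names are made up for illustration.

import java.time.Duration;
import java.time.Instant;
import java.util.Date;

public class JupyterExpiry {

    // Same arithmetic as the Calendar block above: expiration = now + shutdownLevel hours.
    public static Date expirationDate(int shutdownLevelHours) {
        return Date.from(Instant.now().plus(Duration.ofHours(shutdownLevelHours)));
    }

    // Minutes left until the notebook server is killed, as set on JupyterProject above.
    public static long minutesUntilExpiration(Date expires) {
        return Duration.between(Instant.now(), expires.toInstant()).toMinutes();
    }

    public static void main(String[] args) {
        Date expires = expirationDate(6);
        System.out.println("Expires: " + expires + ", minutes left: " + minutesUntilExpiration(expires));
    }
}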

Example 3 with HdfsUsers

Use of io.hops.hopsworks.persistence.entity.hdfs.user.HdfsUsers in project hopsworks by logicalclocks.

The class JupyterNotebooksBean, method getHdfsUser:

public String getHdfsUser(JupyterProject notebook) {
    int hdfsId = notebook.getHdfsUserId();
    if (hdfsId == -1) {
        return "Orphaned";
    }
    HdfsUsers hdfsUser = hdfsUsersFacade.find(hdfsId);
    return hdfsUser.getName();
}
Also used : HdfsUsers(io.hops.hopsworks.persistence.entity.hdfs.user.HdfsUsers)
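
getHdfsUser treats an id of -1 as an orphaned notebook, but it would still throw a NullPointerException if the stored id no longer resolves to an HdfsUsers row. A hedged variant covering both cases is sketched below; the class and method names are hypothetical, and the lookup is passed in as a function (in Hopsworks it would be hdfsUsersFacade::find) so the snippet is self-contained.

import io.hops.hopsworks.persistence.entity.hdfs.user.HdfsUsers;
import java.util.function.IntFunction;

public class HdfsUserNames {

    // Returns the HDFS username for a notebook's hdfsUserId, or "Orphaned" when the id is -1
    // or the referenced HdfsUsers row has been removed.
    public static String hdfsUserNameOf(int hdfsUserId, IntFunction<HdfsUsers> findById) {
        if (hdfsUserId == -1) {
            return "Orphaned";
        }
        HdfsUsers hdfsUser = findById.apply(hdfsUserId);
        return hdfsUser != null ? hdfsUser.getName() : "Orphaned";
    }
}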

Example 4 with HdfsUsers

Use of io.hops.hopsworks.persistence.entity.hdfs.user.HdfsUsers in project hopsworks by logicalclocks.

The class ExperimentsBuilder, method buildFilter:

private Pair<ProvStateParamBuilder, Map<Long, ExperimentsEndpointDTO>> buildFilter(Project project, Set<? extends AbstractFacade.FilterBy> filters) throws ProvenanceException, GenericException, DatasetException {
    ProvStateParamBuilder provFilesParamBuilder = new ProvStateParamBuilder();
    Map<Long, ExperimentsEndpointDTO> selectedEndpoints = new HashMap<>();
    if (filters != null) {
        Users filterUser = null;
        Project filterUserProject = project;
        for (AbstractFacade.FilterBy filterBy : filters) {
            if (filterBy.getParam().compareToIgnoreCase(Filters.ENDPOINT_ID.name()) == 0) {
                ExperimentsEndpointDTO endpoint = verifyExperimentsEndpoint(project, filterBy.getValue());
                selectedEndpoints.put(endpoint.getParentProject().getInode().getId(), endpoint);
            } else if (filterBy.getParam().compareToIgnoreCase(Filters.NAME_LIKE.name()) == 0) {
                provFilesParamBuilder.filterLikeXAttr(EXPERIMENT_SUMMARY_XATTR_NAME + ".name", filterBy.getValue());
            } else if (filterBy.getParam().compareToIgnoreCase(Filters.NAME_EQ.name()) == 0) {
                provFilesParamBuilder.filterByXAttr(EXPERIMENT_SUMMARY_XATTR_NAME + ".name", filterBy.getValue());
            } else if (filterBy.getParam().compareToIgnoreCase(Filters.DATE_START_LT.name()) == 0) {
                Long timestamp = getDate(filterBy.getField(), filterBy.getValue()).getTime();
                provFilesParamBuilder.filterByField(ProvStateParser.FieldsPF.CREATE_TIMESTAMP_LT, timestamp);
            } else if (filterBy.getParam().compareToIgnoreCase(Filters.DATE_START_GT.name()) == 0) {
                Long timestamp = getDate(filterBy.getField(), filterBy.getValue()).getTime();
                provFilesParamBuilder.filterByField(ProvStateParser.FieldsPF.CREATE_TIMESTAMP_GT, timestamp);
            } else if (filterBy.getParam().compareToIgnoreCase(Filters.USER.name()) == 0) {
                try {
                    filterUser = userFacade.find(Integer.parseInt(filterBy.getValue()));
                } catch (NumberFormatException e) {
                    throw new GenericException(RESTCodes.GenericErrorCode.ILLEGAL_ARGUMENT, Level.INFO, "expected int user id, found: " + filterBy.getValue());
                }
            } else if (filterBy.getParam().compareToIgnoreCase(Filters.USER_PROJECT.name()) == 0) {
                try {
                    filterUserProject = projectFacade.find(Integer.parseInt(filterBy.getValue()));
                } catch (NumberFormatException e) {
                    throw new GenericException(RESTCodes.GenericErrorCode.ILLEGAL_ARGUMENT, Level.INFO, "expected int user project id, found: " + filterBy.getValue());
                }
            } else if (filterBy.getParam().compareToIgnoreCase(Filters.STATE.name()) == 0) {
                provFilesParamBuilder.filterLikeXAttr(EXPERIMENT_SUMMARY_XATTR_NAME + ".state", filterBy.getValue());
            } else if (filterBy.getParam().compareToIgnoreCase(Filters.ID_EQ.name()) == 0) {
                provFilesParamBuilder.filterByXAttr(EXPERIMENT_SUMMARY_XATTR_NAME + ".id", filterBy.getValue());
            } else {
                throw new GenericException(RESTCodes.GenericErrorCode.ILLEGAL_ARGUMENT, Level.INFO, "Filter by - found: " + filterBy.getParam() + " expected:" + EnumSet.allOf(Filters.class));
            }
        }
        if (filterUser != null) {
            ProjectTeam member = projectTeamFacade.findByPrimaryKey(filterUserProject, filterUser);
            if (member == null) {
                throw new GenericException(RESTCodes.GenericErrorCode.ILLEGAL_ARGUMENT, Level.INFO, "Selected user: " + filterUser.getUid() + " is not part of project:" + filterUserProject.getId());
            }
            String hdfsUserStr = hdfsUsersController.getHdfsUserName(filterUserProject, filterUser);
            HdfsUsers hdfsUsers = hdfsUsersFacade.findByName(hdfsUserStr);
            provFilesParamBuilder.filterByField(ProvStateParser.FieldsP.USER_ID, hdfsUsers.getId().toString());
        }
    }
    // An endpoint always has to be selected; if none is provided, all accessible endpoints are used
    if (selectedEndpoints.isEmpty()) {
        for (ExperimentsEndpointDTO endpoint : experimentsController.getExperimentsEndpoints(project)) {
            selectedEndpoints.put(endpoint.getParentProject().getInode().getId(), endpoint);
        }
    }
    for (ExperimentsEndpointDTO endpoint : selectedEndpoints.values()) {
        provFilesParamBuilder.filterByField(ProvStateParser.FieldsP.PROJECT_I_ID, endpoint.getParentProject().getInode().getId()).filterByField(ProvStateParser.FieldsP.DATASET_I_ID, endpoint.getDatasetInodeId());
    }
    return Pair.with(provFilesParamBuilder, selectedEndpoints);
}
Also used : HashMap(java.util.HashMap) HdfsUsers(io.hops.hopsworks.persistence.entity.hdfs.user.HdfsUsers) Users(io.hops.hopsworks.persistence.entity.user.Users) GenericException(io.hops.hopsworks.exceptions.GenericException) ProvStateParamBuilder(io.hops.hopsworks.common.provenance.state.ProvStateParamBuilder) Project(io.hops.hopsworks.persistence.entity.project.Project) ProjectTeam(io.hops.hopsworks.persistence.entity.project.team.ProjectTeam) AbstractFacade(io.hops.hopsworks.common.dao.AbstractFacade) ExperimentsEndpointDTO(io.hops.hopsworks.api.experiments.dto.ExperimentsEndpointDTO)

Example 5 with HdfsUsers

Use of io.hops.hopsworks.persistence.entity.hdfs.user.HdfsUsers in project hopsworks by logicalclocks.

The class PermissionsCleaner, method testAndFixPermission:

private void testAndFixPermission(ProjectTeam projectTeam, DistributedFileSystemOps dfso, HdfsGroups hdfsDatasetGroup, HdfsGroups hdfsDatasetAclGroup, HdfsUsers owner, DatasetAccessPermission permission) throws IOException {
    if (projectTeam.getUser().getUsername().equals("srvmanager")) {
        // Does this user need to be in groups?
        return;
    }
    String hdfsUsername = hdfsUsersController.getHdfsUserName(projectTeam.getProject(), projectTeam.getUser());
    HdfsUsers hdfsUser = hdfsUsersController.getOrCreateUser(hdfsUsername, dfso);
    if (owner != null && owner.equals(hdfsUser)) {
        return;
    }
    switch(permission) {
        case EDITABLE:
            if (!hdfsDatasetGroup.hasUser(hdfsUser)) {
                addToGroup(hdfsUser, hdfsDatasetGroup, dfso);
            }
            if (hdfsDatasetAclGroup.hasUser(hdfsUser)) {
                removeFromGroup(hdfsUser, hdfsDatasetAclGroup, dfso);
            }
            break;
        case READ_ONLY:
            if (hdfsDatasetGroup.hasUser(hdfsUser)) {
                removeFromGroup(hdfsUser, hdfsDatasetGroup, dfso);
            }
            if (!hdfsDatasetAclGroup.hasUser(hdfsUser)) {
                addToGroup(hdfsUser, hdfsDatasetAclGroup, dfso);
            }
            break;
        case EDITABLE_BY_OWNERS:
            if (AllowedRoles.DATA_OWNER.equals(projectTeam.getTeamRole())) {
                if (!hdfsDatasetGroup.hasUser(hdfsUser)) {
                    addToGroup(hdfsUser, hdfsDatasetGroup, dfso);
                }
                if (hdfsDatasetAclGroup.hasUser(hdfsUser)) {
                    removeFromGroup(hdfsUser, hdfsDatasetAclGroup, dfso);
                }
            } else {
                if (hdfsDatasetGroup.hasUser(hdfsUser)) {
                    removeFromGroup(hdfsUser, hdfsDatasetGroup, dfso);
                }
                if (!hdfsDatasetAclGroup.hasUser(hdfsUser)) {
                    addToGroup(hdfsUser, hdfsDatasetAclGroup, dfso);
                }
            }
            break;
        default:
            LOGGER.log(Level.WARNING, "Found a dataset with an unknown permission: group={0}, project={1}", new Object[] { hdfsDatasetGroup, projectTeam.getProject().getName() });
    }
}
Also used : HdfsUsers(io.hops.hopsworks.persistence.entity.hdfs.user.HdfsUsers)
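
The switch above encodes a small policy: EDITABLE members belong in the dataset's write group, READ_ONLY members in the ACL (read-only) group, and under EDITABLE_BY_OWNERS only Data Owners keep write membership. A stand-alone restatement of that truth table is sketched below; the class, enum, and method are hypothetical stand-ins (the real DatasetAccessPermission enum is not shown in this listing), but the outcomes match the add/remove pairs above.

public class DatasetGroupPolicy {

    // Stand-in for the DatasetAccessPermission enum used in testAndFixPermission above.
    enum Permission { EDITABLE, READ_ONLY, EDITABLE_BY_OWNERS }

    // True if the member should be in the dataset's write group; otherwise the ACL (read-only)
    // group is the right place, mirroring the addToGroup/removeFromGroup pairs above.
    static boolean belongsInWriteGroup(Permission permission, boolean isDataOwner) {
        switch (permission) {
            case EDITABLE:
                return true;
            case READ_ONLY:
                return false;
            case EDITABLE_BY_OWNERS:
                return isDataOwner;
            default:
                throw new IllegalArgumentException("Unknown dataset permission: " + permission);
        }
    }

    public static void main(String[] args) {
        // A Data Scientist under EDITABLE_BY_OWNERS ends up in the ACL group, not the write group.
        System.out.println(belongsInWriteGroup(Permission.EDITABLE_BY_OWNERS, false));
    }
}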

Aggregations

HdfsUsers (io.hops.hopsworks.persistence.entity.hdfs.user.HdfsUsers): 17 usages
Users (io.hops.hopsworks.persistence.entity.user.Users): 7 usages
JupyterProject (io.hops.hopsworks.persistence.entity.jupyter.JupyterProject): 6 usages
GenericException (io.hops.hopsworks.exceptions.GenericException): 5 usages
IOException (java.io.IOException): 5 usages
ServiceException (io.hops.hopsworks.exceptions.ServiceException): 4 usages
ProjectTeam (io.hops.hopsworks.persistence.entity.project.team.ProjectTeam): 4 usages
Date (java.util.Date): 4 usages
ServiceDiscoveryException (com.logicalclocks.servicediscoverclient.exceptions.ServiceDiscoveryException): 3 usages
ElasticException (io.hops.hopsworks.exceptions.ElasticException): 3 usages
HopsSecurityException (io.hops.hopsworks.exceptions.HopsSecurityException): 3 usages
JobException (io.hops.hopsworks.exceptions.JobException): 3 usages
ProjectException (io.hops.hopsworks.exceptions.ProjectException): 3 usages
HdfsGroups (io.hops.hopsworks.persistence.entity.hdfs.user.HdfsGroups): 3 usages
Project (io.hops.hopsworks.persistence.entity.project.Project): 3 usages
AlertManagerUnreachableException (io.hops.hopsworks.alert.exception.AlertManagerUnreachableException): 2 usages
AlertManagerClientCreateException (io.hops.hopsworks.alerting.exceptions.AlertManagerClientCreateException): 2 usages
AlertManagerConfigCtrlCreateException (io.hops.hopsworks.alerting.exceptions.AlertManagerConfigCtrlCreateException): 2 usages
AlertManagerConfigReadException (io.hops.hopsworks.alerting.exceptions.AlertManagerConfigReadException): 2 usages
AlertManagerConfigUpdateException (io.hops.hopsworks.alerting.exceptions.AlertManagerConfigUpdateException): 2 usages