use of io.hops.hopsworks.persistence.entity.jobs.quota.YarnProjectsQuota in project hopsworks by logicalclocks.
the class JupyterService method startNotebookServer.
@POST
@Path("/start")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
@AllowedProjectRoles({ AllowedProjectRoles.DATA_OWNER, AllowedProjectRoles.DATA_SCIENTIST })
@JWTRequired(acceptedTokens = { Audience.API }, allowedUserRoles = { "HOPS_ADMIN", "HOPS_USER" })
public Response startNotebookServer(JupyterSettings jupyterSettings, @Context HttpServletRequest req,
    @Context SecurityContext sc, @Context UriInfo uriInfo)
    throws ProjectException, HopsSecurityException, ServiceException, GenericException, JobException {
  Users hopsworksUser = jWTHelper.getUserPrincipal(sc);
  String hdfsUser = hdfsUsersController.getHdfsUserName(project, hopsworksUser);
  // Fall back to the current user if none was set by the front-end
  if (jupyterSettings.getUsers() == null) {
    jupyterSettings.setUsers(hopsworksUser);
  }
  if (project.getPaymentType().equals(PaymentType.PREPAID)) {
    YarnProjectsQuota projectQuota = yarnProjectsQuotaFacade.findByProjectName(project.getName());
    if (projectQuota == null || projectQuota.getQuotaRemaining() <= 0) {
      throw new ProjectException(RESTCodes.ProjectErrorCode.PROJECT_QUOTA_ERROR, Level.FINE);
    }
  }
  if (project.getPythonEnvironment() == null) {
    throw new ProjectException(RESTCodes.ProjectErrorCode.ANACONDA_NOT_ENABLED, Level.FINE);
  }
  if (jupyterSettings.getMode() == null) {
    // Set the default mode for Jupyter if mode is null
    jupyterSettings.setMode(JupyterMode.JUPYTER_LAB);
  }
  // Jupyter Git works only for JupyterLab
  if (jupyterSettings.isGitBackend() && jupyterSettings.getMode().equals(JupyterMode.JUPYTER_CLASSIC)) {
    throw new ServiceException(RESTCodes.ServiceErrorCode.JUPYTER_START_ERROR, Level.FINE,
        "Git support available only in JupyterLab");
  }
  // Do not allow auto push on shutdown if the API key is missing
  GitConfig gitConfig = jupyterSettings.getGitConfig();
  if (jupyterSettings.isGitBackend() && gitConfig.getShutdownAutoPush()
      && Strings.isNullOrEmpty(gitConfig.getApiKeyName())) {
    throw new ServiceException(RESTCodes.ServiceErrorCode.JUPYTER_START_ERROR, Level.FINE,
        "Auto push not supported if api key is not configured.");
  }
  // Verify that the API token has write access on the repo if ShutdownAutoPush is enabled
  if (jupyterSettings.isGitBackend() && gitConfig.getShutdownAutoPush()
      && !jupyterNbVCSController.hasWriteAccess(hopsworksUser, gitConfig.getApiKeyName(),
          gitConfig.getRemoteGitURL(), gitConfig.getGitBackend())) {
    throw new ServiceException(RESTCodes.ServiceErrorCode.JUPYTER_START_ERROR, Level.FINE,
        "API token " + gitConfig.getApiKeyName() + " does not have write access on "
            + gitConfig.getRemoteGitURL());
  }
  JupyterProject jp = jupyterFacade.findByUser(hdfsUser);
  if (jp == null) {
    HdfsUsers user = hdfsUsersFacade.findByName(hdfsUser);
    String configSecret = DigestUtils.sha256Hex(Integer.toString(ThreadLocalRandom.current().nextInt()));
    JupyterDTO dto = null;
    DistributedFileSystemOps dfso = dfsService.getDfsOps();
    String allowOriginHost = uriInfo.getBaseUri().getHost();
    int allowOriginPort = uriInfo.getBaseUri().getPort();
    String allowOriginPortStr = allowOriginPort != -1 ? ":" + allowOriginPort : "";
    String allowOrigin = settings.getJupyterOriginScheme() + "://" + allowOriginHost + allowOriginPortStr;
    try {
      jupyterSettingsFacade.update(jupyterSettings);
      // Inspect dependencies
      sparkController.inspectDependencies(project, hopsworksUser,
          (SparkJobConfiguration) jupyterSettings.getJobConfig());
      dto = jupyterManager.startJupyterServer(project, configSecret, hdfsUser, hopsworksUser,
          jupyterSettings, allowOrigin);
      jupyterJWTManager.materializeJWT(hopsworksUser, project, jupyterSettings, dto.getCid(),
          dto.getPort(), JUPYTER_JWT_AUD);
      HopsUtils.materializeCertificatesForUserCustomDir(project.getName(), user.getUsername(),
          settings.getHdfsTmpCertDir(), dfso, certificateMaterializer, settings, dto.getCertificatesDir());
      jupyterManager.waitForStartup(project, hopsworksUser);
    } catch (ServiceException | TimeoutException ex) {
      if (dto != null) {
        jupyterController.shutdownQuietly(project, hdfsUser, hopsworksUser, configSecret,
            dto.getCid(), dto.getPort());
      }
      throw new ServiceException(RESTCodes.ServiceErrorCode.JUPYTER_START_ERROR, Level.SEVERE,
          ex.getMessage(), null, ex);
    } catch (IOException ex) {
      if (dto != null) {
        jupyterController.shutdownQuietly(project, hdfsUser, hopsworksUser, configSecret,
            dto.getCid(), dto.getPort());
      }
      throw new HopsSecurityException(RESTCodes.SecurityErrorCode.CERT_MATERIALIZATION_ERROR,
          Level.SEVERE, ex.getMessage(), null, ex);
    } finally {
      if (dfso != null) {
        dfsService.closeDfsClient(dfso);
      }
    }
    String externalIp = Ip.getHost(req.getRequestURL().toString());
    try {
      Date expirationDate = new Date();
      Calendar cal = Calendar.getInstance();
      cal.setTime(expirationDate);
      cal.add(Calendar.HOUR_OF_DAY, jupyterSettings.getShutdownLevel());
      expirationDate = cal.getTime();
      jp = jupyterFacade.saveServer(externalIp, project, configSecret, dto.getPort(), user.getId(),
          dto.getToken(), dto.getCid(), expirationDate, jupyterSettings.isNoLimit());
      // Set minutes left until the notebook server is killed
      Duration durationLeft = Duration.between(new Date().toInstant(), jp.getExpires().toInstant());
      jp.setMinutesUntilExpiration(durationLeft.toMinutes());
    } catch (Exception e) {
      LOGGER.log(Level.SEVERE, "Failed to save Jupyter notebook settings", e);
      jupyterController.shutdownQuietly(project, hdfsUser, hopsworksUser, configSecret,
          dto.getCid(), dto.getPort());
    }
    if (jp == null) {
      throw new ServiceException(RESTCodes.ServiceErrorCode.JUPYTER_SAVE_SETTINGS_ERROR, Level.SEVERE);
    }
    if (jupyterSettings.isGitBackend()) {
      try {
        // Init is idempotent, calling it on an already initialized repo won't affect it
        jupyterNbVCSController.init(jp, jupyterSettings);
        if (jupyterSettings.getGitConfig().getStartupAutoPull()) {
          jupyterNbVCSController.pull(jp, jupyterSettings);
        }
      } catch (ServiceException ex) {
        jupyterController.shutdownQuietly(project, hdfsUser, hopsworksUser, configSecret,
            dto.getCid(), dto.getPort());
        throw ex;
      }
    }
  } else {
    throw new ServiceException(RESTCodes.ServiceErrorCode.JUPYTER_SERVER_ALREADY_RUNNING, Level.FINE);
  }
  return noCacheResponse.getNoCacheResponseBuilder(Response.Status.OK).entity(jp).build();
}
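The prepaid-quota guard at the top of this method reappears verbatim in AbstractExecutionController.start further down. A minimal sketch of how the check could be factored into a shared helper; the helper name is hypothetical and not part of the Hopsworks API:

// Hypothetical helper (not in Hopsworks): rejects prepaid projects with no YARN quota left.
private void checkPrepaidYarnQuota(Project project) throws ProjectException {
  if (project.getPaymentType().equals(PaymentType.PREPAID)) {
    YarnProjectsQuota projectQuota = yarnProjectsQuotaFacade.findByProjectName(project.getName());
    if (projectQuota == null || projectQuota.getQuotaRemaining() <= 0) {
      throw new ProjectException(RESTCodes.ProjectErrorCode.PROJECT_QUOTA_ERROR, Level.FINE);
    }
  }
}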
use of io.hops.hopsworks.persistence.entity.jobs.quota.YarnProjectsQuota in project hopsworks by logicalclocks.
the class ProjectController method removeQuotas.
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
private void removeQuotas(Project project) {
  YarnProjectsQuota yarnProjectsQuota = yarnProjectsQuotaFacade.findByProjectName(project.getName());
  yarnProjectsQuotaFacade.remove(yarnProjectsQuota);
}
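As the quota checks elsewhere on this page show, findByProjectName returns null when no quota row exists, so remove would then be handed a null entity. A defensive variant of the same cleanup, purely as a sketch:

// Sketch: skip removal when the project never had a quota row persisted,
// e.g. if project creation failed before setProjectOwnerAndQuotas ran.
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
private void removeQuotas(Project project) {
  YarnProjectsQuota yarnProjectsQuota = yarnProjectsQuotaFacade.findByProjectName(project.getName());
  if (yarnProjectsQuota != null) {
    yarnProjectsQuotaFacade.remove(yarnProjectsQuota);
  }
}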
use of io.hops.hopsworks.persistence.entity.jobs.quota.YarnProjectsQuota in project hopsworks by logicalclocks.
the class ProjectController method setProjectOwnerAndQuotas.
public void setProjectOwnerAndQuotas(Project project, DistributedFileSystemOps dfso, Users user) throws IOException {
  this.yarnProjectsQuotaFacade.persistYarnProjectsQuota(
      new YarnProjectsQuota(project.getName(), settings.getYarnDefaultQuota(), 0));
  this.yarnProjectsQuotaFacade.flushEm();
  // Here we set only the project quota. The HiveDB and Feature Store quotas are set in the HiveController
  if (settings.getHdfsDefaultQuotaInMBs() > -1) {
    dfso.setHdfsSpaceQuotaInMBs(new Path(Utils.getProjectPath(project.getName())),
        settings.getHdfsDefaultQuotaInMBs());
  }
  projectFacade.setTimestampQuotaUpdate(project, new Date());
  // Add the activity information
  logActivity(ActivityFacade.NEW_PROJECT + project.getName(), user, project, ActivityFlag.PROJECT);
  // Update role information in the project
  addProjectOwner(project, user);
  LOGGER.log(Level.FINE, "{0} - project created successfully.", project.getName());
}
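From the constructor call above and the getQuotaRemaining/setQuotaRemaining accessors used elsewhere on this page, the entity plausibly has the following shape. The column mappings and the meaning of the third constructor argument (assumed here to be the quota consumed so far) are assumptions, not the actual Hopsworks source:

// Sketch of YarnProjectsQuota as implied by its usages on this page.
@Entity
@Table(name = "yarn_projects_quota")
public class YarnProjectsQuota implements Serializable {

  @Id
  private String projectname;   // assumed primary key
  private float quotaRemaining; // checked by the prepaid guards above
  private float total;          // assumed: quota consumed so far

  public YarnProjectsQuota() {
  }

  public YarnProjectsQuota(String projectname, float quotaRemaining, float total) {
    this.projectname = projectname;
    this.quotaRemaining = quotaRemaining;
    this.total = total;
  }

  public float getQuotaRemaining() {
    return quotaRemaining;
  }

  public void setQuotaRemaining(float quotaRemaining) {
    this.quotaRemaining = quotaRemaining;
  }
}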
use of io.hops.hopsworks.persistence.entity.jobs.quota.YarnProjectsQuota in project hopsworks by logicalclocks.
the class YarnProjectsQuotaFacade method changeYarnQuota.
public void changeYarnQuota(String projectName, float quota) {
  YarnProjectsQuota project = findByProjectName(projectName);
  if (project != null) {
    project.setQuotaRemaining(quota);
    em.merge(project);
  }
}
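changeYarnQuota depends on findByProjectName from the same facade. A minimal JPA sketch of that lookup, assuming a conventional named query (the query name is an assumption); note it must return null rather than throw, since every caller on this page null-checks the result:

// Sketch, assuming a standard named query on the entity; the real facade may differ.
public YarnProjectsQuota findByProjectName(String projectName) {
  TypedQuery<YarnProjectsQuota> query =
      em.createNamedQuery("YarnProjectsQuota.findByProjectname", YarnProjectsQuota.class);
  query.setParameter("projectname", projectName);
  try {
    return query.getSingleResult();
  } catch (NoResultException e) {
    return null; // callers treat a missing row as "no quota"
  }
}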
use of io.hops.hopsworks.persistence.entity.jobs.quota.YarnProjectsQuota in project hopsworks by logicalclocks.
the class AbstractExecutionController method start.
@Override
@TransactionAttribute(TransactionAttributeType.NOT_SUPPORTED)
public Execution start(Jobs job, String args, Users user)
    throws JobException, GenericException, ServiceException, ProjectException {
  // If the limit on the number of executions for this job has been reached, return an error
  checkExecutionLimit(job);
  // A user should not be able to start a job if the project is prepaid and has no quota remaining.
  if (job.getProject().getPaymentType().equals(PaymentType.PREPAID)) {
    YarnProjectsQuota projectQuota = yarnProjectsQuotaFacade.findByProjectName(job.getProject().getName());
    if (projectQuota == null || projectQuota.getQuotaRemaining() <= 0) {
      throw new ProjectException(RESTCodes.ProjectErrorCode.PROJECT_QUOTA_ERROR, Level.FINE);
    }
  }
  // If the check is enabled and all nodemanagers are offline, throw a JobException
  if (settings.isCheckingForNodemanagerStatusEnabled() && job.getJobType() != JobType.PYTHON) {
    hostServicesFacade.findServices("nodemanager").stream()
        .filter(s -> s.getStatus() == ServiceStatus.Started)
        .findFirst()
        .orElseThrow(() -> new JobException(RESTCodes.JobErrorCode.NODEMANAGERS_OFFLINE, Level.SEVERE));
  }
  Execution exec;
  switch (job.getJobType()) {
    case FLINK:
      // Materialize certs
      return flinkController.startJob(job, user);
    case SPARK:
      exec = sparkController.startJob(job, args, user);
      if (exec == null) {
        throw new IllegalArgumentException("Problem getting execution object for: " + job.getJobType());
      }
      SparkJobConfiguration config = (SparkJobConfiguration) job.getJobConfig();
      String path = config.getAppPath();
      String pathOfInode;
      try {
        pathOfInode = Utils.prepPath(path);
      } catch (UnsupportedEncodingException ex) {
        throw new JobException(RESTCodes.JobErrorCode.JOB_START_FAILED, Level.FINE,
            "Job name: " + job.getName(), ex.getMessage(), ex);
      }
      Inode inode = inodeController.getInodeAtPath(pathOfInode);
      String inodeName = inode.getInodePK().getName();
      activityFacade.persistActivity(ActivityFacade.EXECUTED_JOB + inodeName, job.getProject(), user,
          ActivityFlag.JOB);
      break;
    case PYSPARK:
      if (job.getProject().getPythonEnvironment() == null) {
        throw new ProjectException(RESTCodes.ProjectErrorCode.ANACONDA_NOT_ENABLED, Level.FINEST);
      }
      exec = sparkController.startJob(job, args, user);
      if (exec == null) {
        throw new IllegalArgumentException("Error while getting execution object for: " + job.getJobType());
      }
      break;
    default:
      throw new GenericException(RESTCodes.GenericErrorCode.UNKNOWN_ACTION, Level.FINE,
          "Unsupported job type: " + job.getJobType());
  }
  return exec;
}
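Note that changeYarnQuota above sets quotaRemaining to an absolute value rather than decrementing it. A hedged sketch of how a billing step could charge usage against the remaining quota using only the facade methods shown on this page; chargeProject is hypothetical and not part of Hopsworks:

// Hypothetical accounting step built from the facade calls shown above.
public void chargeProject(String projectName, float amountUsed) {
  YarnProjectsQuota quota = yarnProjectsQuotaFacade.findByProjectName(projectName);
  if (quota != null) {
    yarnProjectsQuotaFacade.changeYarnQuota(projectName, quota.getQuotaRemaining() - amountUsed);
  }
}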