Use of io.hops.hopsworks.exceptions.HopsSecurityException in project hopsworks by logicalclocks.
The class JupyterService, method startNotebookServer.
@POST
@Path("/start")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
@AllowedProjectRoles({ AllowedProjectRoles.DATA_OWNER, AllowedProjectRoles.DATA_SCIENTIST })
@JWTRequired(acceptedTokens = { Audience.API }, allowedUserRoles = { "HOPS_ADMIN", "HOPS_USER" })
public Response startNotebookServer(JupyterSettings jupyterSettings, @Context HttpServletRequest req,
    @Context SecurityContext sc, @Context UriInfo uriInfo)
    throws ProjectException, HopsSecurityException, ServiceException, GenericException, JobException {
  Users hopsworksUser = jWTHelper.getUserPrincipal(sc);
  String hdfsUser = hdfsUsersController.getHdfsUserName(project, hopsworksUser);
  // jupyterSettings comes from the front-end; default the user if it was not set
  if (jupyterSettings.getUsers() == null) {
    jupyterSettings.setUsers(hopsworksUser);
  }
  if (project.getPaymentType().equals(PaymentType.PREPAID)) {
    YarnProjectsQuota projectQuota = yarnProjectsQuotaFacade.findByProjectName(project.getName());
    if (projectQuota == null || projectQuota.getQuotaRemaining() <= 0) {
      throw new ProjectException(RESTCodes.ProjectErrorCode.PROJECT_QUOTA_ERROR, Level.FINE);
    }
  }
  if (project.getPythonEnvironment() == null) {
    throw new ProjectException(RESTCodes.ProjectErrorCode.ANACONDA_NOT_ENABLED, Level.FINE);
  }
  if (jupyterSettings.getMode() == null) {
    // set the default mode for Jupyter if mode is null
    jupyterSettings.setMode(JupyterMode.JUPYTER_LAB);
  }
  // Jupyter Git works only for JupyterLab
  if (jupyterSettings.isGitBackend() && jupyterSettings.getMode().equals(JupyterMode.JUPYTER_CLASSIC)) {
    throw new ServiceException(RESTCodes.ServiceErrorCode.JUPYTER_START_ERROR, Level.FINE,
        "Git support available only in JupyterLab");
  }
  // Do not allow auto push on shutdown if the API key is missing
  GitConfig gitConfig = jupyterSettings.getGitConfig();
  if (jupyterSettings.isGitBackend() && gitConfig.getShutdownAutoPush()
      && Strings.isNullOrEmpty(gitConfig.getApiKeyName())) {
    throw new ServiceException(RESTCodes.ServiceErrorCode.JUPYTER_START_ERROR, Level.FINE,
        "Auto push not supported if api key is not configured.");
  }
  // Verify that the API token has write access on the repo if ShutdownAutoPush is enabled
  if (jupyterSettings.isGitBackend() && gitConfig.getShutdownAutoPush()
      && !jupyterNbVCSController.hasWriteAccess(hopsworksUser, gitConfig.getApiKeyName(),
          gitConfig.getRemoteGitURL(), gitConfig.getGitBackend())) {
    throw new ServiceException(RESTCodes.ServiceErrorCode.JUPYTER_START_ERROR, Level.FINE,
        "API token " + gitConfig.getApiKeyName() + " does not have write access on " + gitConfig.getRemoteGitURL());
  }
  JupyterProject jp = jupyterFacade.findByUser(hdfsUser);
  if (jp == null) {
    HdfsUsers user = hdfsUsersFacade.findByName(hdfsUser);
    String configSecret = DigestUtils.sha256Hex(Integer.toString(ThreadLocalRandom.current().nextInt()));
    JupyterDTO dto = null;
    DistributedFileSystemOps dfso = dfsService.getDfsOps();
    String allowOriginHost = uriInfo.getBaseUri().getHost();
    int allowOriginPort = uriInfo.getBaseUri().getPort();
    String allowOriginPortStr = allowOriginPort != -1 ? ":" + allowOriginPort : "";
    String allowOrigin = settings.getJupyterOriginScheme() + "://" + allowOriginHost + allowOriginPortStr;
    try {
      jupyterSettingsFacade.update(jupyterSettings);
      // Inspect dependencies
      sparkController.inspectDependencies(project, hopsworksUser,
          (SparkJobConfiguration) jupyterSettings.getJobConfig());
      dto = jupyterManager.startJupyterServer(project, configSecret, hdfsUser, hopsworksUser, jupyterSettings,
          allowOrigin);
      jupyterJWTManager.materializeJWT(hopsworksUser, project, jupyterSettings, dto.getCid(), dto.getPort(),
          JUPYTER_JWT_AUD);
      HopsUtils.materializeCertificatesForUserCustomDir(project.getName(), user.getUsername(),
          settings.getHdfsTmpCertDir(), dfso, certificateMaterializer, settings, dto.getCertificatesDir());
      jupyterManager.waitForStartup(project, hopsworksUser);
    } catch (ServiceException | TimeoutException ex) {
      if (dto != null) {
        jupyterController.shutdownQuietly(project, hdfsUser, hopsworksUser, configSecret, dto.getCid(), dto.getPort());
      }
      throw new ServiceException(RESTCodes.ServiceErrorCode.JUPYTER_START_ERROR, Level.SEVERE, ex.getMessage(),
          null, ex);
    } catch (IOException ex) {
      if (dto != null) {
        jupyterController.shutdownQuietly(project, hdfsUser, hopsworksUser, configSecret, dto.getCid(), dto.getPort());
      }
      throw new HopsSecurityException(RESTCodes.SecurityErrorCode.CERT_MATERIALIZATION_ERROR, Level.SEVERE,
          ex.getMessage(), null, ex);
    } finally {
      if (dfso != null) {
        dfsService.closeDfsClient(dfso);
      }
    }
    String externalIp = Ip.getHost(req.getRequestURL().toString());
    try {
      Date expirationDate = new Date();
      Calendar cal = Calendar.getInstance();
      cal.setTime(expirationDate);
      cal.add(Calendar.HOUR_OF_DAY, jupyterSettings.getShutdownLevel());
      expirationDate = cal.getTime();
      jp = jupyterFacade.saveServer(externalIp, project, configSecret, dto.getPort(), user.getId(), dto.getToken(),
          dto.getCid(), expirationDate, jupyterSettings.isNoLimit());
      // set minutes left until the notebook server is killed
      Duration durationLeft = Duration.between(new Date().toInstant(), jp.getExpires().toInstant());
      jp.setMinutesUntilExpiration(durationLeft.toMinutes());
    } catch (Exception e) {
      LOGGER.log(Level.SEVERE, "Failed to save Jupyter notebook settings", e);
      jupyterController.shutdownQuietly(project, hdfsUser, hopsworksUser, configSecret, dto.getCid(), dto.getPort());
    }
    if (jp == null) {
      throw new ServiceException(RESTCodes.ServiceErrorCode.JUPYTER_SAVE_SETTINGS_ERROR, Level.SEVERE);
    }
    if (jupyterSettings.isGitBackend()) {
      try {
        // Init is idempotent, calling it on an already initialized repo won't affect it
        jupyterNbVCSController.init(jp, jupyterSettings);
        if (jupyterSettings.getGitConfig().getStartupAutoPull()) {
          jupyterNbVCSController.pull(jp, jupyterSettings);
        }
      } catch (ServiceException ex) {
        jupyterController.shutdownQuietly(project, hdfsUser, hopsworksUser, configSecret, dto.getCid(), dto.getPort());
        throw ex;
      }
    }
  } else {
    throw new ServiceException(RESTCodes.ServiceErrorCode.JUPYTER_SERVER_ALREADY_RUNNING, Level.FINE);
  }
  return noCacheResponse.getNoCacheResponseBuilder(Response.Status.OK).entity(jp).build();
}
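The expiration handling above mixes java.util.Date, Calendar, and java.time.Duration. A minimal stand-alone sketch of the same arithmetic using only java.time follows; the shutdownLevelHours value is an assumed stand-in for jupyterSettings.getShutdownLevel() and this is not part of the Hopsworks code.

import java.time.Duration;
import java.time.Instant;

public class ExpirationSketch {

  public static void main(String[] args) {
    // Assumed stand-in for jupyterSettings.getShutdownLevel() (hours until auto-shutdown)
    int shutdownLevelHours = 6;
    Instant now = Instant.now();
    Instant expires = now.plus(Duration.ofHours(shutdownLevelHours));
    // Same quantity the endpoint exposes via jp.setMinutesUntilExpiration(...)
    long minutesUntilExpiration = Duration.between(now, expires).toMinutes();
    System.out.println("Expires at " + expires + " (" + minutesUntilExpiration + " minutes from now)");
  }
}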
Use of io.hops.hopsworks.exceptions.HopsSecurityException in project hopsworks by logicalclocks.
The class DelaTrackerCertController, method signCsr.
public CSR signCsr(String userEmail, CSR csr)
    throws IOException, HopsSecurityException, GenericException, DelaCSRCheckException {
  ClusterCert clusterCert = checkCSR(userEmail, csr);
  CSR signedCert = certificatesController.signDelaClusterCertificate(csr);
  String certSerialNumber;
  try {
    certSerialNumber = String.valueOf(certificatesController.extractSerialNumberFromCert(signedCert.getSignedCert()));
  } catch (CertificateException e) {
    throw new HopsSecurityException(RESTCodes.SecurityErrorCode.CERT_CREATION_ERROR, Level.WARNING, null, null, e);
  }
  clusterCert.setSerialNumber(certSerialNumber);
  clusterCertFacade.update(clusterCert);
  return signedCert;
}
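certificatesController.extractSerialNumberFromCert is Hopsworks-internal. Assuming the signed certificate is PEM-encoded, a rough standard-library sketch of extracting a serial number (not the actual Hopsworks implementation) could be:

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import java.security.cert.CertificateException;
import java.security.cert.CertificateFactory;
import java.security.cert.X509Certificate;

public final class SerialNumberSketch {

  // Parses a PEM-encoded X.509 certificate and returns its serial number as a decimal string.
  public static String extractSerialNumber(String pemCert) throws CertificateException {
    CertificateFactory factory = CertificateFactory.getInstance("X.509");
    X509Certificate cert = (X509Certificate) factory.generateCertificate(
        new ByteArrayInputStream(pemCert.getBytes(StandardCharsets.US_ASCII)));
    return cert.getSerialNumber().toString();
  }
}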
Use of io.hops.hopsworks.exceptions.HopsSecurityException in project hopsworks by logicalclocks.
The class DsUpdateOperations, method deleteDatasetFile.
/**
 * Deletes a file or folder inside a top-level dataset.
 *
 * @param project the project of the user making the request
 * @param user the user making the request
 * @param fileName the name of the folder or file to remove
 * @return the full path of the deleted file
 * @throws DatasetException
 * @throws ProjectException
 * @throws HopsSecurityException
 * @throws UnsupportedEncodingException
 */
public org.apache.hadoop.fs.Path deleteDatasetFile(Project project, Users user, String fileName)
    throws DatasetException, ProjectException, HopsSecurityException, UnsupportedEncodingException {
  boolean success = false;
  DistributedFileSystemOps dfso = null;
  DsPath dsPath = pathValidator.validatePath(project, fileName);
  Dataset ds = dsPath.getDs();
  org.apache.hadoop.fs.Path fullPath = dsPath.getFullPath();
  org.apache.hadoop.fs.Path dsRelativePath = dsPath.getDsRelativePath();
  if (dsRelativePath.depth() == 0) {
    throw new IllegalArgumentException("Use endpoint DELETE /{datasetName} to delete a top-level dataset");
  }
  try {
    String username = hdfsUsersBean.getHdfsUserName(project, user);
    // If a Data Scientist requested it, do it as the project user to avoid deleting Data Owner files
    // Find the owning project of the dataset, as it might be shared
    Project owning = datasetController.getOwningProject(ds);
    boolean isMember = projectTeamFacade.isUserMemberOfProject(owning, user);
    if (isMember && projectTeamFacade.findCurrentRole(owning, user).equals(AllowedProjectRoles.DATA_OWNER)
        && owning.equals(project)) {
      // do it as the super user
      dfso = dfs.getDfsOps();
    } else {
      // do it as the project user
      dfso = dfs.getDfsOps(username);
    }
    success = dfso.rm(fullPath, true);
  } catch (AccessControlException ex) {
    throw new HopsSecurityException(RESTCodes.SecurityErrorCode.HDFS_ACCESS_CONTROL, Level.FINE,
        "Operation: delete, path: " + fullPath.toString(), ex.getMessage(), ex);
  } catch (IOException ex) {
    throw new DatasetException(RESTCodes.DatasetErrorCode.INODE_DELETION_ERROR, Level.SEVERE,
        "path: " + fullPath.toString(), ex.getMessage(), ex);
  } finally {
    if (dfso != null) {
      dfs.closeDfsClient(dfso);
    }
  }
  if (!success) {
    throw new DatasetException(RESTCodes.DatasetErrorCode.INODE_DELETION_ERROR, Level.FINE,
        "path: " + fullPath.toString());
  }
  return fullPath;
}
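DistributedFileSystemOps is a Hopsworks wrapper around the Hadoop FileSystem API. Under that assumption, the delete-and-classify-errors pattern above (AccessControlException vs. other IOExceptions) looks roughly like the following against a plain FileSystem; the path used here is hypothetical and this is only a sketch.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.AccessControlException;

public final class HdfsDeleteSketch {

  // Recursive delete, mirroring dfso.rm(fullPath, true)
  public static void deleteOrReport(FileSystem fs, Path path) {
    try {
      boolean deleted = fs.delete(path, true);
      if (!deleted) {
        System.out.println("Nothing was deleted at " + path);
      }
    } catch (AccessControlException ex) {
      // The controller above surfaces this case as HopsSecurityException(HDFS_ACCESS_CONTROL)
      System.out.println("Permission denied deleting " + path + ": " + ex.getMessage());
    } catch (IOException ex) {
      // The controller above surfaces this case as DatasetException(INODE_DELETION_ERROR)
      System.out.println("Delete failed for " + path + ": " + ex.getMessage());
    }
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration(); // reads core-site.xml / hdfs-site.xml from the classpath
    try (FileSystem fs = FileSystem.get(conf)) {
      deleteOrReport(fs, new Path("/Projects/demo/someDataset/oldFolder")); // hypothetical path
    }
  }
}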
Use of io.hops.hopsworks.exceptions.HopsSecurityException in project hopsworks by logicalclocks.
The class ProjectService, method example.
@POST
@Path("starterProject/{type}")
@Produces(MediaType.APPLICATION_JSON)
public Response example(@PathParam("type") String type, @Context HttpServletRequest req, @Context SecurityContext sc)
    throws DatasetException, GenericException, KafkaException, ProjectException, UserException, ServiceException,
    HopsSecurityException, FeaturestoreException, JobException, IOException, ElasticException, SchemaException,
    ProvenanceException {
  TourProjectType demoType;
  try {
    demoType = TourProjectType.fromString(type);
  } catch (IllegalArgumentException e) {
    throw new IllegalArgumentException("Type must be one of: " + Arrays.toString(TourProjectType.values()));
  }
  ProjectDTO projectDTO = new ProjectDTO();
  Project project = null;
  projectDTO.setDescription("A demo project for getting started with " + demoType.getDescription());
  Users user = jWTHelper.getUserPrincipal(sc);
  String username = user.getUsername();
  List<String> projectServices = new ArrayList<>();
  // save the project
  String readMeMessage = null;
  switch (demoType) {
    case KAFKA:
      // It's a Kafka guide
      projectDTO.setProjectName("demo_" + TourProjectType.KAFKA.getTourName() + "_" + username);
      populateActiveServices(projectServices, TourProjectType.KAFKA);
      readMeMessage = "jar file to demonstrate Kafka streaming";
      break;
    case SPARK:
      // It's a Spark guide
      projectDTO.setProjectName("demo_" + TourProjectType.SPARK.getTourName() + "_" + username);
      populateActiveServices(projectServices, TourProjectType.SPARK);
      readMeMessage = "jar file to demonstrate the creation of a spark batch job";
      break;
    case FS:
      // It's a Featurestore guide
      projectDTO.setProjectName("demo_" + TourProjectType.FS.getTourName() + "_" + username);
      populateActiveServices(projectServices, TourProjectType.FS);
      readMeMessage = "Dataset containing a jar file and data that can be used to run a sample spark-job for "
          + "inserting data in the feature store.";
      break;
    case ML:
      // It's a TensorFlow guide
      projectDTO.setProjectName("demo_" + TourProjectType.ML.getTourName() + "_" + username);
      populateActiveServices(projectServices, TourProjectType.ML);
      readMeMessage = "Jupyter notebooks and training data for demonstrating how to run Deep Learning";
      break;
    default:
      throw new IllegalArgumentException("Type must be one of: " + Arrays.toString(TourProjectType.values()));
  }
  projectDTO.setServices(projectServices);
  DistributedFileSystemOps dfso = null;
  DistributedFileSystemOps udfso = null;
  try {
    project = projectController.createProject(projectDTO, user, req.getSession().getId());
    dfso = dfs.getDfsOps();
    username = hdfsUsersBean.getHdfsUserName(project, user);
    udfso = dfs.getDfsOps(username);
    ProvTypeDTO projectMetaStatus = fsProvenanceController.getProjectProvType(user, project);
    String tourFilesDataset = projectController.addTourFilesToProject(user.getEmail(), project, dfso, dfso,
        demoType, projectMetaStatus);
    // TestJob dataset
    datasetController.generateReadme(udfso, tourFilesDataset, readMeMessage, project.getName());
  } catch (Exception ex) {
    projectController.cleanup(project, req.getSession().getId());
    throw ex;
  } finally {
    if (dfso != null) {
      dfso.close();
    }
    if (udfso != null) {
      dfs.closeDfsClient(udfso);
    }
  }
  return noCacheResponse.getNoCacheResponseBuilder(Response.Status.CREATED).entity(project).build();
}
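TourProjectType.fromString is Hopsworks-internal. The parse-or-reject pattern used for the {type} path parameter can be sketched with a hypothetical enum (not the real TourProjectType) as follows:

import java.util.Arrays;

public class TourTypeParseSketch {

  // Hypothetical stand-in for Hopsworks' TourProjectType
  enum DemoTourType {
    KAFKA, SPARK, FS, ML;

    static DemoTourType fromString(String value) {
      try {
        return valueOf(value.trim().toUpperCase());
      } catch (IllegalArgumentException | NullPointerException e) {
        throw new IllegalArgumentException("Type must be one of: " + Arrays.toString(values()), e);
      }
    }
  }

  public static void main(String[] args) {
    System.out.println(DemoTourType.fromString("spark")); // prints SPARK
    DemoTourType.fromString("sql"); // throws IllegalArgumentException listing the valid types
  }
}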
Use of io.hops.hopsworks.exceptions.HopsSecurityException in project hopsworks by logicalclocks.
The class DatasetController, method createFolder.
/**
 * Creates a folder in HDFS at the given path.
 * <p/>
 * @param path the full HDFS path to the folder to be created (e.g.
 * /Projects/projectA/datasetB/folder1/folder2).
 * @param fsPermission the permission to set on the created folder; if null, the
 * permission of the parent directory is used.
 * @param dfso the file system operations handle used to create the folder.
 * @return true if the folder was created successfully
 * @throws HopsSecurityException if an I/O or permission error prevents creating the folder
 */
private boolean createFolder(String path, FsPermission fsPermission, DistributedFileSystemOps dfso)
    throws HopsSecurityException {
  boolean success;
  Path location = new Path(path);
  try {
    if (fsPermission == null) {
      fsPermission = dfso.getParentPermission(location);
    }
    success = dfso.mkdir(location, fsPermission);
    if (success) {
      dfso.setPermission(location, fsPermission);
    }
  } catch (IOException ex) {
    throw new HopsSecurityException(RESTCodes.SecurityErrorCode.HDFS_ACCESS_CONTROL, Level.WARNING,
        "Permission denied: path=" + path, ex.getMessage(), ex);
  }
  return success;
}
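DistributedFileSystemOps here wraps the Hadoop FileSystem API. A rough sketch of the same mkdir-then-set-permission flow against a plain FileSystem, under that assumption (not the Hopsworks implementation):

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public final class MkdirSketch {

  // Creates 'location'; when no explicit permission is given, the parent's permission is reused,
  // mirroring the fallback in createFolder above.
  public static boolean createFolder(FileSystem fs, Path location, FsPermission permission) throws IOException {
    if (permission == null) {
      permission = fs.getFileStatus(location.getParent()).getPermission();
    }
    boolean created = fs.mkdirs(location, permission);
    if (created) {
      // mkdirs typically applies the process umask, so the permission is set explicitly afterwards
      fs.setPermission(location, permission);
    }
    return created;
  }
}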