Use of io.hops.hopsworks.persistence.entity.hdfs.inode.Inode in project hopsworks by logicalclocks.
The class DelaProjectService, method publish.
@POST
@Path("/uploads")
@Produces(MediaType.APPLICATION_JSON)
@AllowedProjectRoles({ AllowedProjectRoles.DATA_OWNER })
@JWTRequired(acceptedTokens = { Audience.API }, allowedUserRoles = { "HOPS_ADMIN", "HOPS_USER" })
public Response publish(@Context SecurityContext sc, InodeIdDTO inodeId) throws DelaException {
  Inode inode = getInode(inodeId.getId());
  Dataset dataset = getDatasetByInode(inode);
  Users user = jWTHelper.getUserPrincipal(sc);
  delaWorkerCtrl.shareDatasetWithHops(project, dataset, user);
  RESTApiJsonResponse json = new RESTApiJsonResponse();
  json.setSuccessMessage("Dataset transfer is started - published");
  return successResponse(json);
}
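The getInode and getDatasetByInode helpers are not part of this snippet, and the project field is the service's target project, set before the method is dispatched. A minimal sketch of what the two helpers might look like, assuming injected InodeFacade and DatasetFacade beans; the findById and findByInode lookups are assumptions, and the real code would throw DelaException rather than IllegalArgumentException:

private Inode getInode(Long inodeId) {
  // hypothetical lookup through an injected InodeFacade; fail fast on a missing inode
  Inode inode = inodeFacade.findById(inodeId);
  if (inode == null) {
    throw new IllegalArgumentException("inode not found: " + inodeId);
  }
  return inode;
}

private Dataset getDatasetByInode(Inode inode) {
  // hypothetical lookup through an injected DatasetFacade
  Dataset dataset = datasetFacade.findByInode(inode);
  if (dataset == null) {
    throw new IllegalArgumentException("no dataset registered for inode: " + inode.getId());
  }
  return dataset;
}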
Use of io.hops.hopsworks.persistence.entity.hdfs.inode.Inode in project hopsworks by logicalclocks.
The class ElasticHitsBuilder, method buildElasticInodes.
public void buildElasticInodes(SearchHit hit, ElasticInodeDTO elasticInodeDTO) {
  ElasticInodeDTO item = new ElasticInodeDTO();
  item.setMap(hit.getSourceAsMap());
  item.setScore(hit.getScore());
  item.setInodeId(Long.parseLong(hit.getId()));
  item.setParentDatasetIId(getLongValue(hit, "dataset_id"));
  item.setParentProjectId(getIntValue(hit, "project_id"));
  item.setName(getStringValue(hit, "name"));
  item.setCreator(getStringValue(hit, "user"));
  item.setDescription(getStringValue(hit, "description"));
  item.setSize(getLongValue(hit, "size"));
  item.setHighlights(hit.getHighlightFields());
  if (elasticInodeDTO.getItems() == null) {
    elasticInodeDTO.setItems(new ArrayList<>());
  }
  if (item.getParentDatasetIId() != null) {
    Dataset dataset = datasetController.getDatasetByInodeId(item.getParentDatasetIId());
    if (dataset != null) {
      item.setParentDatasetId(dataset.getId());
      item.setParentDatasetName(dataset.getName());
      item.setModificationTime(new Date(dataset.getInode().getModificationTime().longValue()));
    }
  }
  if (item.getInodeId() != null) {
    Inode inode = inodeFacade.findById(item.getInodeId());
    if (inode != null) {
      item.setPath(inodeController.getPath(inode));
    }
  }
  item.setCreator(setUserName(item.getCreator()));
  elasticInodeDTO.getItems().add(item);
}
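The getLongValue, getIntValue, and getStringValue helpers are not shown above. A minimal sketch of them, assuming they read a field out of the hit's _source map and return null when it is absent; the null-tolerant parsing is an assumption:

private Long getLongValue(SearchHit hit, String field) {
  // Elasticsearch may deserialize numeric source fields as Integer or Long
  // depending on magnitude, so go through Number rather than casting directly
  Object value = hit.getSourceAsMap().get(field);
  return value == null ? null : ((Number) value).longValue();
}

private Integer getIntValue(SearchHit hit, String field) {
  Object value = hit.getSourceAsMap().get(field);
  return value == null ? null : ((Number) value).intValue();
}

private String getStringValue(SearchHit hit, String field) {
  Object value = hit.getSourceAsMap().get(field);
  return value == null ? null : value.toString();
}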
Use of io.hops.hopsworks.persistence.entity.hdfs.inode.Inode in project hopsworks by logicalclocks.
The class DatasetBuilder, method buildItems.
/**
 * Build a list of Datasets
 *
 * @param uriInfo
 * @param resourceRequest
 * @param sharedDatasetResourceRequest
 * @param project
 * @param user
 * @return
 */
public DatasetDTO buildItems(UriInfo uriInfo, ResourceRequest resourceRequest,
    ResourceRequest sharedDatasetResourceRequest, Project project, Users user)
    throws DatasetException, MetadataException, SchematizedTagException {
  Inode parent = project.getInode();
  datasetHelper.checkResourceRequestLimit(resourceRequest, parent.getChildrenNum());
  String parentPath = inodeController.getPath(parent);
  Users dirOwner = userFacade.findByUsername(parent.getHdfsUser().getUsername());
  return items(new DatasetDTO(), uriInfo, resourceRequest, sharedDatasetResourceRequest, project, user,
      parentPath, dirOwner);
}
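inodeController.getPath resolves an Inode back to its absolute path. A minimal sketch of that resolution, assuming each inode's primary key exposes its name and parent inode id as the snippets here suggest; the getParentId accessor and the findById walk are assumptions, not the actual InodeController implementation:

private String getPath(Inode inode) {
  // walk the inode hierarchy up to the root, prepending each path component
  StringBuilder path = new StringBuilder();
  Inode current = inode;
  while (current != null) {
    path.insert(0, "/" + current.getInodePK().getName());
    current = inodeFacade.findById(current.getInodePK().getParentId());  // hypothetical parent lookup
  }
  return path.toString();
}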
Use of io.hops.hopsworks.persistence.entity.hdfs.inode.Inode in project hopsworks by logicalclocks.
The class RequestService, method requestAccess.
@POST
@Path("/access")
@Produces(MediaType.APPLICATION_JSON)
public Response requestAccess(RequestDTO requestDTO, @Context SecurityContext sc)
    throws DatasetException, ProjectException {
  RESTApiJsonResponse json = new RESTApiJsonResponse();
  if (requestDTO == null || requestDTO.getInodeId() == null || requestDTO.getProjectId() == null) {
    throw new IllegalArgumentException("requestDTO was not provided or was incomplete!");
  }
  Users user = jWTHelper.getUserPrincipal(sc);
  Inode inode = inodes.findById(requestDTO.getInodeId());
  // project that owns the requested dataset
  Project proj = datasetCtrl.getOwningProject(inode);
  Dataset ds = datasetFacade.findByProjectAndInode(proj, inode);
  // project requesting access
  Project project = projectFacade.find(requestDTO.getProjectId());
  Dataset dsInRequesting = datasetFacade.findByProjectAndInode(project, inode);
  if (dsInRequesting != null) {
    throw new DatasetException(RESTCodes.DatasetErrorCode.DESTINATION_EXISTS, Level.INFO);
  }
  ProjectTeam projectTeam = projectTeamFacade.findByPrimaryKey(project, user);
  ProjectTeam projTeam = projectTeamFacade.findByPrimaryKey(proj, user);
  if (projTeam != null && proj.equals(project)) {
    throw new ProjectException(RESTCodes.ProjectErrorCode.TEAM_MEMBER_ALREADY_EXISTS, Level.FINE);
  }
  DatasetRequest dsRequest = datasetRequest.findByProjectAndDataset(project, ds);
  // email body
  String msg = "Hi " + proj.getOwner().getFname() + " " + proj.getOwner().getLname() + ", \n\n"
      + user.getFname() + " " + user.getLname() + " wants access to a dataset in a project you own. \n\n"
      + "Dataset name: " + ds.getInode().getInodePK().getName() + "\n"
      + "Project name: " + proj.getName() + "\n";
  if (!Strings.isNullOrEmpty(requestDTO.getMessageContent())) {
    msg += "Attached message: " + requestDTO.getMessageContent() + "\n";
  }
  msg += "After logging in to Hopsworks go to: /project/" + proj.getId() + "/datasets "
      + "if you want to share this dataset. \n";
  // If a request with the same role already exists, or the prior request came from
  // a data owner, reject the duplicate.
  if (dsRequest != null && (dsRequest.getProjectTeam().getTeamRole().equals(projectTeam.getTeamRole())
      || dsRequest.getProjectTeam().getTeamRole().equals(AllowedProjectRoles.DATA_OWNER))) {
    throw new DatasetException(RESTCodes.DatasetErrorCode.DATASET_REQUEST_EXISTS, Level.FINE);
  } else if (dsRequest != null && projectTeam.getTeamRole().equals(AllowedProjectRoles.DATA_OWNER)) {
    // upgrade the existing request to the data owner's role and message
    dsRequest.setProjectTeam(projectTeam);
    dsRequest.setMessageContent(requestDTO.getMessageContent());
    datasetRequest.merge(dsRequest);
  } else {
    Users to = userFacade.findByEmail(proj.getOwner().getEmail());
    String message = "Hi " + to.getFname() + "<br>"
        + "I would like to request access to a dataset in a project you own. <br>"
        + "Project name: " + proj.getName() + "<br>"
        + "Dataset name: " + ds.getInode().getInodePK().getName() + "<br>"
        + "To be shared with my project: " + project.getName() + ".<br>"
        + "Thank you in advance.";
    String preview = user.getFname() + " would like to have access to a dataset in a project you own.";
    String subject = Settings.MESSAGE_DS_REQ_SUBJECT;
    String path = "project/" + proj.getId() + "/datasets";
    // to, from, msg, requested path
    Message newMsg = new Message(user, to, null, message, true, false);
    newMsg.setPath(path);
    newMsg.setSubject(subject);
    newMsg.setPreview(preview);
    messageBean.send(newMsg);
    dsRequest = new DatasetRequest(ds, projectTeam, requestDTO.getMessageContent(), newMsg);
    try {
      datasetRequest.persistDataset(dsRequest);
    } catch (Exception ex) {
      messageBean.remove(newMsg);
      throw new DatasetException(RESTCodes.DatasetErrorCode.DATASET_REQUEST_ERROR, Level.WARNING,
          ex.getMessage(), null, ex);
    }
  }
  try {
    emailBean.sendEmail(proj.getOwner().getEmail(), RecipientType.TO,
        "Access request for dataset " + ds.getInode().getInodePK().getName(), msg);
  } catch (MessagingException ex) {
    json.setErrorMsg("Could not send e-mail to " + proj.getOwner().getEmail());
    datasetRequest.remove(dsRequest);
    return noCacheResponse.getNoCacheResponseBuilder(Response.Status.OK).entity(json).build();
  }
  json.setSuccessMessage("Request sent successfully.");
  return noCacheResponse.getNoCacheResponseBuilder(Response.Status.OK).entity(json).build();
}
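A hedged example of calling this endpoint from a JAX-RS client. The JSON field names (inodeId, projectId, messageContent) are inferred from the RequestDTO getters used above; the base URL, mount path, and token are hypothetical:

import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;
import javax.ws.rs.client.Entity;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;

public class RequestAccessExample {
  public static void main(String[] args) {
    Client client = ClientBuilder.newClient();
    // field names inferred from RequestDTO#getInodeId/getProjectId/getMessageContent
    String body = "{\"inodeId\": 12345, \"projectId\": 67, "
        + "\"messageContent\": \"Please share this dataset with my project.\"}";
    Response response = client
        .target("https://hopsworks.example.com/hopsworks-api/api")  // hypothetical base URL
        .path("request/access")  // actual path depends on how RequestService is mounted
        .request(MediaType.APPLICATION_JSON)
        .header("Authorization", "Bearer <jwt>")  // Hopsworks endpoints expect a JWT
        .post(Entity.json(body));
    System.out.println("HTTP " + response.getStatus());
    client.close();
  }
}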
Use of io.hops.hopsworks.persistence.entity.hdfs.inode.Inode in project hopsworks by logicalclocks.
The class UploadService, method confFileUpload.
/**
 * Sets the upload path for the file to be uploaded.
 * <p/>
 * @param datasetPath the DatasetPath object built by DatasetService
 */
private void confFileUpload(DatasetPath datasetPath) {
  if (datasetPath.getRelativePath() != null) {
    // Validate each component of the path: either it already exists
    // or it is a valid directory name.
    String[] dsPathComponents = datasetPath.getDatasetRelativePath().split(File.separator);
    // Used to compute the partition id; start from the depth of the dataset dir.
    int depth = datasetController.getDatasetPath(datasetPath.getDataset()).depth() + 1;
    Inode parent = datasetPath.getDataset().getInode();
    boolean exist = true;
    for (String dirName : dsPathComponents) {
      if (parent != null) {
        int pathLen = depth;
        long partitionId = HopsUtils.calculatePartitionId(parent.getId(), dirName, pathLen);
        parent = inodes.findByInodePK(parent, dirName, partitionId);
        depth += 1;
      } else {
        exist = false;
      }
    }
    // If the path exists, check whether the file exists.
    if (exist) {
      this.fileParent = parent;
    }
  } else {
    // The user is uploading directly into a dataset.
    // We are sure the dir exists and the inode is the dataset inode.
    this.fileParent = datasetPath.getDataset().getInode();
  }
  this.path = datasetPath.getFullPath().toString();
}
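A condensed sketch of the same resolution for a concrete nested path, reusing only the calls that appear in the snippet above; the literal path and the dataset variable are illustrative:

// Resolve "logs/2024/06" under a dataset inode, mirroring the loop above.
Inode parent = dataset.getInode();
int depth = datasetController.getDatasetPath(dataset).depth() + 1;
for (String dirName : "logs/2024/06".split(File.separator)) {
  if (parent == null) {
    break;  // an intermediate directory is missing
  }
  // each child inode's partition id is derived from its parent's id, its name, and its depth
  long partitionId = HopsUtils.calculatePartitionId(parent.getId(), dirName, depth);
  parent = inodes.findByInodePK(parent, dirName, partitionId);
  depth += 1;
}
boolean pathExists = parent != null;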