Usage of io.hops.hopsworks.exceptions.DatasetException in the hopsworks project by logicalclocks.
Class: DatasetController — method: delete.
// Deletes a dataset (or a path inside one) on behalf of a user.
// For a dataset shared INTO this project, only the share record is dropped — the
// files remain because this project does not own them. For owned paths the HDFS
// delete is performed and, for a top-level dataset, its HDFS groups are removed.
//
// project   - project the request originates from
// user      - requesting user
// fullPath  - absolute HopsFS path to delete
// dataset   - dataset that fullPath belongs to
// isDataset - true when fullPath is the dataset root itself, false for a child path
// throws DatasetException on permission denial, missing inode, or failed deletion
public void delete(Project project, Users user, Path fullPath, Dataset dataset, boolean isDataset) throws DatasetException {
boolean success;
String username = hdfsUsersController.getHdfsUserName(project, user);
// Resolve the owning project; it may differ from 'project' when the dataset is shared.
Project owning = getOwningProject(dataset);
DistributedFileSystemOps dfso = null;
if (isDataset && dataset.isShared(project)) {
// The user is trying to delete a dataset. Drop it from the table
// But leave it in hopsfs because the user doesn't have the right to delete it
unshare(project, user, dataset, project.getName());
} else {
try {
// If a Data Scientist requested it, do it as project user to avoid deleting Data Owner files
// Find project of dataset as it might be shared
boolean isMember = projectTeamFacade.isUserMemberOfProject(owning, user);
// NOTE(review): findCurrentRole could plausibly return null for a non-member,
// but isMember is checked first so the equals() call looks safe here — confirm.
if (isMember && projectTeamFacade.findCurrentRole(owning, user).equals(AllowedRoles.DATA_OWNER) && owning.equals(project)) {
// do it as super user
dfso = dfs.getDfsOps();
} else {
// do it as project user
dfso = dfs.getDfsOps(username);
}
if (isDataset) {
success = deleteDatasetDir(dataset, fullPath, dfso);
} else {
// Recursive delete of an arbitrary path inside the dataset.
success = dfso.rm(fullPath, true);
}
} catch (AccessControlException ae) {
throw new DatasetException(RESTCodes.DatasetErrorCode.DATASET_ACCESS_PERMISSION_DENIED, Level.FINE, "path: " + fullPath.toString(), ae.getMessage(), ae);
} catch (FileNotFoundException fnfe) {
throw new DatasetException(RESTCodes.DatasetErrorCode.INODE_NOT_FOUND, Level.FINE, "path: " + fullPath.toString(), fnfe.getMessage(), fnfe);
} catch (IOException ex) {
throw new DatasetException(RESTCodes.DatasetErrorCode.INODE_DELETION_ERROR, Level.SEVERE, "path: " + fullPath.toString(), ex.getMessage(), ex);
} finally {
// Always release the filesystem handle, even on the exceptional paths above.
dfs.closeDfsClient(dfso);
}
// Deletion returned false without throwing — surface it as a deletion error.
if (!success) {
throw new DatasetException(RESTCodes.DatasetErrorCode.INODE_DELETION_ERROR, Level.FINE, "path: " + fullPath.toString());
}
if (isDataset) {
// remove the groups associated with this dataset as it is a toplevel ds
try {
hdfsUsersController.deleteDatasetGroups(project, dataset);
} catch (IOException ex) {
// FIXME: take an action?
// Best-effort cleanup: the dataset itself is already gone, so only warn.
LOGGER.log(Level.WARNING, "Error while trying to delete the dataset groups", ex);
}
}
}
}
Usage of io.hops.hopsworks.exceptions.DatasetException in the hopsworks project by logicalclocks.
Class: DatasetController — method: share.
/**
 * Shares a dataset with another project.
 * For an accepted feature-store share, the matching training dataset (if any)
 * and the statistics service dataset are shared along with it.
 *
 * @param targetProjectName name of the project to share with
 * @param fullPath          full path identifying the dataset in the source project
 * @param permission        access permission granted to the target project
 * @param project           project that owns the dataset
 * @param user              user performing the share
 * @throws DatasetException if the dataset lookup or share fails
 * @throws ProjectException if no project with targetProjectName exists
 */
public void share(String targetProjectName, String fullPath, DatasetAccessPermission permission, Project project, Users user) throws DatasetException, ProjectException {
  Project target = projectFacade.findByName(targetProjectName);
  // Dataset lookup happens before the target-project check, mirroring the
  // original evaluation order (a missing dataset is reported first).
  Dataset dataset = getByProjectAndFullPath(project, fullPath);
  if (target == null) {
    throw new ProjectException(RESTCodes.ProjectErrorCode.PROJECT_NOT_FOUND, Level.FINE, "Target project not found.");
  }
  DatasetSharedWith shared = shareInternal(target, dataset, user, permission);
  boolean acceptedFeaturestoreShare = DatasetType.FEATURESTORE.equals(dataset.getDsType()) && shared.getAccepted();
  if (acceptedFeaturestoreShare) {
    Dataset trainingDataset = getTrainingDataset(project);
    if (trainingDataset != null) {
      try {
        shareInternal(target, trainingDataset, user, permission);
      } catch (DatasetException ignored) {
        // Dataset already shared nothing to do
      }
    }
    // If we migrate Training Datasets to remove the project prefix, these methods can be reused
    shareFeatureStoreServiceDataset(user, project, target, permission, Settings.ServiceDataset.STATISTICS);
  }
}
Usage of io.hops.hopsworks.exceptions.DatasetException in the hopsworks project by logicalclocks.
Class: DatasetController — method: unzip.
/**
 * Unzips an archive in HopsFS by launching the unzip-background.sh helper as
 * the project user. Work is staged in a local directory tracked via the
 * settings' unzipping state.
 *
 * @param project  project the archive belongs to
 * @param user     user requesting the extraction
 * @param path     HopsFS path of the archive
 * @param destPath optional destination path; when null the helper's default is used
 * @throws DatasetException if the path does not exist, the staging directory
 *                          cannot be created, the archive is too large (exit
 *                          code 2), or extraction fails for any other reason
 */
public void unzip(Project project, Users user, Path path, Path destPath) throws DatasetException {
  String hdfsUser = hdfsUsersController.getHdfsUserName(project, user);
  checkFileExists(path, hdfsUser);
  CompressionInfo compressionInfo = new CompressionInfo(path, destPath);
  String stagingDir = settings.getStagingDir() + File.separator + compressionInfo.getStagingDirectory();
  File unzipDir = new File(stagingDir);
  // FIX: mkdirs() result was previously ignored; a failed staging-dir creation
  // would only surface later as an opaque non-zero exit from the helper script.
  if (!unzipDir.mkdirs() && !unzipDir.isDirectory()) {
    throw new DatasetException(RESTCodes.DatasetErrorCode.COMPRESSION_ERROR, Level.SEVERE, "could not create staging dir: " + stagingDir);
  }
  settings.addUnzippingState(compressionInfo);
  ProcessDescriptor.Builder processDescriptorBuilder = new ProcessDescriptor.Builder().addCommand(settings.getHopsworksDomainDir() + "/bin/unzip-background.sh").addCommand(stagingDir).addCommand(path.toString()).addCommand(hdfsUser);
  if (destPath != null) {
    processDescriptorBuilder.addCommand(destPath.toString());
  }
  ProcessDescriptor processDescriptor = processDescriptorBuilder.ignoreOutErrStreams(true).build();
  try {
    ProcessResult processResult = osProcessExecutor.execute(processDescriptor);
    int result = processResult.getExitCode();
    // Exit code 2 is the helper's dedicated "archive too large" signal.
    if (result == 2) {
      throw new DatasetException(RESTCodes.DatasetErrorCode.COMPRESSION_SIZE_ERROR, Level.WARNING);
    }
    if (result != 0) {
      throw new DatasetException(RESTCodes.DatasetErrorCode.COMPRESSION_ERROR, Level.WARNING, "path: " + path.toString() + ", result: " + result);
    }
  } catch (IOException ex) {
    throw new DatasetException(RESTCodes.DatasetErrorCode.COMPRESSION_ERROR, Level.SEVERE, "path: " + path.toString(), ex.getMessage(), ex);
  }
}
Usage of io.hops.hopsworks.exceptions.DatasetException in the hopsworks project by logicalclocks.
Class: DatasetController — method: deleteCorrupted.
/**
 * Removes a corrupted (zero-length, Hopsworks-owned) file from a dataset.
 * The delete runs as the superuser, but only when the requester is a member of
 * the owning project and that project is the one the request came from.
 *
 * @param project  project the request originates from
 * @param user     requesting user
 * @param fullPath absolute HopsFS path of the suspected corrupted file
 * @param dataset  dataset containing the path
 * @throws DatasetException wrapping any underlying I/O failure
 */
public void deleteCorrupted(Project project, Users user, Path fullPath, Dataset dataset) throws DatasetException {
  DistributedFileSystemOps superOps = null;
  try {
    // The dataset may be shared, so resolve its true owning project first.
    Project owner = getOwningProject(dataset);
    boolean member = projectTeamFacade.isUserMemberOfProject(owner, user);
    if (member && owner.equals(project)) {
      // do it as super user
      superOps = dfs.getDfsOps();
      FileStatus status = superOps.getFileStatus(fullPath);
      boolean ownedByHopsworks = status.getOwner().equals(settings.getHopsworksUser());
      boolean zeroLength = status.getLen() == 0;
      // Only delete when both corruption markers hold: superuser-owned and empty.
      if (ownedByHopsworks && zeroLength) {
        superOps.rm(fullPath, true);
      }
    }
  } catch (IOException ex) {
    throw new DatasetException(RESTCodes.DatasetErrorCode.INODE_DELETION_ERROR, Level.SEVERE, "path: " + fullPath.toString(), ex.getMessage(), ex);
  } finally {
    // closeDfsClient is invoked even when superOps was never acquired,
    // exactly as in the original flow.
    dfs.closeDfsClient(superOps);
  }
}
Usage of io.hops.hopsworks.exceptions.DatasetException in the hopsworks project by logicalclocks.
Class: DatasetController — method: updatePermission.
/**
 * Updates the access permission of a dataset and records the change as an
 * activity when the permission actually changes.
 *
 * @param ds                 dataset to update
 * @param datasetPermissions new permission to apply
 * @param project            project performing the update (must own the dataset)
 * @param targetProject      project whose access is being changed
 * @param user               user performing the update
 * @throws DatasetException if the dataset is shared into this project (only the
 *                          owner may update) or is public (immutable)
 */
public void updatePermission(Dataset ds, DatasetAccessPermission datasetPermissions, Project project, Project targetProject, Users user) throws DatasetException {
  // Only the owning project is allowed to change permissions.
  if (ds.isShared(project)) {
    throw new DatasetException(RESTCodes.DatasetErrorCode.DATASET_OWNER_ERROR, Level.FINE);
  }
  // Public datasets keep their permissions fixed.
  if (ds.isPublicDs()) {
    throw new DatasetException(RESTCodes.DatasetErrorCode.DATASET_PUBLIC_IMMUTABLE, Level.FINE);
  }
  PermissionTransition transition = PermissionTransition.valueOf(ds.getPermission(), datasetPermissions);
  changePermissions(ds, transition, targetProject);
  if (transition.noop()) {
    return;
  }
  String activityMessage = ActivityFacade.CHANGE_DATASET_PERMISSION + " of " + ds.getName() + " from " + transition.getFrom().getDescription() + " to " + transition.getTo().getDescription();
  activityFacade.persistActivity(activityMessage, project, user, ActivityFlag.DATASET);
}
Aggregations