Use of io.hops.hopsworks.persistence.entity.hdfs.user.HdfsGroups in project hopsworks by logicalclocks.
The class PermissionsCleaner, method fixDataset.
private void fixDataset(Dataset dataset) throws IOException {
  String datasetGroup = hdfsUsersController.getHdfsGroupName(dataset.getProject(), dataset);
  String datasetAclGroup = hdfsUsersController.getHdfsAclGroupName(dataset.getProject(), dataset);
  DistributedFileSystemOps dfso = null;
  try {
    dfso = dfsService.getDfsOps();
    // make sure both the dataset group and its ACL group exist before fixing permissions
    HdfsGroups hdfsDatasetGroup = getOrCreateGroup(datasetGroup, dfso);
    HdfsGroups hdfsDatasetAclGroup = getOrCreateGroup(datasetAclGroup, dfso);
    fixPermission(dataset, hdfsDatasetGroup, hdfsDatasetAclGroup, dfso);
  } finally {
    // always return the superuser client, even if fixPermission throws
    dfsService.closeDfsClient(dfso);
  }
}
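The important detail in fixDataset is the acquire/use/release shape: the superuser DistributedFileSystemOps client is obtained inside the try and returned in the finally, so it is released even when fixPermission throws. A minimal sketch of the same pattern factored out; the DfsAction interface and the withSuperUserDfs name are hypothetical helpers for illustration, not Hopsworks API:

  // Hypothetical helper capturing the acquire/use/release pattern above.
  interface DfsAction {
    void run(DistributedFileSystemOps dfso) throws IOException;
  }

  private void withSuperUserDfs(DfsAction action) throws IOException {
    DistributedFileSystemOps dfso = null;
    try {
      dfso = dfsService.getDfsOps();    // superuser client, as in fixDataset
      action.run(dfso);
    } finally {
      dfsService.closeDfsClient(dfso);  // assumed, as above, to tolerate a null argument
    }
  }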
Use of io.hops.hopsworks.persistence.entity.hdfs.user.HdfsGroups in project hopsworks by logicalclocks.
The class PermissionsCleaner, method getOrCreateGroup.
private HdfsGroups getOrCreateGroup(String group, DistributedFileSystemOps dfso) throws IOException {
  HdfsGroups hdfsGroup = hdfsGroupsFacade.findByName(group);
  if (hdfsGroup == null) {
    // the group is missing: create it in HDFS, then re-read it from the facade
    dfso.addGroup(group);
    hdfsGroup = hdfsGroupsFacade.findByName(group);
    LOGGER.log(Level.WARNING, "Found and fixed a missing group: group={0}", group);
  }
  return hdfsGroup;
}
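getOrCreateGroup assumes the second findByName succeeds once addGroup has run; if it does not, the method returns null and the caller eventually dereferences it. A stricter variant, sketched here under the assumption that the same facade and dfso calls are available, would fail fast instead:

  // Sketch of a fail-fast variant; the name getOrCreateGroupStrict is illustrative only.
  private HdfsGroups getOrCreateGroupStrict(String group, DistributedFileSystemOps dfso) throws IOException {
    HdfsGroups hdfsGroup = hdfsGroupsFacade.findByName(group);
    if (hdfsGroup == null) {
      dfso.addGroup(group);
      hdfsGroup = hdfsGroupsFacade.findByName(group);
      if (hdfsGroup == null) {
        // surface the failure instead of letting callers hit a NullPointerException later
        throw new IOException("Group " + group + " could not be created");
      }
      LOGGER.log(Level.WARNING, "Found and fixed a missing group: group={0}", group);
    }
    return hdfsGroup;
  }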
Use of io.hops.hopsworks.persistence.entity.hdfs.user.HdfsGroups in project hopsworks by logicalclocks.
The class ProjectController, method cleanup.
public void cleanup(Project project, String sessionId, List<Future<?>> projectCreationFutures,
    boolean decreaseCreatedProj, Users owner) throws GenericException {
  if (project == null) {
    return;
  }
  int nbTry = 0;
  while (nbTry < 2) {
    YarnClientWrapper yarnClientWrapper = ycs.getYarnClientSuper(settings.getConfiguration());
    YarnClient client = yarnClientWrapper.getYarnClient();
    try {
      // remove from project_team so that nobody can see the project anymore
      updateProjectTeamRole(project, ProjectRoleTypes.UNDER_REMOVAL);
      /*
       * Get all running YARN applications owned by any of the project members.
       * We check later whether these applications have stopped and their log
       * aggregation has finished. It would be better to check all applications
       * (even the ones that have finished running), but the log aggregation
       * status is not recovered when the resource manager restarts. As a result
       * we cannot know whether the status is NOT_START because we should wait
       * for it or because the resource manager restarted.
       */
      Collection<ProjectTeam> team = project.getProjectTeamCollection();
      Set<String> hdfsUsers = new HashSet<>();
      for (ProjectTeam pt : team) {
        String hdfsUsername = hdfsUsersController.getHdfsUserName(project, pt.getUser());
        hdfsUsers.add(hdfsUsername);
      }
      List<ApplicationReport> projectsApps = getYarnApplications(hdfsUsers, client);
      // try and close all the Jupyter jobs
      removeJupyter(project);
      removeAnacondaEnv(project);
      removeAlertConfigs(project);
      // kill jobs
      killYarnJobs(project);
      waitForJobLogs(projectsApps, client);
      List<HdfsUsers> usersToClean = getUsersToClean(project);
      List<HdfsGroups> groupsToClean = getGroupsToClean(project);
      removeProjectInt(project, usersToClean, groupsToClean, projectCreationFutures, decreaseCreatedProj, owner);
      removeCertificatesFromMaterializer(project);
      // delete the online featurestore database
      onlineFeaturestoreController.removeOnlineFeatureStore(project);
      break;
    } catch (Exception ex) {
      nbTry++;
      if (nbTry < 2) {
        try {
          Thread.sleep(nbTry * 1000);
        } catch (InterruptedException ex1) {
          LOGGER.log(Level.SEVERE, null, ex1);
        }
      } else {
        throw new GenericException(RESTCodes.GenericErrorCode.UNKNOWN_ERROR, Level.SEVERE, null, ex.getMessage(), ex);
      }
    } finally {
      ycs.closeYarnClient(yarnClientWrapper);
    }
  }
}
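The body of cleanup sits inside a small hand-rolled retry loop: at most two attempts, an nbTry-second sleep before the second one, and the last failure rethrown as a GenericException. The same policy, extracted into a standalone helper for clarity; the retryTwice name and the Callable-based shape are illustrative assumptions, not project API:

  // Sketch of the retry policy used by cleanup(): at most two attempts,
  // sleeping nbTry seconds between them. Names are illustrative only.
  private <T> T retryTwice(java.util.concurrent.Callable<T> body) throws GenericException {
    int nbTry = 0;
    while (true) {
      try {
        return body.call();
      } catch (Exception ex) {
        nbTry++;
        if (nbTry >= 2) {
          // same rethrow as in cleanup() above
          throw new GenericException(RESTCodes.GenericErrorCode.UNKNOWN_ERROR, Level.SEVERE, null,
            ex.getMessage(), ex);
        }
        try {
          Thread.sleep(nbTry * 1000L);
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();  // preserve the interrupt flag
        }
      }
    }
  }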
Use of io.hops.hopsworks.persistence.entity.hdfs.user.HdfsGroups in project hopsworks by logicalclocks.
The class DistributedFsService, method getDfsOpsForTesting.
public DistributedFileSystemOps getDfsOpsForTesting(String username) {
  if (username == null || username.isEmpty()) {
    throw new NullPointerException("username not set.");
  }
  // get the user's HDFS groups and copy their names into an array
  Collection<HdfsGroups> groups = hdfsUsersFacade.findByName(username).getHdfsGroupsCollection();
  String[] userGroups = new String[groups.size()];
  Iterator<HdfsGroups> iter = groups.iterator();
  int i = 0;
  while (iter.hasNext()) {
    userGroups[i] = iter.next().getName();
    i++;
  }
  UserGroupInformation ugi;
  try {
    // proxy user on top of the login user, restricted to the groups collected above
    ugi = UserGroupInformation.createProxyUserForTesting(username, UserGroupInformation.getLoginUser(), userGroups);
  } catch (IOException ex) {
    logger.log(Level.SEVERE, null, ex);
    return null;
  }
  if (settings.getHopsRpcTls()) {
    // runtime exceptions are not useful here, so recover by removing the materialized certificates
    try {
      bhcs.materializeCertsForNonSuperUser(username);
      Configuration newConf = new Configuration(conf);
      bhcs.configureTlsForProjectSpecificUser(username, transientDir, newConf);
      return new DistributedFileSystemOps(ugi, newConf);
    } catch (CryptoPasswordNotFoundException ex) {
      logger.log(Level.SEVERE, ex.getMessage(), ex);
      bhcs.removeNonSuperUserCertificate(username);
      return null;
    }
  }
  return new DistributedFileSystemOps(ugi, conf);
}
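The iterator loop that copies the group names into userGroups can be written more compactly with Java streams; given the same groups collection, the two forms are equivalent:

  // equivalent to the while-loop above
  String[] userGroups = groups.stream()
      .map(HdfsGroups::getName)
      .toArray(String[]::new);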
Use of io.hops.hopsworks.persistence.entity.hdfs.user.HdfsGroups in project hopsworks by logicalclocks.
The class HdfsUsersController, method removeFromGroup.
private void removeFromGroup(String hdfsUserName, String group, DistributedFileSystemOps dfso) throws IOException {
  // resolve both entities by name, then delegate; after this call the user is no longer in the group
  HdfsGroups hdfsGroup = hdfsGroupsFacade.findByName(group);
  HdfsUsers hdfsUser = hdfsUsersFacade.findByName(hdfsUserName);
  removeFromGroup(hdfsUser, hdfsGroup, dfso);
}
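Both findByName lookups can return null, which the three-argument overload is then expected to tolerate. A defensive wrapper, sketched here with the hypothetical name removeFromGroupIfPresent, would skip the removal instead:

  // Hypothetical defensive wrapper: skip the removal when either entity is missing.
  private void removeFromGroupIfPresent(String hdfsUserName, String group, DistributedFileSystemOps dfso)
      throws IOException {
    HdfsGroups hdfsGroup = hdfsGroupsFacade.findByName(group);
    HdfsUsers hdfsUser = hdfsUsersFacade.findByName(hdfsUserName);
    if (hdfsUser == null || hdfsGroup == null) {
      return;  // nothing to remove
    }
    removeFromGroup(hdfsUser, hdfsGroup, dfso);
  }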