Usage of io.hops.hopsworks.exceptions.GenericException in the hopsworks project by Logical Clocks:
the start method of the AbstractExecutionController class.
@Override
@TransactionAttribute(TransactionAttributeType.NOT_SUPPORTED)
public Execution start(Jobs job, String args, Users user) throws JobException, GenericException, ServiceException, ProjectException {
  // Reject the request outright when this job already has too many concurrent executions.
  checkExecutionLimit(job);
  // Prepaid projects must still have YARN quota left before anything may be launched.
  if (job.getProject().getPaymentType().equals(PaymentType.PREPAID)) {
    YarnProjectsQuota quota = yarnProjectsQuotaFacade.findByProjectName(job.getProject().getName());
    if (quota == null || quota.getQuotaRemaining() <= 0) {
      throw new ProjectException(RESTCodes.ProjectErrorCode.PROJECT_QUOTA_ERROR, Level.FINE);
    }
  }
  // If the check is enabled, require at least one started nodemanager (Python jobs do not use YARN here).
  if (settings.isCheckingForNodemanagerStatusEnabled() && job.getJobType() != JobType.PYTHON) {
    hostServicesFacade.findServices("nodemanager").stream()
        .filter(s -> s.getStatus() == ServiceStatus.Started)
        .findFirst()
        .orElseThrow(() -> new JobException(RESTCodes.JobErrorCode.NODEMANAGERS_OFFLINE, Level.SEVERE));
  }
  switch (job.getJobType()) {
    case FLINK:
      // Materialize certs
      return flinkController.startJob(job, user);
    case SPARK: {
      Execution execution = sparkController.startJob(job, args, user);
      if (execution == null) {
        throw new IllegalArgumentException("Problem getting execution object for: " + job.getJobType());
      }
      SparkJobConfiguration sparkConfig = (SparkJobConfiguration) job.getJobConfig();
      String appPath = sparkConfig.getAppPath();
      String preparedPath;
      try {
        preparedPath = Utils.prepPath(appPath);
      } catch (UnsupportedEncodingException ex) {
        throw new JobException(RESTCodes.JobErrorCode.JOB_START_FAILED, Level.FINE, "Job name: " + job.getName(), ex.getMessage(), ex);
      }
      // Resolve the application file's inode so the activity entry carries its name.
      Inode appInode = inodeController.getInodeAtPath(preparedPath);
      String appName = appInode.getInodePK().getName();
      activityFacade.persistActivity(ActivityFacade.EXECUTED_JOB + appName, job.getProject(), user, ActivityFlag.JOB);
      return execution;
    }
    case PYSPARK: {
      // PySpark requires a provisioned Python environment in the project.
      if (job.getProject().getPythonEnvironment() == null) {
        throw new ProjectException(RESTCodes.ProjectErrorCode.ANACONDA_NOT_ENABLED, Level.FINEST);
      }
      Execution execution = sparkController.startJob(job, args, user);
      if (execution == null) {
        throw new IllegalArgumentException("Error while getting execution object for: " + job.getJobType());
      }
      return execution;
    }
    default:
      throw new GenericException(RESTCodes.GenericErrorCode.UNKNOWN_ACTION, Level.FINE, "Unsupported job type: " + job.getJobType());
  }
}
Usage of io.hops.hopsworks.exceptions.GenericException in the hopsworks project by Logical Clocks:
the condaOp method of the CommandsController class.
public PythonDep condaOp(CondaOp op, Users user, CondaInstallType installType, Project proj, String channelUrl, String lib, String version, String arg, GitBackend gitBackend, String apiKeyName) throws GenericException, ServiceException {
  // Library operations without an explicit version fall back to the "unknown" placeholder.
  if (Strings.isNullOrEmpty(version) && CondaOp.isLibraryOp(op)) {
    version = Settings.UNKNOWN_LIBRARY_VERSION;
  }
  // A pending uninstall of the same library makes queuing a new install legal.
  List<CondaStatus> activeStatuses = new ArrayList<>();
  activeStatuses.add(CondaStatus.NEW);
  activeStatuses.add(CondaStatus.ONGOING);
  List<CondaCommands> pendingUninstalls = condaCommandFacade.findByStatusAndCondaOpAndProject(activeStatuses, CondaOp.UNINSTALL, proj);
  // Restrict to uninstalls targeting this exact library under the same package manager.
  List<CondaCommands> sameLibUninstalls = pendingUninstalls.stream()
      .filter(cmd -> cmd.getLib().equalsIgnoreCase(lib) && cmd.getInstallType().equals(installType))
      .collect(Collectors.toList());
  // Is a dependency with this name and package manager already installed in the project?
  Optional<PythonDep> existingDep = proj.getPythonDepCollection().stream()
      .filter(d -> d.getDependency().equalsIgnoreCase(lib) && d.getInstallType().equals(installType))
      .findFirst();
  // Installing an already-present library is forbidden unless an uninstall is in flight.
  if (op == CondaOp.INSTALL && existingDep.isPresent() && sameLibUninstalls.isEmpty()) {
    throw new ServiceException(RESTCodes.ServiceErrorCode.ANACONDA_DEP_INSTALL_FORBIDDEN, Level.FINE, "dep: " + lib);
  }
  PythonDep dep;
  try {
    // 1. test if anacondaRepoUrl exists. If not, add it.
    AnacondaRepo repo = libraryFacade.getRepo(channelUrl, true);
    // 2. Test if pythonDep exists. If not, add it.
    dep = libraryFacade.getOrCreateDep(repo, installType, lib, version, true, false);
    Collection<PythonDep> projectDeps = proj.getPythonDepCollection();
    // 3. Add the python library to the join table for the project
    if (op == CondaOp.INSTALL) {
      // Remove-then-add also covers the upgrade case.
      projectDeps.remove(dep);
      projectDeps.add(dep);
    }
    proj.setPythonDepCollection(projectDeps);
    projectFacade.update(proj);
    // Queue the conda command for asynchronous execution by the agent.
    CondaCommands command = new CondaCommands(user, op, CondaStatus.NEW, installType, proj, lib, version, channelUrl, new Date(), arg, null, false, gitBackend, apiKeyName);
    condaCommandFacade.save(command);
  } catch (Exception ex) {
    throw new GenericException(RESTCodes.GenericErrorCode.UNKNOWN_ERROR, Level.SEVERE, "condaOp failed", ex.getMessage(), ex);
  }
  return dep;
}
Usage of io.hops.hopsworks.exceptions.GenericException in the hopsworks project by Logical Clocks:
the setProvCoreXAttr method of the HopsFSProvenanceController class.
/**
 * Serializes the provenance core metadata and stores it as an extended attribute
 * on the given HopsFS path.
 *
 * @param path     HopsFS path whose xattr is set
 * @param provCore provenance core metadata to marshal and store
 * @param udfso    filesystem handle used for the xattr upsert
 * @throws ProvenanceException if marshalling or the xattr write fails
 */
private void setProvCoreXAttr(String path, ProvCoreDTO provCore, DistributedFileSystemOps udfso) throws ProvenanceException {
  try {
    String provType = converter.marshal(provCore);
    // Encode with an explicit charset: the no-arg getBytes() uses the platform default,
    // which could silently produce a different xattr payload on non-UTF-8 hosts.
    xattrCtrl.upsertProvXAttr(udfso, path, ProvXAttrs.PROV_XATTR_CORE_VAL, provType.getBytes(java.nio.charset.StandardCharsets.UTF_8));
  } catch (GenericException | DatasetException | MetadataException e) {
    throw new ProvenanceException(RESTCodes.ProvenanceErrorCode.FS_ERROR, Level.WARNING, "hopsfs - set xattr - prov core - error", "hopsfs - set xattr - prov core - error", e);
  }
}
Usage of io.hops.hopsworks.exceptions.GenericException in the hopsworks project by Logical Clocks:
the addMember method of the ProjectController class.
/**
 * Adds a new member to the given project: persists the team entry, provisions HDFS,
 * Kafka and online-featurestore access where those services are enabled, generates
 * user certificates (rolling the membership back on failure), runs team-role handlers
 * and notifies the new member.
 *
 * @return true if the member was added; false if newMember is null or already a member
 */
public boolean addMember(ProjectTeam projectTeam, Project project, Users newMember, Users owner, DistributedFileSystemOps dfso) throws UserException, KafkaException, ProjectException, FeaturestoreException, IOException {
// A missing or unrecognized role defaults to Data Scientist (the less privileged role).
if (projectTeam.getTeamRole() == null || (!projectTeam.getTeamRole().equals(ProjectRoleTypes.DATA_SCIENTIST.getRole()) && !projectTeam.getTeamRole().equals(ProjectRoleTypes.DATA_OWNER.getRole()))) {
projectTeam.setTeamRole(ProjectRoleTypes.DATA_SCIENTIST.getRole());
}
projectTeam.setTimestamp(new Date());
// Only proceed when the user exists and is not already on the team.
if (newMember != null && !projectTeamFacade.isUserMemberOfProject(project, newMember)) {
// this makes sure that the member is added to the project sent as the
// first param b/c the security check was made on the parameter sent as path.
projectTeam.getProjectTeamPK().setProjectId(project.getId());
projectTeam.setProject(project);
projectTeam.setUser(newMember);
project.getProjectTeamCollection().add(projectTeam);
projectFacade.update(project);
// Create the HDFS user for the new member.
hdfsUsersController.addNewProjectMember(projectTeam, dfso);
// Add user to kafka topics ACLs by default
if (projectServicesFacade.isServiceEnabledForProject(project, ProjectServiceEnum.KAFKA)) {
kafkaController.addProjectMemberToTopics(project, newMember.getEmail());
}
// if online-featurestore service is enabled in the project, give new member access to it
if (projectServiceFacade.isServiceEnabledForProject(project, ProjectServiceEnum.FEATURESTORE) && settings.isOnlineFeaturestore()) {
Featurestore featurestore = featurestoreController.getProjectFeaturestore(project);
onlineFeaturestoreController.createDatabaseUser(projectTeam.getUser(), featurestore, projectTeam.getTeamRole());
}
// TODO: This should now be a REST call
Future<CertificatesController.CertsResult> certsResultFuture = null;
try {
certsResultFuture = certificatesController.generateCertificates(project, newMember);
// Block until certificate generation finishes so failures surface here.
certsResultFuture.get();
} catch (Exception ex) {
try {
// Wait for any in-flight generation before revoking, so revocation sees the certs.
if (certsResultFuture != null) {
certsResultFuture.get();
}
certificatesController.revokeUserSpecificCertificates(project, newMember);
} catch (IOException | InterruptedException | ExecutionException | HopsSecurityException | GenericException e) {
String failedUser = project.getName() + HdfsUsersController.USER_NAME_DELIMITER + newMember.getUsername();
LOGGER.log(Level.SEVERE, "Could not delete user certificates for user " + failedUser + ". Manual cleanup is needed!!! ", e);
}
LOGGER.log(Level.SEVERE, "error while creating certificates, jupyter kernel: " + ex.getMessage(), ex);
// Roll back the membership that was persisted above before failing the call.
hdfsUsersController.removeMember(projectTeam);
projectTeamFacade.removeProjectTeam(project, newMember);
throw new EJBException("Could not create certificates for user");
}
// trigger project team role update handlers
ProjectTeamRoleHandler.runProjectTeamRoleAddMembersHandlers(projectTeamRoleHandlers, project, Collections.singletonList(newMember), ProjectRoleTypes.fromString(projectTeam.getTeamRole()), false);
// Notify the new member and record the activity.
String message = "You have been added to project " + project.getName() + " with a role " + projectTeam.getTeamRole() + ".";
messageController.send(newMember, owner, "You have been added to a project.", message, message, "");
LOGGER.log(Level.FINE, "{0} - member added to project : {1}.", new Object[] { newMember.getEmail(), project.getName() });
logActivity(ActivityFacade.NEW_MEMBER + projectTeam.getProjectTeamPK().getTeamMember(), owner, project, ActivityFlag.MEMBER);
return true;
} else {
return false;
}
}
Usage of io.hops.hopsworks.exceptions.GenericException in the hopsworks project by Logical Clocks:
the revokeX509 method of the CAProxy class.
private void revokeX509(String parameterName, String parameterValue, String path) throws HopsSecurityException, GenericException {
  // A blank identifier means there is no certificate to act on.
  if (Strings.isNullOrEmpty(parameterValue)) {
    throw new HopsSecurityException(RESTCodes.SecurityErrorCode.CERTIFICATE_NOT_FOUND, Level.SEVERE, null, "Certificate parameter value cannot be null or empty");
  }
  try {
    // Build the CA revocation endpoint URL carrying the identifying query parameter.
    URI uri = new URIBuilder(path).addParameter(parameterName, parameterValue).build();
    HttpDelete request = new HttpDelete(uri);
    client.setAuthorizationHeader(request);
    // Wrap the DELETE call so transient failures are retried.
    HttpRetryableAction<Void> revocation = new HttpRetryableAction<Void>() {
      @Override
      public Void performAction() throws ClientProtocolException, IOException {
        return client.execute(request, CA_REVOKE_RESPONSE_HANDLER);
      }
    };
    revocation.tryAction();
  } catch (URISyntaxException ex) {
    throw new GenericException(RESTCodes.GenericErrorCode.UNKNOWN_ERROR, Level.SEVERE, null, null, ex);
  } catch (ClientProtocolException ex) {
    LOG.log(Level.WARNING, "Could not revoke X.509 " + parameterValue, ex);
    // Surface a security failure raised inside the response handler, if present.
    if (ex.getCause() instanceof HopsSecurityException) {
      throw (HopsSecurityException) ex.getCause();
    }
    throw new HopsSecurityException(RESTCodes.SecurityErrorCode.CERTIFICATE_REVOKATION_ERROR, Level.WARNING, null, null, ex);
  } catch (IOException ex) {
    LOG.log(Level.SEVERE, "Could not revoke X.509 " + parameterValue, ex);
    throw new GenericException(RESTCodes.GenericErrorCode.UNKNOWN_ERROR, Level.SEVERE, "Generic error while revoking X.509", null, ex);
  }
}
Aggregations