Use of io.hops.hopsworks.persistence.entity.hdfs.inode.Inode in project hopsworks by logicalclocks.
The class AbstractExecutionController, method start.
@Override
@TransactionAttribute(TransactionAttributeType.NOT_SUPPORTED)
public Execution start(Jobs job, String args, Users user)
    throws JobException, GenericException, ServiceException, ProjectException {
  // If the limit for the number of executions for this job has been reached, return an error
  checkExecutionLimit(job);
  // A user should not be able to start a job if the project is prepaid and it doesn't have quota.
  if (job.getProject().getPaymentType().equals(PaymentType.PREPAID)) {
    YarnProjectsQuota projectQuota = yarnProjectsQuotaFacade.findByProjectName(job.getProject().getName());
    if (projectQuota == null || projectQuota.getQuotaRemaining() <= 0) {
      throw new ProjectException(RESTCodes.ProjectErrorCode.PROJECT_QUOTA_ERROR, Level.FINE);
    }
  }
  // If the check is enabled and all nodemanagers are offline, throw a JobException
  if (settings.isCheckingForNodemanagerStatusEnabled() && job.getJobType() != JobType.PYTHON) {
    hostServicesFacade.findServices("nodemanager").stream()
        .filter(s -> s.getStatus() == ServiceStatus.Started)
        .findFirst()
        .orElseThrow(() -> new JobException(RESTCodes.JobErrorCode.NODEMANAGERS_OFFLINE, Level.SEVERE));
  }
  Execution exec;
  switch (job.getJobType()) {
    case FLINK:
      // Materialize certs
      return flinkController.startJob(job, user);
    case SPARK:
      exec = sparkController.startJob(job, args, user);
      if (exec == null) {
        throw new IllegalArgumentException("Problem getting execution object for: " + job.getJobType());
      }
      SparkJobConfiguration config = (SparkJobConfiguration) job.getJobConfig();
      String path = config.getAppPath();
      String pathOfInode;
      try {
        pathOfInode = Utils.prepPath(path);
      } catch (UnsupportedEncodingException ex) {
        throw new JobException(RESTCodes.JobErrorCode.JOB_START_FAILED, Level.FINE,
            "Job name: " + job.getName(), ex.getMessage(), ex);
      }
      Inode inode = inodeController.getInodeAtPath(pathOfInode);
      String inodeName = inode.getInodePK().getName();
      activityFacade.persistActivity(ActivityFacade.EXECUTED_JOB + inodeName, job.getProject(), user, ActivityFlag.JOB);
      break;
    case PYSPARK:
      if (job.getProject().getPythonEnvironment() == null) {
        throw new ProjectException(RESTCodes.ProjectErrorCode.ANACONDA_NOT_ENABLED, Level.FINEST);
      }
      exec = sparkController.startJob(job, args, user);
      if (exec == null) {
        throw new IllegalArgumentException("Error while getting execution object for: " + job.getJobType());
      }
      break;
    default:
      throw new GenericException(RESTCodes.GenericErrorCode.UNKNOWN_ACTION, Level.FINE,
          "Unsupported job type: " + job.getJobType());
  }
  return exec;
}
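The nodemanager availability gate above is built on Stream.findFirst().orElseThrow(): require at least one service in the Started state or fail fast. A minimal, self-contained sketch of that pattern, with a hypothetical Service record standing in for the HostServices entity:

import java.util.List;
import java.util.NoSuchElementException;

public class LivenessGateSketch {

  // Hypothetical stand-in for the HostServices entity.
  record Service(String name, String status) { }

  // Returns the first service in the "Started" state, or throws if none is up.
  static Service requireOneStarted(List<Service> services) {
    return services.stream()
        .filter(s -> "Started".equals(s.status()))
        .findFirst()
        .orElseThrow(() -> new NoSuchElementException("all nodemanagers are offline"));
  }

  public static void main(String[] args) {
    List<Service> services = List.of(
        new Service("nodemanager", "Stopped"),
        new Service("nodemanager", "Started"));
    System.out.println(requireOneStarted(services)); // prints the Started instance
  }
}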
Use of io.hops.hopsworks.persistence.entity.hdfs.inode.Inode in project hopsworks by logicalclocks.
The class FlinkCleaner, method deleteOrphanJobs.
@Schedule(persistent = false, minute = "0", hour = "1")
public void deleteOrphanJobs(Timer timer) {
  LOGGER.log(Level.INFO, "Running FlinkCleaner.");
  // Get all jobs from the history server
  DistributedFileSystemOps dfso = null;
  try {
    List<HostServices> hosts = hostServicesFacade.findServices("flinkhistoryserver");
    if (hosts.isEmpty()) {
      LOGGER.log(Level.INFO, "Could not find flinkhistoryserver service running on any server, shutting down timer.");
      timer.cancel();
    }
    // Read all completed jobs from "historyserver.archive.fs.dir"
    String archiveDir = flinkController.getArchiveDir();
    // Delete all archives that have no hdfs user
    dfso = dfs.getDfsOps();
    List<Inode> jobs = inodeController.getChildren(archiveDir);
    for (Inode job : jobs) {
      if (job.getHdfsUser() == null) {
        dfso.rm(new Path(archiveDir + File.separator + job.getInodePK().getName()), false);
      }
    }
  } catch (Exception ex) {
    LOGGER.log(Level.SEVERE, "Could not access flink configuration file", ex);
  } finally {
    if (dfso != null) {
      dfs.closeDfsClient(dfso);
    }
  }
}
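The cleaner is driven by a non-persistent EJB @Schedule timer that cancels itself when the service it depends on is missing. A hedged skeleton of that timer pattern (the class name, logger, and dependencyAvailable() helper are illustrative, not Hopsworks code; it needs an EJB container to run):

import javax.ejb.Schedule;
import javax.ejb.Singleton;
import javax.ejb.Timer;
import java.util.logging.Level;
import java.util.logging.Logger;

// Illustrative skeleton only; the real FlinkCleaner adds the HDFS and service-registry work shown above.
@Singleton
public class NightlyCleanerSketch {

  private static final Logger LOGGER = Logger.getLogger(NightlyCleanerSketch.class.getName());

  // Fires every night at 01:00; persistent = false, so missed runs are not replayed after a restart.
  @Schedule(persistent = false, minute = "0", hour = "1")
  public void clean(Timer timer) {
    if (!dependencyAvailable()) {
      LOGGER.log(Level.INFO, "Required service is missing, cancelling the timer.");
      timer.cancel();
      return;
    }
    // ... cleanup work goes here ...
  }

  // Hypothetical availability check; the real cleaner queries HostServicesFacade.
  private boolean dependencyAvailable() {
    return true;
  }
}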
Use of io.hops.hopsworks.persistence.entity.hdfs.inode.Inode in project hopsworks by logicalclocks.
The class InodeController, method getAllChildren.
/**
 * Recursively collect all descendants of <i>parent</i>, using findByParent() at each level.
 * <p/>
 * @param parent the inode whose subtree is traversed
 * @param children the list that the descendants are appended to
 */
public void getAllChildren(Inode parent, List<Inode> children) {
  List<Inode> curr = inodeFacade.findByParent(parent);
  children.addAll(curr);
  for (Inode inode : curr) {
    if (inode.isDir()) {
      getAllChildren(inode, children);
    }
  }
}
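getAllChildren passes the result list in as an accumulator instead of returning one, so recursive calls append to the caller's list. A self-contained sketch of the same depth-first pattern over a hypothetical TreeNode interface (Inode plays that role in Hopsworks):

import java.util.ArrayList;
import java.util.List;

public class RecursiveCollectSketch {

  // Hypothetical tree node; Inode plays this role in Hopsworks.
  interface TreeNode {
    boolean isDir();
    List<TreeNode> children();
  }

  record Node(boolean isDir, List<TreeNode> children) implements TreeNode { }

  // Same shape as getAllChildren: append each level, then recurse into directories.
  static void collectAll(TreeNode parent, List<TreeNode> out) {
    List<TreeNode> curr = parent.children();
    out.addAll(curr);
    for (TreeNode child : curr) {
      if (child.isDir()) {
        collectAll(child, out);
      }
    }
  }

  public static void main(String[] args) {
    TreeNode leaf = new Node(false, List.of());
    TreeNode dir = new Node(true, List.of(leaf));
    TreeNode root = new Node(true, List.of(dir));
    List<TreeNode> all = new ArrayList<>();
    collectAll(root, all);
    System.out.println(all.size()); // 2: the directory and the file under it
  }
}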
Use of io.hops.hopsworks.persistence.entity.hdfs.inode.Inode in project hopsworks by logicalclocks.
The class InodeController, method getSize.
/**
 * @param inode the inode whose size is computed
 * @return the size in bytes; for a directory, the recursive sum of all file sizes underneath it
 */
public long getSize(Inode inode) {
  if (!inode.isDir()) {
    return inode.getSize();
  }
  long size = 0;
  List<Inode> children = getChildren(inode);
  for (Inode i : children) {
    if (!i.isDir()) {
      size += i.getSize();
    } else {
      size += getSize(i);
    }
  }
  return size;
}
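getSize recurses into subdirectories and sums only file sizes. For very deep trees the same computation can be done with an explicit stack instead of recursion; a hedged, self-contained sketch with an illustrative Entry record in place of Inode:

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.List;

public class TreeSizeSketch {

  // Illustrative node; Inode exposes the equivalent isDir()/getSize()/children lookups.
  record Entry(boolean isDir, long size, List<Entry> children) { }

  // Iterative equivalent of getSize: only file sizes contribute to the total.
  static long totalSize(Entry root) {
    long total = 0;
    Deque<Entry> stack = new ArrayDeque<>();
    stack.push(root);
    while (!stack.isEmpty()) {
      Entry e = stack.pop();
      if (e.isDir()) {
        e.children().forEach(stack::push);
      } else {
        total += e.size();
      }
    }
    return total;
  }

  public static void main(String[] args) {
    Entry file1 = new Entry(false, 10, List.of());
    Entry file2 = new Entry(false, 32, List.of());
    Entry dir = new Entry(true, 0, List.of(file1, file2));
    System.out.println(totalSize(dir)); // 42
  }
}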
Use of io.hops.hopsworks.persistence.entity.hdfs.inode.Inode in project hopsworks by logicalclocks.
The class TestProjectProvenanceResource, method upsertXAttr.
@POST
@Path("xattr")
@Produces(MediaType.APPLICATION_JSON)
@AllowedProjectRoles({ AllowedProjectRoles.DATA_SCIENTIST, AllowedProjectRoles.DATA_OWNER })
@JWTRequired(acceptedTokens = { Audience.API }, allowedUserRoles = { "HOPS_ADMIN", "HOPS_USER" })
public Response upsertXAttr(@QueryParam("inodeId") Long inodeId, @QueryParam("xattrName") String xattrName,
    @QueryParam("xattrValue") String xattrValue) throws MetadataException, DatasetException {
  Inode inode = inodeFacade.findById(inodeId);
  String path = inodeCtrl.getPath(inode);
  DistributedFileSystemOps dfso = dfs.getDfsOps();
  try {
    xattrCtrl.upsertProvXAttr(dfso, path, xattrName, xattrValue.getBytes());
  } finally {
    if (dfso != null) {
      dfs.closeDfsClient(dfso);
    }
  }
  return Response.ok().build();
}
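A hedged example of invoking this endpoint with the JDK HTTP client; the host, resource path, token, and parameter values are placeholders, since the exact project-scoped URL depends on the deployment. Only the query parameter names (inodeId, xattrName, xattrValue) come from the resource above:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class UpsertXAttrCall {

  public static void main(String[] args) throws Exception {
    // Placeholder host and resource path; substitute the project-scoped URL of the deployed resource.
    // The query parameter names come from the endpoint above; the values here are examples only.
    String url = "https://hopsworks.example.com/placeholder-resource/xattr"
        + "?inodeId=12345&xattrName=featurestore&xattrValue=demo";

    HttpRequest request = HttpRequest.newBuilder(URI.create(url))
        .header("Authorization", "Bearer <jwt>")   // placeholder token
        .POST(HttpRequest.BodyPublishers.noBody())
        .build();

    HttpResponse<String> response = HttpClient.newHttpClient()
        .send(request, HttpResponse.BodyHandlers.ofString());
    System.out.println(response.statusCode()); // 200 on success
  }
}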