Use of io.hops.hopsworks.persistence.entity.project.Project in the hopsworks project by logicalclocks.
Class ProjectsAdmin, method setProjectAdminInfo.
@PUT
@Consumes(MediaType.APPLICATION_JSON)
@Path("/projects")
public Response setProjectAdminInfo(ProjectAdminInfoDTO projectAdminInfoDTO, @Context SecurityContext sc) throws ProjectException {
  // For quota changes the space quota and the namespace quota must be
  // provided together (either both set or both null) for each storage area:
  // plain HDFS, Hive warehouse, and feature store.
  QuotasDTO quotasDTO = projectAdminInfoDTO.getProjectQuotas();
  if (quotasDTO != null
      && (onlyOneSet(quotasDTO.getHdfsQuotaInBytes(), quotasDTO.getHdfsNsQuota())
          || onlyOneSet(quotasDTO.getHiveHdfsQuotaInBytes(), quotasDTO.getHiveHdfsNsQuota())
          || onlyOneSet(quotasDTO.getFeaturestoreHdfsQuotaInBytes(), quotasDTO.getFeaturestoreHdfsNsQuota()))) {
    // NOTE: a null quotasDTO is accepted (no quota change requested); the
    // original message wrongly claimed a missing quotasDTO triggers this error.
    throw new IllegalArgumentException(
        "Incomplete quotasDTO: space and namespace quotas must be provided together.");
  }
  // Build the new project state as a Project entity; only the fields below
  // are consumed by adminProjectUpdate.
  Project project = new Project();
  project.setKafkaMaxNumTopics(settings.getKafkaMaxNumTopics());
  project.setName(projectAdminInfoDTO.getProjectName());
  project.setArchived(projectAdminInfoDTO.getArchived());
  project.setPaymentType(projectAdminInfoDTO.getPaymentType());
  projectController.adminProjectUpdate(project, quotasDTO);
  return noCacheResponse.getNoCacheResponseBuilder(Response.Status.OK).build();
}

/**
 * Returns true when exactly one of the two quota values is set, i.e. the
 * pair is incomplete and the request must be rejected.
 */
private static boolean onlyOneSet(Object spaceQuota, Object nsQuota) {
  return (spaceQuota == null) != (nsQuota == null);
}
Use of io.hops.hopsworks.persistence.entity.project.Project in the hopsworks project by logicalclocks.
Class ProjectsAdmin, method builder.
/**
 * Builds a DTO listing all projects of the given user, each child item
 * linking to its admin endpoint (admin/projects/{id}).
 */
private ProjectRestDTO builder(Users users, UriInfo uriInfo) {
  ProjectRestDTO dto = new ProjectRestDTO(uriInfo.getAbsolutePathBuilder().build());
  for (Project project : projectFacade.findByUser(users)) {
    java.net.URI itemUri = uriInfo.getBaseUriBuilder()
        .path("admin")
        .path("projects")
        .path(project.getId().toString())
        .build();
    dto.addItem(new ProjectRestDTO(itemUri, project.getId(), project.getName(), project.getCreated()));
  }
  return dto;
}
Use of io.hops.hopsworks.persistence.entity.project.Project in the hopsworks project by logicalclocks.
Class ConfigUtil, method getMatch (ProjectServiceAlert overload).
/**
 * Builds the label map used to match a project-service alert against
 * alertmanager routes: alert type, project name, and status.
 */
public static Map<String, String> getMatch(ProjectServiceAlert alert) {
  Map<String, String> labels = new HashMap<>();
  labels.put(Constants.ALERT_TYPE_LABEL, alert.getAlertType().getValue());
  labels.put(Constants.LABEL_PROJECT, alert.getProject().getName());
  labels.put(Constants.LABEL_STATUS, alert.getStatus().getName());
  return labels;
}
Use of io.hops.hopsworks.persistence.entity.project.Project in the hopsworks project by logicalclocks.
Class ConfigUtil, method getMatch (JobAlert overload).
/**
 * Builds the label map used to match a job alert against alertmanager
 * routes: alert type, owning project name, job name, and status.
 */
public static Map<String, String> getMatch(JobAlert alert) {
  Map<String, String> labels = new HashMap<>();
  labels.put(Constants.ALERT_TYPE_LABEL, alert.getAlertType().getValue());
  labels.put(Constants.LABEL_PROJECT, alert.getJobId().getProject().getName());
  labels.put(Constants.LABEL_JOB, alert.getJobId().getName());
  labels.put(Constants.LABEL_STATUS, alert.getStatus().getName());
  return labels;
}
Use of io.hops.hopsworks.persistence.entity.project.Project in the hopsworks project by logicalclocks.
Class FeatureGroupValidationsController, method putFeatureGroupValidationResults.
/**
 * Persists the results of a feature group validation run: writes the raw
 * results as JSON to HDFS, records a FeatureGroupValidation entity, and
 * optionally logs an activity entry.
 *
 * @param user           user who triggered the validation
 * @param project        project owning the feature group
 * @param featuregroup   feature group that was validated
 * @param results        per-expectation validation results
 * @param validationTime validation timestamp (epoch millis), also used as the
 *                       result directory name
 * @param logActivity    whether to log an activity entry (true when run as
 *                       part of a fg.save/insert operation)
 * @return the persisted FeatureGroupValidation
 * @throws FeaturestoreException if writing to HDFS fails, or if the overall
 *                               status exceeds the feature group's configured
 *                               validation severity (so the client does not insert)
 */
public FeatureGroupValidation putFeatureGroupValidationResults(Users user, Project project, Featuregroup featuregroup, List<ExpectationResult> results, Long validationTime, Boolean logActivity) throws FeaturestoreException {
  // Aggregate the per-expectation results into an overall status once and
  // reuse it below (the original re-derived it when building the entity).
  FeatureGroupValidation.Status status = getValidationResultStatus(results);
  alertController.sendAlert(featuregroup, results, status);
  String hdfsUsername = hdfsUsersController.getHdfsUserName(project, user);
  DistributedFileSystemOps udfso = null;
  try {
    String path2result = String.format(PATH_TO_DATA_VALIDATION_RESULT, project.getName(), featuregroup.getName(), featuregroup.getVersion()) + Path.SEPARATOR + validationTime;
    udfso = distributedFsService.getDfsOps(hdfsUsername);
    // Serialize results with snake_case field names to match the hsfs client.
    Gson gson = new GsonBuilder().setFieldNamingPolicy(FieldNamingPolicy.LOWER_CASE_WITH_UNDERSCORES).create();
    try (FSDataOutputStream outStream = udfso.create(path2result)) {
      outStream.writeBytes(gson.toJson(results));
      outStream.hflush();
    }
    Date validationTimeDate = new Timestamp(validationTime);
    FeatureGroupValidation featureGroupValidation = new FeatureGroupValidation(validationTimeDate, inodeController.getInodeAtPath(path2result), featuregroup, status);
    featureGroupValidationFacade.persist(featureGroupValidation);
    // Activity logged only if validations ran as part of a fg.save/insert operation
    if (logActivity) {
      activityFacade.logValidationActivity(featuregroup, user, featureGroupValidation);
    }
    // Persist validation results but throw an error if the data is invalid so that the client (hsfs) does not insert
    if (featuregroup.getValidationType().getSeverity() < status.getSeverity()) {
      throw new FeaturestoreException(RESTCodes.FeaturestoreErrorCode.FEATURE_GROUP_CHECKS_FAILED, FINE, "Results: " + results.stream().filter(result -> result.getStatus().getSeverity() >= FeatureGroupValidation.Status.WARNING.getSeverity()).collect(Collectors.toList()));
    }
    return featureGroupValidation;
  } catch (IOException ex) {
    throw new FeaturestoreException(RESTCodes.FeaturestoreErrorCode.COULD_NOT_READ_DATA_VALIDATION_RESULT, java.util.logging.Level.WARNING, "Failed to persist validation results", "Failed to persist validation result to HDFS for Feature group " + featuregroup.getName(), ex);
  } finally {
    // Always release the DFS client, even on failure.
    distributedFsService.closeDfsClient(udfso);
  }
}
Aggregations