Use of io.hops.hopsworks.persistence.entity.dataset.Dataset in project hopsworks by logicalclocks.
In class HopssiteService, method getLocalDataset:
@GET
@Path("datasets/{publicDSId}/local")
public Response getLocalDataset(@PathParam("publicDSId") String publicDSId, @Context SecurityContext sc) {
  Optional<Dataset> datasets = datasetFacade.findByPublicDsId(publicDSId);
  if (!datasets.isPresent()) {
    return noCacheResponse.getNoCacheResponseBuilder(Response.Status.BAD_REQUEST).build();
  }
  Dataset ds = datasets.get();
  // to get the real parent project
  Inode parent = inodes.findParent(ds.getInode());
  LocalDatasetDTO datasetDTO =
    new LocalDatasetDTO(ds.getInodeId(), ds.getName(), ds.getDescription(), parent.getInodePK().getName());
  LOGGER.log(Settings.DELA_DEBUG, "Get a local dataset by public id.");
  return noCacheResponse.getNoCacheResponseBuilder(Response.Status.OK).entity(datasetDTO).build();
}
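A minimal sketch of how a client might call this endpoint using the standard JAX-RS 2.0 client API. The base URL, the "hopssite" path prefix, and the public dataset id are placeholders, not taken from the Hopsworks source:

import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;

public class LocalDatasetClientSketch {
  public static void main(String[] args) {
    Client client = ClientBuilder.newClient();
    try {
      // Hypothetical base URL and resource prefix; the real deployment paths may differ.
      Response response = client
        .target("https://hopsworks.example.com/hopsworks-api/api")
        .path("hopssite/datasets/{publicDSId}/local")
        .resolveTemplate("publicDSId", "some-public-ds-id")
        .request(MediaType.APPLICATION_JSON)
        .get();
      if (response.getStatus() == 200) {
        // Read the LocalDatasetDTO as raw JSON to keep the sketch self-contained.
        String json = response.readEntity(String.class);
        System.out.println(json);
      } else {
        // The service answers BAD_REQUEST when no dataset matches the public id.
        System.err.println("Unexpected status: " + response.getStatus());
      }
    } finally {
      client.close();
    }
  }
}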
Use of io.hops.hopsworks.persistence.entity.dataset.Dataset in project hopsworks by logicalclocks.
In class DownloadService, method downloadFromHDFS:
/**
 * @param project the project in whose context the download is performed
 * @param datasetPath the dataset path to download from HDFS
 * @param user the user requesting the download
 * @return the resolved HDFS path paired with a StreamingOutput over the file contents
 */
private Pair<Path, StreamingOutput> downloadFromHDFS(Project project, DatasetPath datasetPath, Users user)
    throws DatasetException {
  String fullPath = datasetPath.getFullPath().toString();
  String projectUsername = hdfsUsersController.getHdfsUserName(project, user);
  Dataset ds = datasetPath.getDataset();
  if (ds.isShared(project) && ds.getFilePermissions().equals(DatasetPermissions.OWNER_ONLY) && !ds.isPublicDs()) {
    throw new DatasetException(RESTCodes.DatasetErrorCode.DOWNLOAD_ERROR, Level.FINE);
  }
  FSDataInputStream stream;
  DistributedFileSystemOps udfso;
  try {
    if (projectUsername != null) {
      udfso = dfs.getDfsOps(projectUsername);
      Path p = new Path(fullPath);
      stream = udfso.open(p);
      return new Pair<>(p, buildOutputStream(stream, udfso));
    } else {
      throw new DatasetException(RESTCodes.DatasetErrorCode.DOWNLOAD_ERROR, Level.WARNING);
    }
  } catch (IOException ex) {
    throw new DatasetException(RESTCodes.DatasetErrorCode.DOWNLOAD_ERROR, Level.SEVERE, "path: " + fullPath,
      ex.getMessage(), ex);
  }
}
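The buildOutputStream helper is referenced but not shown above. A plausible sketch of what such a helper inside DownloadService could look like, assuming it streams the HDFS file into the HTTP response and then releases both the stream and the per-user filesystem client (dfs.closeDfsClient appears in the getDatasetsProvType example below, so the cleanup call is grounded; everything else is an assumption):

import java.io.IOException;
import java.io.OutputStream;
import javax.ws.rs.core.StreamingOutput;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.io.IOUtils;

// Sketch only: the real Hopsworks implementation of buildOutputStream may differ.
private StreamingOutput buildOutputStream(final FSDataInputStream stream, final DistributedFileSystemOps udfso) {
  return new StreamingOutput() {
    @Override
    public void write(OutputStream out) throws IOException {
      try {
        // Copy with a 64 KiB buffer; this overload of copyBytes does not close the streams.
        IOUtils.copyBytes(stream, out, 64 * 1024, false);
        out.flush();
      } finally {
        IOUtils.closeStream(stream);
        // Assumed cleanup: release the per-user DFS client once streaming finishes.
        dfs.closeDfsClient(udfso);
      }
    }
  };
}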
Use of io.hops.hopsworks.persistence.entity.dataset.Dataset in project hopsworks by logicalclocks.
In class HopsFSProvenanceController, method updateProjectProvType:
public void updateProjectProvType(Project project, ProvTypeDTO newProvType, DistributedFileSystemOps dfso)
    throws ProvenanceException {
  String projectPath = Utils.getProjectPath(project.getName());
  ProvCoreDTO provCore = getProvCoreXAttr(projectPath, dfso);
  if (provCore != null && newProvType.equals(provCore.getType())) {
    return;
  }
  provCore = new ProvCoreDTO(newProvType, null);
  setProvCoreXAttr(projectPath, provCore, dfso);
  provCore = new ProvCoreDTO(newProvType, project.getInode().getId());
  for (Dataset dataset : project.getDatasetCollection()) {
    String datasetPath = Utils.getFileSystemDatasetPath(dataset, settings);
    ProvCoreDTO datasetProvCore = getProvCoreXAttr(datasetPath, dfso);
    if (datasetProvCore != null
        && (datasetProvCore.getType().equals(Provenance.Type.DISABLED.dto)
            || datasetProvCore.getType().equals(newProvType))) {
      continue;
    }
    updateDatasetProvType(datasetPath, provCore, dfso);
  }
}
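getProvCoreXAttr and setProvCoreXAttr are Hopsworks helpers not shown here. As an illustration of the underlying mechanism only, a minimal sketch using the stock Hadoop extended-attribute API; the attribute name and the JSON payload encoding are assumptions, not the real Hopsworks conventions:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ProvXAttrSketch {
  // Hypothetical xattr key; the actual Hopsworks attribute name may differ.
  private static final String PROV_CORE_XATTR = "provenance.core";

  public static void setProvCoreXAttr(FileSystem fs, String path, String provCoreJson) throws IOException {
    // User-namespace xattrs must carry the "user." prefix in HDFS.
    fs.setXAttr(new Path(path), "user." + PROV_CORE_XATTR, provCoreJson.getBytes(StandardCharsets.UTF_8));
  }

  public static String getProvCoreXAttr(FileSystem fs, String path) throws IOException {
    try {
      byte[] raw = fs.getXAttr(new Path(path), "user." + PROV_CORE_XATTR);
      return raw == null ? null : new String(raw, StandardCharsets.UTF_8);
    } catch (IOException e) {
      // HDFS throws when the attribute is absent; treat that as "no provenance xattr".
      return null;
    }
  }
}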
Use of io.hops.hopsworks.persistence.entity.dataset.Dataset in project hopsworks by logicalclocks.
In class HopsFSProvenanceController, method getDatasetsProvType:
public List<ProvDatasetDTO> getDatasetsProvType(Users user, Project project) throws ProvenanceException {
  String hdfsUsername = hdfsUsersController.getHdfsUserName(project, user);
  DistributedFileSystemOps udfso = dfs.getDfsOps(hdfsUsername);
  try {
    List<ProvDatasetDTO> result = new ArrayList<>();
    for (Dataset dataset : project.getDatasetCollection()) {
      String datasetPath = Utils.getFileSystemDatasetPath(dataset, settings);
      ProvCoreDTO provCore = getProvCoreXAttr(datasetPath, udfso);
      if (provCore == null) {
        throw new ProvenanceException(RESTCodes.ProvenanceErrorCode.INTERNAL_ERROR, Level.WARNING,
          "malformed dataset - provenance", "no provenance core xattr");
      }
      ProvDatasetDTO dsState = new ProvDatasetDTO(dataset.getName(), dataset.getInode().getId(), provCore.getType());
      result.add(dsState);
    }
    for (DatasetSharedWith dataset : project.getDatasetSharedWithCollection()) {
      String datasetPath = Utils.getFileSystemDatasetPath(dataset.getDataset(), settings);
      ProvCoreDTO provCore = getProvCoreXAttr(datasetPath, udfso);
      if (provCore == null) {
        throw new ProvenanceException(RESTCodes.ProvenanceErrorCode.INTERNAL_ERROR, Level.WARNING,
          "malformed dataset - provenance", "no provenance core xattr");
      }
      ProvDatasetDTO dsState = new ProvDatasetDTO(
        dataset.getDataset().getProject().getName() + "::" + dataset.getDataset().getName(),
        dataset.getDataset().getInode().getId(), provCore.getType());
      result.add(dsState);
    }
    return result;
  } finally {
    if (udfso != null) {
      dfs.closeDfsClient(udfso);
    }
  }
}
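Note the naming convention above: datasets shared into the project are reported as "ownerProject::datasetName", while local datasets carry a bare name. A small sketch of how a consumer of this method could recover the owning project from the DTO name; the helper itself is hypothetical:

// Sketch: split the "ownerProject::datasetName" convention used by getDatasetsProvType.
public static String owningProject(String dtoName, String currentProject) {
  int sep = dtoName.indexOf("::");
  // A bare name means the dataset is local to the current project.
  return sep < 0 ? currentProject : dtoName.substring(0, sep);
}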
Use of io.hops.hopsworks.persistence.entity.dataset.Dataset in project hopsworks by logicalclocks.
In class ProjectController, method getQuotasInternal:
public QuotasDTO getQuotasInternal(Project project) {
  long hdfsQuota = -1L, hdfsUsage = -1L, hdfsNsQuota = -1L, hdfsNsCount = -1L;
  long dbhdfsQuota = -1L, dbhdfsUsage = -1L, dbhdfsNsQuota = -1L, dbhdfsNsCount = -1L;
  long fshdfsQuota = -1L, fshdfsUsage = -1L, fshdfsNsQuota = -1L, fshdfsNsCount = -1L;
  float yarnRemainingQuota = 0f, yarnTotalQuota = 0f;
  // Yarn Quota
  YarnProjectsQuota yarnQuota = yarnProjectsQuotaFacade.findByProjectName(project.getName());
  if (yarnQuota == null) {
    LOGGER.log(Level.SEVERE, "Cannot find YARN quota information for project: " + project.getName());
  } else {
    yarnRemainingQuota = yarnQuota.getQuotaRemaining();
    yarnTotalQuota = yarnQuota.getTotal();
  }
  // HDFS project directory quota
  Optional<HdfsDirectoryWithQuotaFeature> projectInodeAttrsOptional =
    hdfsDirectoryWithQuotaFeatureFacade.getByInodeId(project.getInode().getId());
  if (projectInodeAttrsOptional.isPresent()) {
    hdfsQuota = projectInodeAttrsOptional.get().getSsquota().longValue();
    hdfsUsage = projectInodeAttrsOptional.get().getStorageSpace().longValue();
    hdfsNsQuota = projectInodeAttrsOptional.get().getNsquota().longValue();
    hdfsNsCount = projectInodeAttrsOptional.get().getNscount().longValue();
  }
  // If the Hive service is enabled, get the quota information for the db directory
  List<Dataset> datasets = (List<Dataset>) project.getDatasetCollection();
  for (Dataset ds : datasets) {
    if (ds.getDsType() == DatasetType.HIVEDB) {
      Optional<HdfsDirectoryWithQuotaFeature> dbInodeAttrsOptional =
        hdfsDirectoryWithQuotaFeatureFacade.getByInodeId(ds.getInodeId());
      if (dbInodeAttrsOptional.isPresent()) {
        dbhdfsQuota = dbInodeAttrsOptional.get().getSsquota().longValue();
        dbhdfsUsage = dbInodeAttrsOptional.get().getStorageSpace().longValue();
        dbhdfsNsQuota = dbInodeAttrsOptional.get().getNsquota().longValue();
        dbhdfsNsCount = dbInodeAttrsOptional.get().getNscount().longValue();
      }
    } else if (ds.getDsType() == DatasetType.FEATURESTORE) {
      Optional<HdfsDirectoryWithQuotaFeature> fsInodeAttrsOptional =
        hdfsDirectoryWithQuotaFeatureFacade.getByInodeId(ds.getInodeId());
      if (fsInodeAttrsOptional.isPresent()) {
        fshdfsQuota = fsInodeAttrsOptional.get().getSsquota().longValue();
        fshdfsUsage = fsInodeAttrsOptional.get().getStorageSpace().longValue();
        fshdfsNsQuota = fsInodeAttrsOptional.get().getNsquota().longValue();
        fshdfsNsCount = fsInodeAttrsOptional.get().getNscount().longValue();
      }
    }
  }
  Integer kafkaQuota = project.getKafkaMaxNumTopics();
  return new QuotasDTO(yarnRemainingQuota, yarnTotalQuota, hdfsQuota, hdfsUsage, hdfsNsQuota, hdfsNsCount,
    dbhdfsQuota, dbhdfsUsage, dbhdfsNsQuota, dbhdfsNsCount, fshdfsQuota, fshdfsUsage, fshdfsNsQuota,
    fshdfsNsCount, kafkaQuota);
}
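The method initializes every HDFS quota field to -1, so -1 acts as a sentinel for "no quota information found". A small, self-contained display-helper sketch for consumers of QuotasDTO that handles the sentinel and renders byte counts in binary units; the helper is illustrative, not part of Hopsworks:

// Sketch: format an HDFS space quota from getQuotasInternal for display.
public static String formatHdfsQuota(long bytes) {
  if (bytes < 0) {
    return "unknown"; // -1 sentinel: no quota information was found
  }
  String[] units = {"B", "KiB", "MiB", "GiB", "TiB"};
  double value = bytes;
  int unit = 0;
  while (value >= 1024 && unit < units.length - 1) {
    value /= 1024;
    unit++;
  }
  return String.format("%.1f %s", value, units[unit]);
}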