Use of org.pmiops.workbench.exceptions.BadRequestException in the project workbench by all-of-us.
From the class WorkspacesController, method updateWorkspace:
@Override
public ResponseEntity<Workspace> updateWorkspace(String workspaceNamespace, String workspaceId, UpdateWorkspaceRequest request) {
  // Load the stored workspace first, then verify the caller holds at least WRITER access.
  org.pmiops.workbench.db.model.Workspace dbWorkspace =
      workspaceService.getRequired(workspaceNamespace, workspaceId);
  workspaceService.enforceWorkspaceAccessLevel(workspaceNamespace, workspaceId, WorkspaceAccessLevel.WRITER);
  Workspace clientWorkspace = request.getWorkspace();
  if (clientWorkspace == null) {
    throw new BadRequestException("No workspace provided in request");
  }
  String etag = clientWorkspace.getEtag();
  if (Strings.isNullOrEmpty(etag)) {
    throw new BadRequestException("Missing required update field 'etag'");
  }
  // Optimistic concurrency: the client's etag must match the stored version.
  if (dbWorkspace.getVersion() != Etags.toVersion(etag)) {
    throw new ConflictException("Attempted to modify outdated workspace version");
  }
  // The data access level is immutable after workspace creation.
  if (clientWorkspace.getDataAccessLevel() != null
      && !dbWorkspace.getDataAccessLevel().equals(clientWorkspace.getDataAccessLevel())) {
    throw new BadRequestException("Attempted to change data access level");
  }
  // Partial update semantics: only overwrite fields the client actually sent.
  if (clientWorkspace.getDescription() != null) {
    dbWorkspace.setDescription(clientWorkspace.getDescription());
  }
  if (clientWorkspace.getName() != null) {
    dbWorkspace.setName(clientWorkspace.getName());
  }
  // TODO: handle research purpose
  setCdrVersionId(dbWorkspace, clientWorkspace.getCdrVersionId());
  // The version asserted on save is the same as the one we read via
  // getRequired() above, see RW-215 for details.
  dbWorkspace = workspaceService.saveWithLastModified(dbWorkspace);
  return ResponseEntity.ok(TO_SINGLE_CLIENT_WORKSPACE.apply(dbWorkspace));
}
Use of org.pmiops.workbench.exceptions.BadRequestException in the project workbench by all-of-us.
From the class WorkspacesController, method cloneWorkspace:
/**
 * Clones an existing workspace: copies the Firecloud workspace and its notebook
 * blobs into a newly generated Firecloud namespace/name, then clones the AoU
 * database representation (including cohorts) with the caller as OWNER.
 *
 * @throws BadRequestException if the request body lacks a workspace or required fields
 * @throws ConflictException if a workspace with the target namespace/name already exists
 * @throws NotFoundException if the source workspace is missing or inaccessible
 * @throws FailedPreconditionException if a source notebook exceeds the size limit
 */
@Override
public ResponseEntity<CloneWorkspaceResponse> cloneWorkspace(String workspaceNamespace, String workspaceId, CloneWorkspaceRequest body) {
  Workspace workspace = body.getWorkspace();
  // Fail fast with a 400 instead of an NPE when the body carries no workspace
  // payload; mirrors the equivalent validation in updateWorkspace().
  if (workspace == null) {
    throw new BadRequestException("No workspace provided in request");
  }
  if (Strings.isNullOrEmpty(workspace.getNamespace())) {
    throw new BadRequestException("missing required field 'workspace.namespace'");
  } else if (Strings.isNullOrEmpty(workspace.getName())) {
    throw new BadRequestException("missing required field 'workspace.name'");
  } else if (workspace.getResearchPurpose() == null) {
    throw new BadRequestException("missing required field 'workspace.researchPurpose'");
  }
  User user = userProvider.get();
  // Reject early if the destination namespace/name is already taken.
  if (workspaceService.getByName(workspace.getNamespace(), workspace.getName()) != null) {
    throw new ConflictException(String.format("Workspace %s/%s already exists", workspace.getNamespace(), workspace.getName()));
  }
  // Retrieving the workspace is done first, which acts as an access check.
  String fromBucket = null;
  try {
    fromBucket = fireCloudService.getWorkspace(workspaceNamespace, workspaceId).getWorkspace().getBucketName();
  } catch (ApiException e) {
    if (e.getCode() == 404) {
      // Treat Firecloud 404s as "not found or not accessible" to avoid leaking
      // existence of workspaces the caller cannot see.
      log.log(Level.INFO, "Firecloud workspace not found", e);
      throw new NotFoundException(String.format("workspace %s/%s not found or not accessible", workspaceNamespace, workspaceId));
    }
    log.log(Level.SEVERE, "Firecloud server error", e);
    throw new ServerErrorException();
  }
  org.pmiops.workbench.db.model.Workspace fromWorkspace = workspaceService.getRequiredWithCohorts(workspaceNamespace, workspaceId);
  // NOTE(review): getRequired*-style methods typically throw rather than return
  // null, so this is likely a defensive no-op — kept for safety; confirm.
  if (fromWorkspace == null) {
    throw new NotFoundException(String.format("Workspace %s/%s not found", workspaceNamespace, workspaceId));
  }
  FirecloudWorkspaceId fcWorkspaceId = generateFirecloudWorkspaceId(workspace.getNamespace(), workspace.getName());
  fireCloudService.cloneWorkspace(workspaceNamespace, workspaceId, fcWorkspaceId.getWorkspaceNamespace(), fcWorkspaceId.getWorkspaceName());
  // Re-fetch the clone to learn server-assigned details (e.g. its bucket name).
  org.pmiops.workbench.firecloud.model.Workspace toFcWorkspace = null;
  try {
    toFcWorkspace = fireCloudService.getWorkspace(fcWorkspaceId.getWorkspaceNamespace(), fcWorkspaceId.getWorkspaceName()).getWorkspace();
  } catch (ApiException e) {
    log.log(Level.SEVERE, "Firecloud error retrieving newly cloned workspace", e);
    throw new ServerErrorException();
  }
  // Copy notebook blobs from the source bucket into the clone's bucket, but
  // only those we can feasibly copy within a single API request.
  for (Blob b : cloudStorageService.getBlobList(fromBucket, NOTEBOOKS_WORKSPACE_DIRECTORY)) {
    if (!NOTEBOOK_PATTERN.matcher(b.getName()).matches()) {
      continue;
    }
    if (b.getSize() != null && b.getSize() / 1e6 > MAX_NOTEBOOK_SIZE_MB) {
      throw new FailedPreconditionException(String.format("workspace %s/%s contains a notebook larger than %dMB: '%s'; cannot clone - please " + "remove this notebook, reduce its size, or contact the workspace owner", workspaceNamespace, workspaceId, MAX_NOTEBOOK_SIZE_MB, b.getName()));
    }
    cloudStorageService.copyBlob(b.getBlobId(), BlobId.of(toFcWorkspace.getBucketName(), b.getName()));
  }
  // The final step in the process is to clone the AoU representation of the
  // workspace. The implication here is that we may generate orphaned
  // Firecloud workspaces / buckets, but a user should not be able to see
  // half-way cloned workspaces via AoU - so it will just appear as a
  // transient failure.
  org.pmiops.workbench.db.model.Workspace toWorkspace = FROM_CLIENT_WORKSPACE.apply(body.getWorkspace());
  org.pmiops.workbench.db.model.Workspace dbWorkspace = new org.pmiops.workbench.db.model.Workspace();
  Timestamp now = new Timestamp(clock.instant().toEpochMilli());
  dbWorkspace.setFirecloudName(fcWorkspaceId.getWorkspaceName());
  dbWorkspace.setWorkspaceNamespace(fcWorkspaceId.getWorkspaceNamespace());
  dbWorkspace.setCreator(user);
  dbWorkspace.setCreationTime(now);
  dbWorkspace.setLastModifiedTime(now);
  dbWorkspace.setVersion(1);
  dbWorkspace.setName(toWorkspace.getName());
  ResearchPurpose researchPurpose = body.getWorkspace().getResearchPurpose();
  setResearchPurposeDetails(dbWorkspace, researchPurpose);
  if (researchPurpose.getReviewRequested()) {
    // Use a consistent timestamp.
    dbWorkspace.setTimeRequested(now);
  }
  dbWorkspace.setReviewRequested(researchPurpose.getReviewRequested());
  // Clone the previous description, by default.
  if (Strings.isNullOrEmpty(toWorkspace.getDescription())) {
    dbWorkspace.setDescription(fromWorkspace.getDescription());
  } else {
    dbWorkspace.setDescription(toWorkspace.getDescription());
  }
  // The clone inherits the source's CDR version and data access level.
  dbWorkspace.setCdrVersion(fromWorkspace.getCdrVersion());
  dbWorkspace.setDataAccessLevel(fromWorkspace.getDataAccessLevel());
  writeWorkspaceConfigFile(toFcWorkspace, dbWorkspace.getCdrVersion());
  // The cloning user becomes OWNER of the new workspace.
  org.pmiops.workbench.db.model.WorkspaceUserRole permissions = new org.pmiops.workbench.db.model.WorkspaceUserRole();
  permissions.setRole(WorkspaceAccessLevel.OWNER);
  permissions.setWorkspace(dbWorkspace);
  permissions.setUser(user);
  dbWorkspace.addWorkspaceUserRole(permissions);
  dbWorkspace = workspaceService.saveAndCloneCohorts(fromWorkspace, dbWorkspace);
  CloneWorkspaceResponse resp = new CloneWorkspaceResponse();
  resp.setWorkspace(TO_SINGLE_CLIENT_WORKSPACE_FROM_FC_AND_DB.apply(dbWorkspace, toFcWorkspace));
  return ResponseEntity.ok(resp);
}
Use of org.pmiops.workbench.exceptions.BadRequestException in the project workbench by all-of-us.
From the class WorkspacesController, method shareWorkspace:
@Override
public ResponseEntity<ShareWorkspaceResponse> shareWorkspace(String workspaceNamespace, String workspaceId, ShareWorkspaceRequest request) {
  // An etag is mandatory so role updates participate in optimistic locking.
  if (Strings.isNullOrEmpty(request.getWorkspaceEtag())) {
    throw new BadRequestException("Missing required update field 'workspaceEtag'");
  }
  org.pmiops.workbench.db.model.Workspace dbWorkspace =
      workspaceService.getRequired(workspaceNamespace, workspaceId);
  int requestedVersion = Etags.toVersion(request.getWorkspaceEtag());
  if (dbWorkspace.getVersion() != requestedVersion) {
    throw new ConflictException("Attempted to modify user roles with outdated workspace etag");
  }
  // Translate each requested (email, role) pair into a persistent role row,
  // rejecting any email that does not resolve to a known user.
  Set<WorkspaceUserRole> updatedRoles = new HashSet<>();
  for (UserRole requestedRole : request.getItems()) {
    User invitee = userDao.findUserByEmail(requestedRole.getEmail());
    if (invitee == null) {
      throw new BadRequestException(String.format("User %s doesn't exist", requestedRole.getEmail()));
    }
    WorkspaceUserRole dbRole = new WorkspaceUserRole();
    dbRole.setUser(invitee);
    dbRole.setRole(requestedRole.getRole());
    updatedRoles.add(dbRole);
  }
  // This automatically enforces owner role.
  dbWorkspace = workspaceService.updateUserRoles(dbWorkspace, updatedRoles);
  ShareWorkspaceResponse resp = new ShareWorkspaceResponse();
  resp.setWorkspaceEtag(Etags.fromVersion(dbWorkspace.getVersion()));
  return ResponseEntity.ok(resp);
}
Use of org.pmiops.workbench.exceptions.BadRequestException in the project workbench by all-of-us.
From the class CohortMaterializationService, method getTableQueryAndConfig:
/**
 * Resolves the {@code FieldSet} from a materialization request into a concrete
 * {@code TableQueryAndConfig}, validating the requested table, columns, and
 * ordering against the CDR BigQuery schema configuration.
 *
 * <p>A null field set defaults to selecting just person_id from the person table.
 * When no columns are requested, all configured columns for the table are used;
 * when no orderBy is given, a deterministic default keyed on person_id (plus the
 * table's primary key, when different) is applied.
 *
 * @param fieldSet the client-requested field set, or null for the default query
 * @return the validated query paired with its table and column configuration
 * @throws BadRequestException if the field set lacks a table query or table name,
 *     names an unknown cohort table, or references an unrecognized column
 */
private TableQueryAndConfig getTableQueryAndConfig(FieldSet fieldSet) {
  TableQuery tableQuery;
  if (fieldSet == null) {
    // No field set: default to the person table, person_id column only.
    tableQuery = new TableQuery();
    tableQuery.setTableName(PERSON_TABLE);
    tableQuery.setColumns(ImmutableList.of(PERSON_ID));
  } else {
    tableQuery = fieldSet.getTableQuery();
    if (tableQuery == null) {
      // TODO: support other kinds of field sets besides tableQuery
      throw new BadRequestException("tableQuery must be specified in field sets");
    }
    String tableName = tableQuery.getTableName();
    if (Strings.isNullOrEmpty(tableName)) {
      throw new BadRequestException("Table name must be specified in field sets");
    }
  }
  CdrBigQuerySchemaConfig cdrSchemaConfig = cdrSchemaConfigProvider.get();
  TableConfig tableConfig = cdrSchemaConfig.cohortTables.get(tableQuery.getTableName());
  if (tableConfig == null) {
    throw new BadRequestException("Table " + tableQuery.getTableName() + " is not a valid "
        + "cohort table; valid tables are: "
        + cdrSchemaConfig.cohortTables.keySet().stream().sorted().collect(Collectors.joining(",")));
  }
  // Index the table's column configs by name for O(1) validation lookups.
  Map<String, ColumnConfig> columnMap =
      Maps.uniqueIndex(tableConfig.columns, columnConfig -> columnConfig.name);
  List<String> columnNames = tableQuery.getColumns();
  if (columnNames == null || columnNames.isEmpty()) {
    // By default, return all columns on the table in question in our configuration.
    tableQuery.setColumns(columnMap.keySet().stream().collect(Collectors.toList()));
  } else {
    for (String columnName : columnNames) {
      // TODO: handle columns on foreign key tables
      if (!columnMap.containsKey(columnName)) {
        throw new BadRequestException("Unrecognized column name: " + columnName);
      }
    }
  }
  List<String> orderBy = tableQuery.getOrderBy();
  if (orderBy == null || orderBy.isEmpty()) {
    // NOTE(review): assumes findPrimaryKey never returns null for a configured
    // cohort table — confirm against the schema config invariants.
    ColumnConfig primaryKey = findPrimaryKey(tableConfig);
    // BUG FIX: previously this compared the PERSON_ID string against the
    // ColumnConfig object itself, which is always false, so person_id-keyed
    // tables got a redundant (person_id, person_id) ordering. Compare the
    // primary key's *name* instead.
    if (PERSON_ID.equals(primaryKey.name)) {
      tableQuery.setOrderBy(ImmutableList.of(PERSON_ID));
    } else {
      // TODO: consider having per-table default sort order based on e.g. timestamp
      tableQuery.setOrderBy(ImmutableList.of(PERSON_ID, primaryKey.name));
    }
  } else {
    for (String columnName : orderBy) {
      // Permit a trailing descending suffix; validate the bare column name.
      if (columnName.toUpperCase().endsWith(DESCENDING_SUFFIX)) {
        columnName = columnName.substring(0, columnName.length() - DESCENDING_SUFFIX.length());
      }
      if (!columnMap.containsKey(columnName)) {
        throw new BadRequestException("Invalid column in orderBy: " + columnName);
      }
    }
  }
  return new TableQueryAndConfig(tableQuery, tableConfig, columnMap);
}
Use of org.pmiops.workbench.exceptions.BadRequestException in the project workbench by all-of-us.
From the class CohortMaterializationService, method materializeCohort:
/**
 * Materializes a cohort into concrete result rows by combining the cohort's
 * search criteria, an optional review-status filter, and a field set, then
 * executing the resulting query against BigQuery with offset-based pagination.
 *
 * @param cohortReview the review holding per-participant statuses; may be null
 *     (used only when the status filter requires looking up reviewed statuses)
 * @param searchRequest the cohort's search criteria
 * @param request page size, page token, status filter, and field set
 * @return a response with up to pageSize result rows and, when more rows
 *     remain, a nextPageToken encoding the following offset
 * @throws BadRequestException if a pagination token is reused with different
 *     search parameters
 */
public MaterializeCohortResponse materializeCohort(@Nullable CohortReview cohortReview, SearchRequest searchRequest, MaterializeCohortRequest request) {
long offset = 0L;
FieldSet fieldSet = request.getFieldSet();
List<CohortStatus> statusFilter = request.getStatusFilter();
String paginationToken = request.getPageToken();
int pageSize = request.getPageSize();
// TODO: add CDR version ID here
// Parameters baked into the pagination token; a token is only valid for
// requests with identical parameters.
Object[] paginationParameters = new Object[] { searchRequest, statusFilter };
if (paginationToken != null) {
PaginationToken token = PaginationToken.fromBase64(paginationToken);
if (token.matchesParameters(paginationParameters)) {
offset = token.getOffset();
} else {
throw new BadRequestException(String.format("Use of pagination token %s with new parameter values", paginationToken));
}
}
// Fetch one extra row beyond the page size to detect whether more results exist.
int limit = pageSize + 1;
if (statusFilter == null) {
statusFilter = ALL_STATUSES;
}
ParticipantCriteria criteria;
MaterializeCohortResponse response = new MaterializeCohortResponse();
if (statusFilter.contains(CohortStatus.NOT_REVIEWED)) {
// NOT_REVIEWED participants can't be enumerated directly (they have no review
// rows), so instead run the search query and exclude participants whose
// recorded status is *not* in the filter.
Set<Long> participantIdsToExclude;
if (statusFilter.size() < CohortStatus.values().length) {
// Find the participant IDs that have statuses which *aren't* in the filter.
Set<CohortStatus> statusesToExclude = Sets.difference(ImmutableSet.copyOf(CohortStatus.values()), ImmutableSet.copyOf(statusFilter));
participantIdsToExclude = getParticipantIdsWithStatus(cohortReview, ImmutableList.copyOf(statusesToExclude));
} else {
// Filter covers every status: nothing to exclude.
participantIdsToExclude = ImmutableSet.of();
}
criteria = new ParticipantCriteria(searchRequest, participantIdsToExclude);
} else {
// Only reviewed statuses requested: restrict directly to the matching IDs.
Set<Long> participantIds = getParticipantIdsWithStatus(cohortReview, statusFilter);
if (participantIds.isEmpty()) {
// return an empty response.
return response;
}
criteria = new ParticipantCriteria(participantIds);
}
TableQueryAndConfig tableQueryAndConfig = getTableQueryAndConfig(fieldSet);
QueryJobConfiguration jobConfiguration = fieldSetQueryBuilder.buildQuery(criteria, tableQueryAndConfig, limit, offset);
QueryResult result;
try {
result = bigQueryService.executeQuery(bigQueryService.filterBigQueryConfig(jobConfiguration));
} catch (BigQueryException e) {
// Map BigQuery failures onto API-level exceptions: 503 -> retryable,
// 403 -> access denied, anything else -> server error with query context.
if (e.getCode() == HttpServletResponse.SC_SERVICE_UNAVAILABLE) {
throw new ServerUnavailableException("BigQuery was temporarily unavailable, try again later", e);
} else if (e.getCode() == HttpServletResponse.SC_FORBIDDEN) {
throw new ForbiddenException("Access to the CDR is denied", e);
} else {
throw new ServerErrorException(String.format("An unexpected error occurred materializing the cohort with " + "query = (%s), params = (%s)", jobConfiguration.getQuery(), jobConfiguration.getNamedParameters()), e);
}
}
// NOTE(review): rm appears unused below — extractResults works from
// tableQueryAndConfig; confirm whether this call has needed side effects.
Map<String, Integer> rm = bigQueryService.getResultMapper(result);
int numResults = 0;
boolean hasMoreResults = false;
ArrayList<Object> results = new ArrayList<>();
for (List<FieldValue> row : result.iterateAll()) {
// We asked for pageSize + 1 rows; seeing a row beyond pageSize means
// there is at least one more page.
if (numResults == pageSize) {
hasMoreResults = true;
break;
}
Map<String, Object> resultMap = fieldSetQueryBuilder.extractResults(tableQueryAndConfig, row);
results.add(resultMap);
numResults++;
}
response.setResults(results);
if (hasMoreResults) {
// TODO: consider pagination based on cursor / values rather than offset
PaginationToken token = PaginationToken.of(offset + pageSize, paginationParameters);
response.setNextPageToken(token.toBase64());
}
return response;
}
Aggregations