Use of org.pmiops.workbench.exceptions.ServerErrorException in the project "workbench" by all-of-us.
Shown below: the class WorkspaceServiceImpl, method getWorkspaceAccessLevel.
/**
 * Returns the calling user's access level for the given workspace, as reported by Firecloud.
 *
 * <p>Firecloud's PROJECT_OWNER level is not exposed in our API and is mapped to OWNER.
 *
 * @param workspaceNamespace the Firecloud workspace namespace (billing project)
 * @param workspaceId the Firecloud workspace name
 * @throws NotFoundException if Firecloud reports the workspace does not exist (HTTP 404)
 * @throws ServerErrorException on any other Firecloud error, or if Firecloud returns an
 *     access level we do not recognize
 */
@Override
public WorkspaceAccessLevel getWorkspaceAccessLevel(String workspaceNamespace, String workspaceId) {
  String userAccess;
  try {
    userAccess = fireCloudService.getWorkspace(workspaceNamespace, workspaceId).getAccessLevel();
  } catch (org.pmiops.workbench.firecloud.ApiException e) {
    if (e.getCode() == 404) {
      throw new NotFoundException(String.format("Workspace %s/%s not found", workspaceNamespace, workspaceId));
    } else {
      // Attach the original ApiException as the cause so the underlying stack trace
      // is not lost when this propagates (previously only the response body survived).
      throw new ServerErrorException(e.getResponseBody(), e);
    }
  }
  // Constant-first equals guards against a null access level from Firecloud; a null
  // value then falls through to the "unrecognized" error below instead of an NPE.
  if (PROJECT_OWNER_ACCESS_LEVEL.equals(userAccess)) {
    // We don't expose PROJECT_OWNER in our API; treat it as OWNER.
    return WorkspaceAccessLevel.OWNER;
  }
  WorkspaceAccessLevel result = WorkspaceAccessLevel.fromValue(userAccess);
  if (result == null) {
    throw new ServerErrorException("Unrecognized access level: " + userAccess);
  }
  return result;
}
Use of org.pmiops.workbench.exceptions.ServerErrorException in the project "workbench" by all-of-us.
Shown below: the class WorkspacesController, method getWorkspace.
/**
 * Fetches a single workspace, combining our database record with the Firecloud
 * representation. The Firecloud lookup doubles as the access-control check.
 *
 * @throws ServerErrorException if Firecloud reports an access level our API does not support
 */
@Override
public ResponseEntity<WorkspaceResponse> getWorkspace(String workspaceNamespace, String workspaceId) {
  org.pmiops.workbench.db.model.Workspace dbWorkspace = workspaceService.getRequired(workspaceNamespace, workspaceId);
  org.pmiops.workbench.firecloud.model.WorkspaceResponse fcResponse;
  try {
    // This enforces access controls.
    fcResponse = fireCloudService.getWorkspace(workspaceNamespace, workspaceId);
  } catch (org.pmiops.workbench.firecloud.ApiException e) {
    throw ExceptionUtils.convertFirecloudException(e);
  }
  org.pmiops.workbench.firecloud.model.Workspace fcWorkspace = fcResponse.getWorkspace();
  String fcAccessLevel = fcResponse.getAccessLevel();
  WorkspaceResponse response = new WorkspaceResponse();
  if (fcAccessLevel.equals(WorkspaceService.PROJECT_OWNER_ACCESS_LEVEL)) {
    // We don't expose PROJECT_OWNER in our API; just use OWNER.
    response.setAccessLevel(WorkspaceAccessLevel.OWNER);
  } else {
    WorkspaceAccessLevel mapped = WorkspaceAccessLevel.fromValue(fcAccessLevel);
    if (mapped == null) {
      throw new ServerErrorException("Unsupported access level: " + fcAccessLevel);
    }
    response.setAccessLevel(mapped);
  }
  response.setWorkspace(TO_SINGLE_CLIENT_WORKSPACE_FROM_FC_AND_DB.apply(dbWorkspace, fcWorkspace));
  return ResponseEntity.ok(response);
}
Use of org.pmiops.workbench.exceptions.ServerErrorException in the project "workbench" by all-of-us.
Shown below: the class WorkspacesController, method cloneWorkspace.
/**
 * Clones an existing workspace into a new one: validates the request, clones the
 * Firecloud workspace, copies notebook blobs between GCS buckets, then clones our
 * database representation (including cohorts). The steps are order-dependent: the
 * Firecloud lookup of the source acts as the access check, and the DB save happens
 * last so a mid-flight failure never exposes a half-cloned workspace via our API.
 *
 * @throws BadRequestException if required fields of the target workspace are missing
 * @throws ConflictException if a workspace with the target namespace/name already exists
 * @throws NotFoundException if the source workspace is missing or inaccessible
 * @throws FailedPreconditionException if a source notebook exceeds the size limit
 * @throws ServerErrorException on unexpected Firecloud failures
 */
@Override
public ResponseEntity<CloneWorkspaceResponse> cloneWorkspace(String workspaceNamespace, String workspaceId, CloneWorkspaceRequest body) {
Workspace workspace = body.getWorkspace();
// Validate the target workspace fields supplied by the caller.
if (Strings.isNullOrEmpty(workspace.getNamespace())) {
throw new BadRequestException("missing required field 'workspace.namespace'");
} else if (Strings.isNullOrEmpty(workspace.getName())) {
throw new BadRequestException("missing required field 'workspace.name'");
} else if (workspace.getResearchPurpose() == null) {
throw new BadRequestException("missing required field 'workspace.researchPurpose'");
}
User user = userProvider.get();
// Reject the clone if the destination namespace/name is already taken.
if (workspaceService.getByName(workspace.getNamespace(), workspace.getName()) != null) {
throw new ConflictException(String.format("Workspace %s/%s already exists", workspace.getNamespace(), workspace.getName()));
}
// Retrieving the workspace is done first, which acts as an access check.
// We also need the source bucket name to copy notebooks from later.
String fromBucket = null;
try {
fromBucket = fireCloudService.getWorkspace(workspaceNamespace, workspaceId).getWorkspace().getBucketName();
} catch (ApiException e) {
if (e.getCode() == 404) {
log.log(Level.INFO, "Firecloud workspace not found", e);
throw new NotFoundException(String.format("workspace %s/%s not found or not accessible", workspaceNamespace, workspaceId));
}
// Non-404 Firecloud failures are logged in full before the generic 500 is raised.
log.log(Level.SEVERE, "Firecloud server error", e);
throw new ServerErrorException();
}
org.pmiops.workbench.db.model.Workspace fromWorkspace = workspaceService.getRequiredWithCohorts(workspaceNamespace, workspaceId);
if (fromWorkspace == null) {
throw new NotFoundException(String.format("Workspace %s/%s not found", workspaceNamespace, workspaceId));
}
FirecloudWorkspaceId fcWorkspaceId = generateFirecloudWorkspaceId(workspace.getNamespace(), workspace.getName());
// Clone the workspace on the Firecloud side first.
fireCloudService.cloneWorkspace(workspaceNamespace, workspaceId, fcWorkspaceId.getWorkspaceNamespace(), fcWorkspaceId.getWorkspaceName());
// Re-fetch the clone to learn its GCS bucket name (needed for the notebook copy).
org.pmiops.workbench.firecloud.model.Workspace toFcWorkspace = null;
try {
toFcWorkspace = fireCloudService.getWorkspace(fcWorkspaceId.getWorkspaceNamespace(), fcWorkspaceId.getWorkspaceName()).getWorkspace();
} catch (ApiException e) {
log.log(Level.SEVERE, "Firecloud error retrieving newly cloned workspace", e);
throw new ServerErrorException();
}
// Copy notebook blobs from the source bucket to the clone's bucket, capping
// individual notebook size so each blob is small enough that we can
// feasibly copy it within a single API request.
for (Blob b : cloudStorageService.getBlobList(fromBucket, NOTEBOOKS_WORKSPACE_DIRECTORY)) {
// Only copy objects that look like notebooks; skip everything else in the directory.
if (!NOTEBOOK_PATTERN.matcher(b.getName()).matches()) {
continue;
}
if (b.getSize() != null && b.getSize() / 1e6 > MAX_NOTEBOOK_SIZE_MB) {
throw new FailedPreconditionException(String.format("workspace %s/%s contains a notebook larger than %dMB: '%s'; cannot clone - please " + "remove this notebook, reduce its size, or contact the workspace owner", workspaceNamespace, workspaceId, MAX_NOTEBOOK_SIZE_MB, b.getName()));
}
cloudStorageService.copyBlob(b.getBlobId(), BlobId.of(toFcWorkspace.getBucketName(), b.getName()));
}
// The final step in the process is to clone the AoU representation of the
// workspace. The implication here is that we may generate orphaned
// Firecloud workspaces / buckets, but a user should not be able to see
// half-way cloned workspaces via AoU - so it will just appear as a
// transient failure.
org.pmiops.workbench.db.model.Workspace toWorkspace = FROM_CLIENT_WORKSPACE.apply(body.getWorkspace());
org.pmiops.workbench.db.model.Workspace dbWorkspace = new org.pmiops.workbench.db.model.Workspace();
Timestamp now = new Timestamp(clock.instant().toEpochMilli());
dbWorkspace.setFirecloudName(fcWorkspaceId.getWorkspaceName());
dbWorkspace.setWorkspaceNamespace(fcWorkspaceId.getWorkspaceNamespace());
dbWorkspace.setCreator(user);
dbWorkspace.setCreationTime(now);
dbWorkspace.setLastModifiedTime(now);
dbWorkspace.setVersion(1);
dbWorkspace.setName(toWorkspace.getName());
ResearchPurpose researchPurpose = body.getWorkspace().getResearchPurpose();
setResearchPurposeDetails(dbWorkspace, researchPurpose);
if (researchPurpose.getReviewRequested()) {
// Use a consistent timestamp.
dbWorkspace.setTimeRequested(now);
}
dbWorkspace.setReviewRequested(researchPurpose.getReviewRequested());
// Clone the previous description, by default.
if (Strings.isNullOrEmpty(toWorkspace.getDescription())) {
dbWorkspace.setDescription(fromWorkspace.getDescription());
} else {
dbWorkspace.setDescription(toWorkspace.getDescription());
}
// CDR version and data access level are inherited from the source workspace.
dbWorkspace.setCdrVersion(fromWorkspace.getCdrVersion());
dbWorkspace.setDataAccessLevel(fromWorkspace.getDataAccessLevel());
writeWorkspaceConfigFile(toFcWorkspace, dbWorkspace.getCdrVersion());
// The cloning user becomes the OWNER of the new workspace.
org.pmiops.workbench.db.model.WorkspaceUserRole permissions = new org.pmiops.workbench.db.model.WorkspaceUserRole();
permissions.setRole(WorkspaceAccessLevel.OWNER);
permissions.setWorkspace(dbWorkspace);
permissions.setUser(user);
dbWorkspace.addWorkspaceUserRole(permissions);
// Persists the clone and copies cohorts from the source workspace in one step.
dbWorkspace = workspaceService.saveAndCloneCohorts(fromWorkspace, dbWorkspace);
CloneWorkspaceResponse resp = new CloneWorkspaceResponse();
resp.setWorkspace(TO_SINGLE_CLIENT_WORKSPACE_FROM_FC_AND_DB.apply(dbWorkspace, toFcWorkspace));
return ResponseEntity.ok(resp);
}
Use of org.pmiops.workbench.exceptions.ServerErrorException in the project "workbench" by all-of-us.
Shown below: the class CohortMaterializationService, method materializeCohort.
/**
 * Materializes a page of participant data for a cohort via BigQuery, optionally
 * restricted to participants with particular review statuses.
 *
 * <p>Pagination uses an offset encoded in an opaque token; the token also captures
 * the (searchRequest, statusFilter) parameters so a stale token cannot be replayed
 * against different query parameters. One extra row beyond the page size is fetched
 * to detect whether a next page exists.
 *
 * @param cohortReview the review for this cohort, if one exists; used to resolve
 *     participant IDs by review status (nullable)
 * @param searchRequest the cohort definition to query against
 * @param request page size, page token, status filter, and the field set to return
 * @throws BadRequestException if the page token was issued for different parameters
 * @throws ServerUnavailableException if BigQuery is temporarily unavailable
 * @throws ForbiddenException if access to the CDR is denied
 * @throws ServerErrorException on any other BigQuery failure
 */
public MaterializeCohortResponse materializeCohort(@Nullable CohortReview cohortReview, SearchRequest searchRequest, MaterializeCohortRequest request) {
long offset = 0L;
FieldSet fieldSet = request.getFieldSet();
List<CohortStatus> statusFilter = request.getStatusFilter();
String paginationToken = request.getPageToken();
int pageSize = request.getPageSize();
// TODO: add CDR version ID here
// Parameters baked into the pagination token; a token is only honored when it
// was issued for exactly these values.
Object[] paginationParameters = new Object[] { searchRequest, statusFilter };
if (paginationToken != null) {
PaginationToken token = PaginationToken.fromBase64(paginationToken);
if (token.matchesParameters(paginationParameters)) {
offset = token.getOffset();
} else {
throw new BadRequestException(String.format("Use of pagination token %s with new parameter values", paginationToken));
}
}
// Fetch one extra row so we can tell whether another page exists.
int limit = pageSize + 1;
if (statusFilter == null) {
statusFilter = ALL_STATUSES;
}
ParticipantCriteria criteria;
MaterializeCohortResponse response = new MaterializeCohortResponse();
if (statusFilter.contains(CohortStatus.NOT_REVIEWED)) {
// NOT_REVIEWED participants have no review record, so they cannot be selected
// by ID; instead, run the cohort query and exclude participants whose recorded
// status is NOT in the filter.
Set<Long> participantIdsToExclude;
if (statusFilter.size() < CohortStatus.values().length) {
// Find the participant IDs that have statuses which *aren't* in the filter.
Set<CohortStatus> statusesToExclude = Sets.difference(ImmutableSet.copyOf(CohortStatus.values()), ImmutableSet.copyOf(statusFilter));
participantIdsToExclude = getParticipantIdsWithStatus(cohortReview, ImmutableList.copyOf(statusesToExclude));
} else {
// Every status is included: nothing to exclude.
participantIdsToExclude = ImmutableSet.of();
}
criteria = new ParticipantCriteria(searchRequest, participantIdsToExclude);
} else {
// Only reviewed statuses requested: select the matching participant IDs directly.
Set<Long> participantIds = getParticipantIdsWithStatus(cohortReview, statusFilter);
if (participantIds.isEmpty()) {
// return an empty response.
return response;
}
criteria = new ParticipantCriteria(participantIds);
}
TableQueryAndConfig tableQueryAndConfig = getTableQueryAndConfig(fieldSet);
QueryJobConfiguration jobConfiguration = fieldSetQueryBuilder.buildQuery(criteria, tableQueryAndConfig, limit, offset);
QueryResult result;
try {
result = bigQueryService.executeQuery(bigQueryService.filterBigQueryConfig(jobConfiguration));
} catch (BigQueryException e) {
// Map BigQuery failure modes to our API exception types, preserving the cause.
if (e.getCode() == HttpServletResponse.SC_SERVICE_UNAVAILABLE) {
throw new ServerUnavailableException("BigQuery was temporarily unavailable, try again later", e);
} else if (e.getCode() == HttpServletResponse.SC_FORBIDDEN) {
throw new ForbiddenException("Access to the CDR is denied", e);
} else {
throw new ServerErrorException(String.format("An unexpected error occurred materializing the cohort with " + "query = (%s), params = (%s)", jobConfiguration.getQuery(), jobConfiguration.getNamedParameters()), e);
}
}
Map<String, Integer> rm = bigQueryService.getResultMapper(result);
int numResults = 0;
boolean hasMoreResults = false;
ArrayList<Object> results = new ArrayList<>();
for (List<FieldValue> row : result.iterateAll()) {
// The extra (pageSize+1)-th row is never returned; it only signals a next page.
if (numResults == pageSize) {
hasMoreResults = true;
break;
}
Map<String, Object> resultMap = fieldSetQueryBuilder.extractResults(tableQueryAndConfig, row);
results.add(resultMap);
numResults++;
}
response.setResults(results);
if (hasMoreResults) {
// TODO: consider pagination based on cursor / values rather than offset
PaginationToken token = PaginationToken.of(offset + pageSize, paginationParameters);
response.setNextPageToken(token.toBase64());
}
return response;
}
Use of org.pmiops.workbench.exceptions.ServerErrorException in the project "workbench" by all-of-us.
Shown below: the class BigQueryService, method filterBigQueryConfig.
/**
 * Substitutes the current CDR version's BigQuery project and dataset into the
 * {@code ${projectId}} / {@code ${dataSetId}} placeholders of the query.
 *
 * @throws ServerErrorException if no CDR version is set in the current context
 */
public QueryJobConfiguration filterBigQueryConfig(QueryJobConfiguration queryJobConfiguration) {
  CdrVersion cdrVersion = CdrVersionContext.getCdrVersion();
  if (cdrVersion == null) {
    throw new ServerErrorException("No CDR version specified");
  }
  String substitutedSql = queryJobConfiguration.getQuery()
      .replace("${projectId}", cdrVersion.getBigqueryProject())
      .replace("${dataSetId}", cdrVersion.getBigqueryDataset());
  return queryJobConfiguration.toBuilder().setQuery(substitutedSql).build();
}
Aggregations