Example usage of bio.terra.workspace.client.ApiException from the DataBiosphere terra-workspace-manager project, taken from the doUserJourney method of the ControlledApplicationSharedGcsBucketLifecycle class.
@Override
public void doUserJourney(TestUserSpecification unused, WorkspaceApi workspaceApi) throws Exception {
  // Build API stubs for the two actors in this journey: the workspace owner and the WSM app.
  ApiClient apiClientForOwner = ClientTestUtils.getClientForTestUser(owner, server);
  ApiClient apiClientForApp = ClientTestUtils.getClientForTestUser(wsmapp, server);
  WorkspaceApplicationApi appApiAsOwner = new WorkspaceApplicationApi(apiClientForOwner);
  ControlledGcpResourceApi resourceApiAsApp = new ControlledGcpResourceApi(apiClientForApp);

  // The owner grants the reader and writer test users their workspace roles.
  workspaceApi.grantRole(
      new GrantRoleRequestBody().memberEmail(reader.userEmail), getWorkspaceId(), IamRole.READER);
  logger.info("Added {} as a reader to workspace {}", reader.userEmail, getWorkspaceId());
  workspaceApi.grantRole(
      new GrantRoleRequestBody().memberEmail(writer.userEmail), getWorkspaceId(), IamRole.WRITER);
  logger.info("Added {} as a writer to workspace {}", writer.userEmail, getWorkspaceId());

  // Stand up the GCP cloud context that controlled resources will live in.
  String projectId = CloudContextMaker.createGcpCloudContext(getWorkspaceId(), workspaceApi);
  assertNotNull(projectId);
  logger.info("Created project {}", projectId);

  // Bucket creation must fail while the application is not yet enabled in the workspace.
  String bucketResourceName = RandomStringUtils.random(6, true, false);
  ApiException createBucketFailure =
      assertThrows(
          ApiException.class,
          () ->
              GcsBucketUtils.makeControlledGcsBucketAppShared(
                  resourceApiAsApp,
                  getWorkspaceId(),
                  bucketResourceName,
                  CloningInstructionsEnum.NOTHING));
  // TODO: [PF-1208] this should be FORBIDDEN (403), but we are throwing the wrong thing
  assertEquals(HttpStatusCodes.STATUS_CODE_UNAUTHORIZED, createBucketFailure.getCode());
  logger.info("Failed to create bucket, as expected");

  // Enable the application and confirm the state reported by the enable call.
  WorkspaceApplicationDescription enabledDescription =
      appApiAsOwner.enableWorkspaceApplication(getWorkspaceId(), TEST_WSM_APP);
  assertThat(enabledDescription.getApplicationState(), equalTo(ApplicationState.OPERATING));
  logger.info("Enabled application in the workspace");

  // Read the application state back and verify it matches what enable returned.
  WorkspaceApplicationDescription fetchedDescription =
      appApiAsOwner.getWorkspaceApplication(getWorkspaceId(), TEST_WSM_APP);
  assertThat(enabledDescription, equalTo(fetchedDescription));
  assertThat(
      enabledDescription.getWorkspaceApplicationState(),
      equalTo(WorkspaceApplicationState.ENABLED));

  // Now that the app is enabled, the same create call should succeed.
  CreatedControlledGcpGcsBucket createdBucket =
      GcsBucketUtils.makeControlledGcsBucketAppShared(
          resourceApiAsApp, getWorkspaceId(), bucketResourceName, CloningInstructionsEnum.NOTHING);
  bucketName = createdBucket.getGcpBucket().getAttributes().getBucketName();
  assertNotNull(bucketName);
  logger.info("Created bucket {}", bucketName);

  // Try to disable; should error because you cannot disable an app if it owns resources
  // in the workspace.
  ApiException disableAppFailure =
      assertThrows(
          ApiException.class,
          () -> appApiAsOwner.disableWorkspaceApplication(getWorkspaceId(), TEST_WSM_APP));
  assertEquals(HttpStatusCodes.STATUS_CODE_BAD_REQUEST, disableAppFailure.getCode());
  logger.info("Failed to disable app, as expected");

  // Verify the expected bucket IAM role for each participant.
  try (GcsBucketAccessTester tester = new GcsBucketAccessTester(wsmapp, bucketName, projectId)) {
    tester.checkAccess(wsmapp, ControlledResourceIamRole.EDITOR);
    tester.checkAccess(owner, ControlledResourceIamRole.WRITER);
    tester.checkAccess(writer, ControlledResourceIamRole.WRITER);
    tester.checkAccess(reader, ControlledResourceIamRole.READER);
  }

  // The reader should be able to enumerate the bucket.
  ResourceApi resourceApiAsReader = ClientTestUtils.getResourceClient(reader, server);
  ResourceList enumeratedBuckets =
      resourceApiAsReader.enumerateResources(
          getWorkspaceId(), 0, 5, ResourceType.GCS_BUCKET, StewardshipType.CONTROLLED);
  assertEquals(1, enumeratedBuckets.getResources().size());
  MultiResourcesUtils.assertResourceType(ResourceType.GCS_BUCKET, enumeratedBuckets);

  // The owner may not delete an application-owned bucket through WSM.
  ControlledGcpResourceApi resourceApiAsOwner = new ControlledGcpResourceApi(apiClientForOwner);
  ApiException ownerDeleteFailure =
      assertThrows(
          ApiException.class,
          () ->
              GcsBucketUtils.deleteControlledGcsBucket(
                  createdBucket.getResourceId(), getWorkspaceId(), resourceApiAsOwner));
  // TODO: [PF-1208] this should be FORBIDDEN (403), but we are throwing the wrong thing
  assertEquals(HttpStatusCodes.STATUS_CODE_UNAUTHORIZED, ownerDeleteFailure.getCode());
  logger.info("Owner delete failed as expected");

  // The owning application, however, can delete the bucket through WSM.
  GcsBucketUtils.deleteControlledGcsBucket(
      createdBucket.getResourceId(), getWorkspaceId(), resourceApiAsApp);
  logger.info("Application delete succeeded");
}
Example usage of bio.terra.workspace.client.ApiException from the DataBiosphere terra-workspace-manager project, taken from the doUserJourney method of the PrivateControlledGcsBucketLifecycle class.
// Exercises the lifecycle of a user-private controlled GCS bucket: create it as the private
// resource user, verify per-role cloud access, delete it as the workspace owner, then create
// and delete buckets with each currently-valid combination of the PrivateResourceUser
// parameter (see the PF-1218 note below).
@Override
public void doUserJourney(TestUserSpecification testUser, WorkspaceApi workspaceApi) throws Exception {
String projectId = CloudContextMaker.createGcpCloudContext(getWorkspaceId(), workspaceApi);
// Two resource API stubs: one acting as the workspace owner (testUser), one as the
// private resource user who will own the bucket.
ControlledGcpResourceApi workspaceOwnerResourceApi = ClientTestUtils.getControlledGcpResourceClient(testUser, server);
ControlledGcpResourceApi privateUserResourceApi = ClientTestUtils.getControlledGcpResourceClient(privateResourceUser, server);
// Grant workspace roles: workspaceReader as READER, privateResourceUser as WRITER.
workspaceApi.grantRole(new GrantRoleRequestBody().memberEmail(workspaceReader.userEmail), getWorkspaceId(), IamRole.READER);
logger.info("Added {} as a reader to workspace {}", workspaceReader.userEmail, getWorkspaceId());
workspaceApi.grantRole(new GrantRoleRequestBody().memberEmail(privateResourceUser.userEmail), getWorkspaceId(), IamRole.WRITER);
logger.info("Added {} as a writer to workspace {}", privateResourceUser.userEmail, getWorkspaceId());
// Create a private bucket, which privateResourceUser assigns to themselves.
// Cloud IAM permissions may take several minutes to sync, so we retry this operation until
// it succeeds.
CreatedControlledGcpGcsBucket bucket = ClientTestUtils.getWithRetryOnException(() -> createPrivateBucket(privateUserResourceApi));
UUID resourceId = bucket.getResourceId();
// Retrieve the bucket resource from WSM
logger.info("Retrieving bucket resource id {}", resourceId.toString());
GcpGcsBucketResource gotBucket = privateUserResourceApi.getBucket(getWorkspaceId(), resourceId);
String bucketName = gotBucket.getAttributes().getBucketName();
// The fetched resource must report the same cloud bucket name the create call returned.
assertEquals(bucket.getGcpBucket().getAttributes().getBucketName(), bucketName);
// Assert the bucket is assigned to privateResourceUser, even though resource user was
// not specified
assertEquals(privateResourceUser.userEmail, gotBucket.getMetadata().getControlledResourceMetadata().getPrivateResourceUser().getUserName());
// Only the private user has cloud access (EDITOR, waited on for IAM propagation); the
// workspace owner and the workspace reader have no direct access to the bucket contents.
try (GcsBucketAccessTester tester = new GcsBucketAccessTester(privateResourceUser, bucketName, projectId)) {
tester.checkAccessWait(privateResourceUser, ControlledResourceIamRole.EDITOR);
// workspace owner can do nothing
tester.checkAccess(testUser, null);
tester.checkAccess(workspaceReader, null);
}
// Any workspace user should be able to enumerate all buckets, even though they can't access
// their contents.
ResourceApi readerApi = ClientTestUtils.getResourceClient(workspaceReader, server);
ResourceList bucketList = readerApi.enumerateResources(getWorkspaceId(), 0, 5, ResourceType.GCS_BUCKET, StewardshipType.CONTROLLED);
assertEquals(1, bucketList.getResources().size());
MultiResourcesUtils.assertResourceType(ResourceType.GCS_BUCKET, bucketList);
// Workspace owner has DELETER role and can delete the bucket through WSM
var ownerDeleteResult = deleteBucket(workspaceOwnerResourceApi, resourceId);
ClientTestUtils.assertJobSuccess("owner delete bucket", ownerDeleteResult.getJobReport(), ownerDeleteResult.getErrorReport());
// verify the bucket was deleted from WSM metadata
ApiException bucketIsMissing = assertThrows(ApiException.class, () -> workspaceOwnerResourceApi.getBucket(getWorkspaceId(), resourceId), "Incorrectly found a deleted bucket!");
assertEquals(HttpStatusCodes.STATUS_CODE_NOT_FOUND, bucketIsMissing.getCode());
// also verify it was deleted from GCP
Storage ownerStorageClient = ClientTestUtils.getGcpStorageClient(testUser, projectId);
Bucket maybeBucket = ownerStorageClient.get(bucketName);
assertNull(maybeBucket);
// TODO: PF-1218 - change these to negative tests - should error - when
// the ticket is complete. These exercise two create cases with currently
// valid combinations of private user.
PrivateResourceIamRoles roles = new PrivateResourceIamRoles();
roles.add(ControlledResourceIamRole.READER);
// Supply all private user parameters
PrivateResourceUser privateUserFull = new PrivateResourceUser().userName(privateResourceUser.userEmail).privateResourceIamRoles(roles);
CreatedControlledGcpGcsBucket userFullBucket = GcsBucketUtils.makeControlledGcsBucket(privateUserResourceApi, getWorkspaceId(), RESOURCE_PREFIX + UUID.randomUUID().toString(), /*bucketName=*/
null, AccessScope.PRIVATE_ACCESS, ManagedBy.USER, CloningInstructionsEnum.NOTHING, privateUserFull);
assertNotNull(userFullBucket.getGcpBucket().getAttributes().getBucketName());
deleteBucket(workspaceOwnerResourceApi, userFullBucket.getResourceId());
// Supply just the roles, but no email
PrivateResourceUser privateUserNoEmail = new PrivateResourceUser().userName(null).privateResourceIamRoles(roles);
CreatedControlledGcpGcsBucket userNoEmailBucket = GcsBucketUtils.makeControlledGcsBucket(privateUserResourceApi, getWorkspaceId(), RESOURCE_PREFIX + UUID.randomUUID().toString(), /*bucketName=*/
null, AccessScope.PRIVATE_ACCESS, ManagedBy.USER, CloningInstructionsEnum.NOTHING, privateUserNoEmail);
assertNotNull(userNoEmailBucket.getGcpBucket().getAttributes().getBucketName());
deleteBucket(workspaceOwnerResourceApi, userNoEmailBucket.getResourceId());
// Supply an explicit cloud bucket name instead of letting WSM generate one, and verify
// the created bucket uses exactly that name.
String uniqueBucketName = String.format("terra_%s_bucket", UUID.randomUUID().toString().replace("-", "_"));
CreatedControlledGcpGcsBucket bucketWithBucketNameSpecified = GcsBucketUtils.makeControlledGcsBucket(privateUserResourceApi, getWorkspaceId(), RESOURCE_PREFIX + UUID.randomUUID().toString(), /*bucketName=*/
uniqueBucketName, AccessScope.PRIVATE_ACCESS, ManagedBy.USER, CloningInstructionsEnum.NOTHING, privateUserFull);
assertEquals(uniqueBucketName, bucketWithBucketNameSpecified.getGcpBucket().getAttributes().getBucketName());
deleteBucket(workspaceOwnerResourceApi, bucketWithBucketNameSpecified.getResourceId());
}
Example usage of bio.terra.workspace.client.ApiException from the DataBiosphere terra-workspace-manager project, taken from the doUserJourney method of the ControlledBigQueryDatasetLifecycle class.
// Exercises the lifecycle of a shared controlled BigQuery dataset: create it as the workspace
// owner, verify table-level read/write permissions for owner/writer/reader via the BigQuery
// cloud API, update the resource through WSM (including a rejected invalid update), clone it,
// enumerate it, and finally verify only WSM (not direct cloud calls) can delete it.
@Override
protected void doUserJourney(TestUserSpecification testUser, WorkspaceApi workspaceApi) throws Exception {
  ControlledGcpResourceApi ownerResourceApi = ClientTestUtils.getControlledGcpResourceClient(testUser, server);
  // Add a writer the source workspace. Reader is already added by the base class
  logger.info("Adding {} as writer to workspace {}", writer.userEmail, getWorkspaceId());
  workspaceApi.grantRole(new GrantRoleRequestBody().memberEmail(writer.userEmail), getWorkspaceId(), IamRole.WRITER);
  SamClientUtils.dumpResourcePolicy(testUser, server, "workspace", getWorkspaceId().toString());

  // Create a shared BigQuery dataset; datasetId defaults to the resource name when null.
  GcpBigQueryDatasetResource createdDataset = BqDatasetUtils.makeControlledBigQueryDatasetUserShared(ownerResourceApi, getWorkspaceId(), DATASET_RESOURCE_NAME, /*datasetId=*/
  null, /*cloningInstructions=*/
  null);
  assertEquals(DATASET_RESOURCE_NAME, createdDataset.getAttributes().getDatasetId());
  UUID resourceId = createdDataset.getMetadata().getResourceId();

  // Retrieve the dataset resource
  logger.info("Retrieving dataset resource id {}", resourceId.toString());
  GcpBigQueryDatasetResource fetchedResource = ownerResourceApi.getBigQueryDataset(getWorkspaceId(), resourceId);
  assertEquals(createdDataset, fetchedResource);
  assertEquals(DATASET_RESOURCE_NAME, fetchedResource.getAttributes().getDatasetId());
  createControlledDatasetWithBothResourceNameAndDatasetIdSpecified(ownerResourceApi);

  // Direct BigQuery clients for each of the three roles, all against the source project.
  BigQuery ownerBqClient = ClientTestUtils.getGcpBigQueryClient(testUser, getSourceProjectId());
  BigQuery writerBqClient = ClientTestUtils.getGcpBigQueryClient(writer, getSourceProjectId());
  BigQuery readerBqClient = ClientTestUtils.getGcpBigQueryClient(getWorkspaceReader(), getSourceProjectId());

  // Workspace owner can create a table in this dataset
  Table table = createTable(ownerBqClient, getSourceProjectId());
  String tableName = table.getTableId().getTable();

  // Workspace reader can read the table
  // This is the reader's first use of cloud APIs after being added to the workspace, so we
  // retry this operation until cloud IAM has properly synced.
  var readTable = ClientTestUtils.getWithRetryOnException(() -> readerBqClient.getTable(table.getTableId()));
  assertEquals(table, readTable);
  logger.info("Read table {} as workspace reader", tableName);

  // Workspace reader cannot modify tables
  Table readerUpdatedTable = table.toBuilder().setDescription("A new table description").build();
  assertThrows(BigQueryException.class, () -> readerBqClient.update(readerUpdatedTable), "Workspace reader was able to modify table metadata");
  logger.info("Workspace reader could not modify table {} metadata as expected", tableName);

  // Workspace reader cannot write data to tables
  assertThrows(BigQueryException.class, () -> insertValueIntoTable(readerBqClient, "some value"), "Workspace reader was able to insert data into a table");
  logger.info("Workspace reader could not modify table {} contents as expected", tableName);

  // Workspace writer can also read the table
  // This is the writer's first use of cloud APIs after being added to the workspace, so we
  // retry this operation until cloud IAM has properly synced.
  var writerReadTable = ClientTestUtils.getWithRetryOnException(() -> writerBqClient.getTable(table.getTableId()));
  assertEquals(table, writerReadTable);
  logger.info("Read table {} as workspace writer", tableName);

  // In contrast, a workspace writer can write data to tables
  String columnValue = "this value lives in a table";
  insertValueIntoTable(writerBqClient, columnValue);
  logger.info("Workspace writer wrote a row to table {}", tableName);

  // Create a dataset to hold query results in the destination project.
  ControlledGcpResourceApi readerResourceApi = ClientTestUtils.getControlledGcpResourceClient(getWorkspaceReader(), server);
  String resultDatasetId = "temporary_result_dataset";
  GcpBigQueryDatasetResource temporaryResultDataset = BqDatasetUtils.makeControlledBigQueryDatasetUserShared(readerResourceApi, getDestinationWorkspaceId(), "temporary_result_resource", resultDatasetId, CloningInstructionsEnum.NOTHING);
  // The table does not exist yet, but will be created to hold query results.
  TableId resultTableId = TableId.of(getDestinationProjectId(), resultDatasetId, BqDatasetUtils.BQ_RESULT_TABLE_NAME);

  // Workspace reader can now read the row inserted above
  assertEquals(columnValue, readValueFromTable(readerBqClient, resultTableId));
  logger.info("Workspace reader read that row from table {}", tableName);

  // Workspace writer can update the table metadata
  String newDescription = "Another new table description";
  Table writerUpdatedTable = table.toBuilder().setDescription(newDescription).build();
  Table updatedTable = writerBqClient.update(writerUpdatedTable);
  assertEquals(newDescription, updatedTable.getDescription());
  logger.info("Workspace writer modified table {} metadata", tableName);

  // Workspace owner can update the dataset resource through WSM
  String resourceDescription = "a description for WSM";
  Integer defaultTableLifetimeSec = 5400;
  var updateDatasetRequest = new UpdateControlledGcpBigQueryDatasetRequestBody().description(resourceDescription).updateParameters(new GcpBigQueryDatasetUpdateParameters().defaultTableLifetime(defaultTableLifetimeSec));
  ownerResourceApi.updateBigQueryDataset(updateDatasetRequest, getWorkspaceId(), resourceId);
  var datasetAfterUpdate = ownerResourceApi.getBigQueryDataset(getWorkspaceId(), resourceId);
  // Fixed: assertEquals takes (expected, actual); the arguments were previously swapped,
  // which produces a misleading failure message when the update does not stick.
  assertEquals(resourceDescription, datasetAfterUpdate.getMetadata().getDescription());
  logger.info("Workspace owner updated resource {}", resourceId);

  // However, invalid updates are rejected.
  String invalidName = "!!!invalid_name!!!";
  var invalidUpdateDatasetRequest = new UpdateControlledGcpBigQueryDatasetRequestBody().name(invalidName);
  ApiException invalidUpdateEx = assertThrows(ApiException.class, () -> ownerResourceApi.updateBigQueryDataset(invalidUpdateDatasetRequest, getWorkspaceId(), resourceId));
  assertEquals(HttpStatusCodes.STATUS_CODE_BAD_REQUEST, invalidUpdateEx.getCode());

  // Cloud metadata matches the updated values (BigQuery reports the lifetime in ms).
  Dataset cloudDataset = ownerBqClient.getDataset(DatasetId.of(getSourceProjectId(), DATASET_RESOURCE_NAME));
  assertEquals(defaultTableLifetimeSec * 1000L, cloudDataset.getDefaultTableLifetime());
  assertNull(cloudDataset.getDefaultPartitionExpirationMs());

  // Workspace writer can delete the table we created earlier
  logger.info("Deleting table {} from dataset {}", table.getTableId().getTable(), DATASET_RESOURCE_NAME);
  assertTrue(writerBqClient.delete(TableId.of(getSourceProjectId(), DATASET_RESOURCE_NAME, table.getTableId().getTable())));

  // Workspace reader can clean up the results table and dataset before cloning
  readerResourceApi.deleteBigQueryDataset(getDestinationWorkspaceId(), temporaryResultDataset.getMetadata().getResourceId());

  // Populate dataset with additional tables to verify cloning behavior
  BqDatasetUtils.populateBigQueryDataset(createdDataset, testUser, getSourceProjectId());

  // Verify workspace reader is able to clone the resource they can read
  testCloneBigQueryDataset(createdDataset, getWorkspaceReader(), readerResourceApi);

  // The reader should be able to enumerate the dataset.
  ResourceApi readerApi = ClientTestUtils.getResourceClient(getWorkspaceReader(), server);
  ResourceList datasetList = readerApi.enumerateResources(getWorkspaceId(), 0, 5, ResourceType.BIG_QUERY_DATASET, StewardshipType.CONTROLLED);
  assertEquals(1, datasetList.getResources().size());
  MultiResourcesUtils.assertResourceType(ResourceType.BIG_QUERY_DATASET, datasetList);

  // Workspace writer cannot delete the dataset directly
  var writerCannotDeleteException = assertThrows(BigQueryException.class, () -> writerBqClient.delete(DATASET_RESOURCE_NAME));
  assertEquals(HttpStatusCodes.STATUS_CODE_FORBIDDEN, writerCannotDeleteException.getCode());

  // Workspace owner cannot delete the dataset directly
  var ownerCannotDeleteException = assertThrows(BigQueryException.class, () -> ownerBqClient.delete(DATASET_RESOURCE_NAME));
  assertEquals(HttpStatusCodes.STATUS_CODE_FORBIDDEN, ownerCannotDeleteException.getCode());

  // Workspace owner can delete the dataset through WSM
  ownerResourceApi.deleteBigQueryDataset(getWorkspaceId(), resourceId);
}
Example usage of bio.terra.workspace.client.ApiException from the DataBiosphere terra-workspace-manager project, taken from the doUserJourney method of the PrivateControlledAiNotebookInstanceLifecycle class.
// Exercises the lifecycle of a user-private AI Notebook instance: create it as the resource
// user, verify proxy access is limited to that user, verify the user can stop but not
// directly delete the instance on GCP, enumerate it as another workspace user, then delete
// it through WSM and confirm removal from both WSM metadata and GCP.
@Override
@SuppressFBWarnings(value = "DLS_DEAD_LOCAL_STORE")
protected void doUserJourney(TestUserSpecification testUser, WorkspaceApi workspaceApi) throws Exception {
  CloudContextMaker.createGcpCloudContext(getWorkspaceId(), workspaceApi);
  // Both the private resource user and another workspace user get WRITER on the workspace;
  // only the former should be able to use the private notebook.
  workspaceApi.grantRole(new GrantRoleRequestBody().memberEmail(resourceUser.userEmail), getWorkspaceId(), IamRole.WRITER);
  workspaceApi.grantRole(new GrantRoleRequestBody().memberEmail(otherWorkspaceUser.userEmail), getWorkspaceId(), IamRole.WRITER);
  ControlledGcpResourceApi resourceUserApi = ClientTestUtils.getControlledGcpResourceClient(resourceUser, server);
  CreatedControlledGcpAiNotebookInstanceResult creationResult = NotebookUtils.makeControlledNotebookUserPrivate(getWorkspaceId(), instanceId, /*location=*/
  null, resourceUserApi);
  UUID resourceId = creationResult.getAiNotebookInstance().getMetadata().getResourceId();

  // The GET response and the create response must agree on the instance id, and the
  // notebook must be assigned to the private resource user with the default location.
  GcpAiNotebookInstanceResource resource = resourceUserApi.getAiNotebookInstance(getWorkspaceId(), resourceId);
  assertEquals(instanceId, resource.getAttributes().getInstanceId(), "Notebook instance id is correct in GET response from WSM");
  assertEquals(instanceId, creationResult.getAiNotebookInstance().getAttributes().getInstanceId(), "Notebook instance id is correct in create response from WSM");
  assertEquals(resourceUser.userEmail, resource.getMetadata().getControlledResourceMetadata().getPrivateResourceUser().getUserName(), "User is the private user of the notebook");
  assertEquals("us-central1-a", resource.getAttributes().getLocation(), "The notebook uses the default location because location is not specified.");
  createAControlledAiNotebookInstanceWithoutSpecifiedInstanceId_validInstanceIdIsGenerated(resourceUserApi);
  createAControlledAiNotebookInstanceWithoutSpecifiedInstanceId_specifyLocation(resourceUserApi);

  // Fully-qualified GCP instance name used for direct Notebooks API calls below.
  String instanceName = String.format("projects/%s/locations/%s/instances/%s", resource.getAttributes().getProjectId(), resource.getAttributes().getLocation(), resource.getAttributes().getInstanceId());
  AIPlatformNotebooks userNotebooks = ClientTestUtils.getAIPlatformNotebooksClient(resourceUser);
  assertTrue(NotebookUtils.userHasProxyAccess(creationResult, resourceUser, resource.getAttributes().getProjectId()), "Private resource user has access to their notebook");
  assertFalse(NotebookUtils.userHasProxyAccess(creationResult, otherWorkspaceUser, resource.getAttributes().getProjectId()), "Other workspace user does not have access to a private notebook");

  // The user should be able to stop their notebook.
  // Fixed: the request must be execute()d — previously the stop request was built but never
  // sent, so this step silently tested nothing (compare the delete/get calls below).
  userNotebooks.projects().locations().instances().stop(instanceName, new StopInstanceRequest()).execute();

  // The user should not be able to directly delete their notebook.
  GoogleJsonResponseException directDeleteForbidden = assertThrows(GoogleJsonResponseException.class, () -> userNotebooks.projects().locations().instances().delete(instanceName).execute());
  assertEquals(HttpStatus.SC_FORBIDDEN, directDeleteForbidden.getStatusCode(), "User may not delete notebook directly on GCP");

  // Any workspace user should be able to enumerate all created notebooks, even though they can't
  // read or write them.
  ResourceApi otherUserApi = ClientTestUtils.getResourceClient(otherWorkspaceUser, server);
  ResourceList notebookList = otherUserApi.enumerateResources(getWorkspaceId(), 0, 5, ResourceType.AI_NOTEBOOK, StewardshipType.CONTROLLED);
  assertEquals(3, notebookList.getResources().size());
  MultiResourcesUtils.assertResourceType(ResourceType.AI_NOTEBOOK, notebookList);

  // Delete the AI Notebook through WSM (async: poll the delete job until it finishes).
  DeleteControlledGcpAiNotebookInstanceResult deleteResult = resourceUserApi.deleteAiNotebookInstance(new DeleteControlledGcpAiNotebookInstanceRequest().jobControl(new JobControl().id(UUID.randomUUID().toString())), getWorkspaceId(), resourceId);
  String deleteJobId = deleteResult.getJobReport().getId();
  deleteResult = ClientTestUtils.pollWhileRunning(deleteResult, () -> resourceUserApi.getDeleteAiNotebookInstanceResult(getWorkspaceId(), deleteJobId), DeleteControlledGcpAiNotebookInstanceResult::getJobReport, Duration.ofSeconds(10));
  ClientTestUtils.assertJobSuccess("delete ai notebook", deleteResult.getJobReport(), deleteResult.getErrorReport());

  // Verify the notebook was deleted from WSM metadata.
  ApiException notebookIsMissing = assertThrows(ApiException.class, () -> resourceUserApi.getAiNotebookInstance(getWorkspaceId(), resourceId), "Notebook is deleted from WSM");
  assertEquals(HttpStatus.SC_NOT_FOUND, notebookIsMissing.getCode(), "Error from WSM is 404");

  // Verify the notebook was deleted from GCP.
  GoogleJsonResponseException notebookNotFound = assertThrows(GoogleJsonResponseException.class, () -> userNotebooks.projects().locations().instances().get(instanceName).execute(), "Notebook is deleted from GCP");
  // GCP may respond with either 403 or 404 depending on how quickly this is called after deleting
  // the notebook. Either response is valid in this case.
  assertThat("Error from GCP is 403 or 404", notebookNotFound.getStatusCode(), anyOf(equalTo(HttpStatus.SC_NOT_FOUND), equalTo(HttpStatus.SC_FORBIDDEN)));
}
Example usage of bio.terra.workspace.client.ApiException from the DataBiosphere terra-workspace-manager project, taken from the doUserJourney method of the EnumerateResources class.
@Override
public void doUserJourney(TestUserSpecification testUser, WorkspaceApi workspaceApi) throws Exception {
  // Add second user to the workspace as a reader
  workspaceApi.grantRole(
      new GrantRoleRequestBody().memberEmail(workspaceReader.userEmail),
      getWorkspaceId(),
      IamRole.READER);

  // Case 1: fetch all
  ResourceList allResources =
      ownerResourceApi.enumerateResources(getWorkspaceId(), 0, RESOURCE_COUNT, null, null);
  logResult("fetchall", allResources);
  // Make sure we got all of the expected ids
  matchFullResourceList(allResources.getResources());

  // Repeat case 1 as the workspace reader.
  // As this is the first operation after modifying workspace IAM groups, retry here to compensate
  // for the delay in GCP IAM propagation.
  ResourceList allResourcesAsReader =
      ClientTestUtils.getWithRetryOnException(
          () -> readerResourceApi.enumerateResources(getWorkspaceId(), 0, RESOURCE_COUNT, null, null));
  logResult("fetchall reader", allResourcesAsReader);
  matchFullResourceList(allResourcesAsReader.getResources());

  // Case 2: fetch by pages
  ResourceList firstPage =
      ownerResourceApi.enumerateResources(getWorkspaceId(), 0, PAGE_SIZE, null, null);
  logResult("page1", firstPage);
  assertThat(firstPage.getResources().size(), equalTo(PAGE_SIZE));
  ResourceList secondPage =
      ownerResourceApi.enumerateResources(getWorkspaceId(), PAGE_SIZE, PAGE_SIZE, null, null);
  logResult("page2", secondPage);
  assertThat(secondPage.getResources().size(), equalTo(PAGE_SIZE));
  ResourceList thirdPage =
      ownerResourceApi.enumerateResources(getWorkspaceId(), 2 * PAGE_SIZE, PAGE_SIZE, null, null);
  logResult("page3", thirdPage);
  assertThat(thirdPage.getResources().size(), lessThan(PAGE_SIZE));

  // The concatenation of all three pages must cover the full expected resource set.
  List<ResourceDescription> combinedPages = new ArrayList<>(firstPage.getResources());
  combinedPages.addAll(secondPage.getResources());
  combinedPages.addAll(thirdPage.getResources());
  matchFullResourceList(combinedPages);

  // Case 3: no results if offset is too high
  ResourceList pastTheEnd =
      ownerResourceApi.enumerateResources(getWorkspaceId(), 10 * PAGE_SIZE, PAGE_SIZE, null, null);
  assertThat(pastTheEnd.getResources().size(), equalTo(0));

  // Case 4: filter by resource type
  ResourceList bucketsOnly =
      ownerResourceApi.enumerateResources(
          getWorkspaceId(), 0, RESOURCE_COUNT, ResourceType.GCS_BUCKET, null);
  logResult("buckets", bucketsOnly);
  long expectedBuckets =
      resourceList.stream().filter(m -> m.getResourceType() == ResourceType.GCS_BUCKET).count();
  logger.info("Counted {} buckets created", expectedBuckets);
  // Note - assertThat exits out on an int -> long compare, so just don't do that.
  long actualBuckets = bucketsOnly.getResources().size();
  assertThat(actualBuckets, equalTo(expectedBuckets));

  // Case 5: filter by stewardship type
  ResourceList referencedOnly =
      ownerResourceApi.enumerateResources(
          getWorkspaceId(), 0, RESOURCE_COUNT, null, StewardshipType.REFERENCED);
  logResult("referenced", referencedOnly);
  long expectedReferenced =
      resourceList.stream()
          .filter(m -> m.getStewardshipType() == StewardshipType.REFERENCED)
          .count();
  logger.info("Counted {} referenced created", expectedReferenced);
  long actualReferenced = referencedOnly.getResources().size();
  assertThat(actualReferenced, equalTo(expectedReferenced));

  // Case 6: filter by resource and stewardship
  ResourceList controlledBucketsOnly =
      ownerResourceApi.enumerateResources(
          getWorkspaceId(), 0, RESOURCE_COUNT, ResourceType.GCS_BUCKET, StewardshipType.CONTROLLED);
  logResult("controlledBucket", controlledBucketsOnly);
  long expectedControlledBuckets =
      resourceList.stream()
          .filter(
              m ->
                  (m.getStewardshipType() == StewardshipType.CONTROLLED
                      && m.getResourceType() == ResourceType.GCS_BUCKET))
          .count();
  logger.info("Counted {} controlled buckets created", expectedControlledBuckets);
  long actualControlledBuckets = controlledBucketsOnly.getResources().size();
  assertThat(actualControlledBuckets, equalTo(expectedControlledBuckets));

  // Case 7: validate error on invalid pagination params
  ApiException negativeOffsetException =
      assertThrows(
          ApiException.class,
          () ->
              ownerResourceApi.enumerateResources(
                  getWorkspaceId(), -11, 2, ResourceType.GCS_BUCKET, StewardshipType.CONTROLLED));
  assertThat(negativeOffsetException.getMessage(), containsString("Invalid pagination"));
  ApiException zeroLimitException =
      assertThrows(
          ApiException.class,
          () ->
              ownerResourceApi.enumerateResources(
                  getWorkspaceId(), 0, 0, ResourceType.GCS_BUCKET, StewardshipType.CONTROLLED));
  assertThat(zeroLimitException.getMessage(), containsString("Invalid pagination"));
}
Aggregations