use of bio.terra.workspace.api.ResourceApi in project terra-workspace-manager by DataBiosphere.
The doSetup method of the EnumerateResources class.
@Override
public void doSetup(List<TestUserSpecification> testUsers, WorkspaceApi workspaceApi) throws Exception {
// Let the base class create the workspace before we configure anything else.
super.doSetup(testUsers, workspaceApi);
assertThat("There must be two test users defined for this test.", testUsers != null && testUsers.size() == 2);
TestUserSpecification workspaceOwner = testUsers.get(0);
workspaceReader = testUsers.get(1);
// Sanity-check the paging constants: the resource count must land strictly between
// two and three pages so the pagination paths actually get exercised.
assertThat(PAGE_SIZE * 2, lessThan(RESOURCE_COUNT));
assertThat(PAGE_SIZE * 3, greaterThan(RESOURCE_COUNT));
// Build the owner-side API stubs from a single authenticated client.
ApiClient clientForOwner = ClientTestUtils.getClientForTestUser(workspaceOwner, server);
ownerControlledGcpResourceApi = new ControlledGcpResourceApi(clientForOwner);
ownerReferencedGcpResourceApi = new ReferencedGcpResourceApi(clientForOwner);
ownerResourceApi = new ResourceApi(clientForOwner);
// The reader only needs the enumeration API.
ApiClient clientForReader = ClientTestUtils.getClientForTestUser(workspaceReader, server);
readerResourceApi = new ResourceApi(clientForReader);
// A GCP cloud context must exist before resources can be created in the workspace.
CloudContextMaker.createGcpCloudContext(getWorkspaceId(), workspaceApi);
// Populate the workspace with the fixture resources the test enumerates.
logger.info("Creating {} resources", RESOURCE_COUNT);
resourceList = MultiResourcesUtils.makeResources(ownerReferencedGcpResourceApi, ownerControlledGcpResourceApi, getWorkspaceId());
logger.info("Created {} resources", resourceList.size());
}
use of bio.terra.workspace.api.ResourceApi in project terra-workspace-manager by DataBiosphere.
The doUserJourney method of the ControlledApplicationPrivateGcsBucketLifecycle class.
@Override
public void doUserJourney(TestUserSpecification testUser, WorkspaceApi workspaceApi) throws Exception {
// Two identities: the workspace owner administers roles and applications,
// while the WSM test application (wsmapp) creates the controlled buckets.
ApiClient ownerClient = ClientTestUtils.getClientForTestUser(owner, server);
ApiClient appClient = ClientTestUtils.getClientForTestUser(wsmapp, server);
WorkspaceApplicationApi appAdminApi = new WorkspaceApplicationApi(ownerClient);
ControlledGcpResourceApi appResourceApi = new ControlledGcpResourceApi(appClient);
// Grant the reader and writer test users their workspace roles.
workspaceApi.grantRole(new GrantRoleRequestBody().memberEmail(reader.userEmail), getWorkspaceId(), IamRole.READER);
logger.info("Added {} as a reader to workspace {}", reader.userEmail, getWorkspaceId());
workspaceApi.grantRole(new GrantRoleRequestBody().memberEmail(writer.userEmail), getWorkspaceId(), IamRole.WRITER);
logger.info("Added {} as a writer to workspace {}", writer.userEmail, getWorkspaceId());
// A GCP cloud context is required before any controlled resource can exist.
String projectId = CloudContextMaker.createGcpCloudContext(getWorkspaceId(), workspaceApi);
assertNotNull(projectId);
logger.info("Created project {}", projectId);
// The application must be enabled in the workspace before it can act there.
WorkspaceApplicationDescription appDescription = appAdminApi.enableWorkspaceApplication(getWorkspaceId(), TEST_WSM_APP);
assertThat(appDescription.getApplicationState(), equalTo(ApplicationState.OPERATING));
logger.info("Enabled application {} in the workspace {}", TEST_WSM_APP, getWorkspaceId());
// CASE 1: bucket with no assigned user
testNoAssignedUser(appResourceApi, projectId);
// CASE 2: bucket with the workspace writer assigned as READER
testAssignedReader(appResourceApi, projectId);
// CASE 3: bucket with the workspace reader assigned as WRITER
testAssignedWriter(appResourceApi, projectId);
// All three buckets should be visible when the owner enumerates controlled GCS buckets.
ResourceApi enumerationApi = ClientTestUtils.getResourceClient(owner, server);
ResourceList bucketList = enumerationApi.enumerateResources(getWorkspaceId(), 0, 5, ResourceType.GCS_BUCKET, StewardshipType.CONTROLLED);
assertEquals(3, bucketList.getResources().size());
MultiResourcesUtils.assertResourceType(ResourceType.GCS_BUCKET, bucketList);
}
use of bio.terra.workspace.api.ResourceApi in project terra-workspace-manager by DataBiosphere.
The doUserJourney method of the ControlledApplicationSharedGcsBucketLifecycle class.
@Override
public void doUserJourney(TestUserSpecification unused, WorkspaceApi workspaceApi) throws Exception {
// Two identities: the owner administers the workspace; the WSM test application
// (wsmapp) is the identity that creates application-owned resources.
ApiClient ownerApiClient = ClientTestUtils.getClientForTestUser(owner, server);
ApiClient wsmappApiClient = ClientTestUtils.getClientForTestUser(wsmapp, server);
WorkspaceApplicationApi ownerWsmAppApi = new WorkspaceApplicationApi(ownerApiClient);
ControlledGcpResourceApi wsmappResourceApi = new ControlledGcpResourceApi(wsmappApiClient);
// Owner adds a reader and a writer to the workspace
workspaceApi.grantRole(new GrantRoleRequestBody().memberEmail(reader.userEmail), getWorkspaceId(), IamRole.READER);
logger.info("Added {} as a reader to workspace {}", reader.userEmail, getWorkspaceId());
workspaceApi.grantRole(new GrantRoleRequestBody().memberEmail(writer.userEmail), getWorkspaceId(), IamRole.WRITER);
logger.info("Added {} as a writer to workspace {}", writer.userEmail, getWorkspaceId());
// Create the cloud context
String projectId = CloudContextMaker.createGcpCloudContext(getWorkspaceId(), workspaceApi);
assertNotNull(projectId);
logger.info("Created project {}", projectId);
// Create the bucket - should fail because application is not enabled
String bucketResourceName = RandomStringUtils.random(6, true, false);
ApiException createBucketFails = assertThrows(ApiException.class, () -> GcsBucketUtils.makeControlledGcsBucketAppShared(wsmappResourceApi, getWorkspaceId(), bucketResourceName, CloningInstructionsEnum.NOTHING));
// TODO: [PF-1208] this should be FORBIDDEN (403), but we are throwing the wrong thing
assertEquals(HttpStatusCodes.STATUS_CODE_UNAUTHORIZED, createBucketFails.getCode());
logger.info("Failed to create bucket, as expected");
// Enable the application in the workspace
WorkspaceApplicationDescription applicationDescription = ownerWsmAppApi.enableWorkspaceApplication(getWorkspaceId(), TEST_WSM_APP);
assertThat(applicationDescription.getApplicationState(), equalTo(ApplicationState.OPERATING));
logger.info("Enabled application in the workspace");
// Validate that it is enabled. Assert on the freshly retrieved description:
// Hamcrest's assertThat takes (actual, matcher), and checking the GET result
// (rather than the enable-call response) is what actually verifies the read.
WorkspaceApplicationDescription retrievedDescription = ownerWsmAppApi.getWorkspaceApplication(getWorkspaceId(), TEST_WSM_APP);
assertThat(retrievedDescription, equalTo(applicationDescription));
assertThat(retrievedDescription.getWorkspaceApplicationState(), equalTo(WorkspaceApplicationState.ENABLED));
// Create the bucket - should work this time
CreatedControlledGcpGcsBucket createdBucket = GcsBucketUtils.makeControlledGcsBucketAppShared(wsmappResourceApi, getWorkspaceId(), bucketResourceName, CloningInstructionsEnum.NOTHING);
bucketName = createdBucket.getGcpBucket().getAttributes().getBucketName();
assertNotNull(bucketName);
logger.info("Created bucket {}", bucketName);
// Try to disable; should error because you cannot disable an app if it owns resources
// in the workspace.
ApiException disableAppFails = assertThrows(ApiException.class, () -> ownerWsmAppApi.disableWorkspaceApplication(getWorkspaceId(), TEST_WSM_APP));
assertEquals(HttpStatusCodes.STATUS_CODE_BAD_REQUEST, disableAppFails.getCode());
logger.info("Failed to disable app, as expected");
// Verify each identity holds the expected level of direct bucket access.
// try-with-resources ensures the tester's cleanup runs even on assertion failure.
try (GcsBucketAccessTester tester = new GcsBucketAccessTester(wsmapp, bucketName, projectId)) {
tester.checkAccess(wsmapp, ControlledResourceIamRole.EDITOR);
tester.checkAccess(owner, ControlledResourceIamRole.WRITER);
tester.checkAccess(writer, ControlledResourceIamRole.WRITER);
tester.checkAccess(reader, ControlledResourceIamRole.READER);
}
// The reader should be able to enumerate the bucket.
ResourceApi readerResourceApi = ClientTestUtils.getResourceClient(reader, server);
ResourceList bucketList = readerResourceApi.enumerateResources(getWorkspaceId(), 0, 5, ResourceType.GCS_BUCKET, StewardshipType.CONTROLLED);
assertEquals(1, bucketList.getResources().size());
MultiResourcesUtils.assertResourceType(ResourceType.GCS_BUCKET, bucketList);
// Owner cannot delete the bucket through WSM
ControlledGcpResourceApi ownerResourceApi = new ControlledGcpResourceApi(ownerApiClient);
ApiException cannotDelete = assertThrows(ApiException.class, () -> GcsBucketUtils.deleteControlledGcsBucket(createdBucket.getResourceId(), getWorkspaceId(), ownerResourceApi));
// TODO: [PF-1208] this should be FORBIDDEN (403), but we are throwing the wrong thing
assertEquals(HttpStatusCodes.STATUS_CODE_UNAUTHORIZED, cannotDelete.getCode());
logger.info("Owner delete failed as expected");
// Application can delete the bucket through WSM
GcsBucketUtils.deleteControlledGcsBucket(createdBucket.getResourceId(), getWorkspaceId(), wsmappResourceApi);
logger.info("Application delete succeeded");
}
use of bio.terra.workspace.api.ResourceApi in project terra-cli by DataBiosphere.
The enumerateAllResources method of the WorkspaceManagerService class.
/**
 * Call the Workspace Manager GET "/api/workspaces/v1/{workspaceId}/resources" endpoint, possibly
 * multiple times, to get a list of all resources (controlled and referenced) in the workspace.
 * Throw an exception if the number of resources in the workspace is greater than the specified
 * limit.
 *
 * @param workspaceId the workspace to query
 * @param limit the maximum number of resources to return
 * @return a list of resources
 * @throws SystemException if the number of resources in the workspace > the specified limit
 */
public List<ResourceDescription> enumerateAllResources(UUID workspaceId, int limit) {
return handleClientExceptions(() -> {
// Page through the enumerate endpoint until a short page signals the end,
// bailing out if the running total ever exceeds the caller's limit.
List<ResourceDescription> accumulated = new ArrayList<>();
while (true) {
int pageOffset = accumulated.size();
ResourceList page = HttpUtils.callWithRetries(() -> new ResourceApi(apiClient).enumerateResources(workspaceId, pageOffset, MAX_RESOURCES_PER_ENUMERATE_REQUEST, null, null), WorkspaceManagerService::isRetryable);
int pageSize = page.getResources().size();
logger.debug("Called enumerate endpoints, fetched {} resources", pageSize);
accumulated.addAll(page.getResources());
// if we have fetched more than the limit, then throw an exception
if (accumulated.size() > limit) {
throw new SystemException("Total number of resources (" + accumulated.size() + ") exceeds the CLI limit (" + limit + ")");
}
// A page smaller than the request maximum means there are no more results.
if (pageSize < MAX_RESOURCES_PER_ENUMERATE_REQUEST) {
break;
}
}
logger.debug("Fetched total number of resources: {}", accumulated.size());
return accumulated;
}, "Error enumerating resources in the workspace.");
}
use of bio.terra.workspace.api.ResourceApi in project terra-workspace-manager by DataBiosphere.
The doUserJourney method of the ControlledGcsBucketLifecycle class.
@Override
public void doUserJourney(TestUserSpecification testUser, WorkspaceApi workspaceApi) throws Exception {
ControlledGcpResourceApi resourceApi = ClientTestUtils.getControlledGcpResourceClient(testUser, server);
// Create a bucket with a name that's already taken by a publicly accessible bucket. WSM should
// have get and read access, as the bucket is open to everyone, but this should still fail.
// If bucket already exists, create bucket step has logic that checks if existing bucket is in
// same project (step reran, so not an error) or not (should throw error). This tests that
// logic.
ApiException publicDuplicateNameFails = assertThrows(ApiException.class, () -> createBucketAttempt(resourceApi, PUBLIC_GCP_BUCKET_NAME));
assertEquals(HttpStatus.SC_CONFLICT, publicDuplicateNameFails.getCode());
logger.info("Failed to create bucket with duplicate name of public bucket, as expected");
// Create the bucket - should work this time
CreatedControlledGcpGcsBucket bucket = createBucketAttempt(resourceApi, bucketName);
UUID resourceId = bucket.getResourceId();
// Try creating another bucket with the same name. This should fail and should not affect the
// existing resource.
ApiException duplicateNameFailsAgain = assertThrows(ApiException.class, () -> createBucketAttempt(resourceApi, bucketName));
assertEquals(HttpStatus.SC_CONFLICT, duplicateNameFailsAgain.getCode());
logger.info("Failed to create bucket with duplicate name again, as expected");
// Retrieve the bucket resource and confirm WSM metadata matches what was created.
logger.info("Retrieving bucket resource id {}", resourceId.toString());
GcpGcsBucketResource gotBucket = resourceApi.getBucket(getWorkspaceId(), resourceId);
assertEquals(bucket.getGcpBucket().getAttributes().getBucketName(), gotBucket.getAttributes().getBucketName());
assertEquals(bucketName, gotBucket.getAttributes().getBucketName());
// Verify direct bucket access for the creator and a workspace reader.
try (GcsBucketAccessTester tester = new GcsBucketAccessTester(testUser, bucketName, getSourceProjectId())) {
tester.checkAccess(testUser, ControlledResourceIamRole.EDITOR);
tester.checkAccessWait(getWorkspaceReader(), ControlledResourceIamRole.READER);
}
// Populate bucket to test that objects are cloned. This single storage client is
// reused for all direct-GCS checks below; the previous code built an identical
// second client ("ownerStorageClient") for the same user and project.
final Storage sourceOwnerStorageClient = ClientTestUtils.getGcpStorageClient(testUser, getSourceProjectId());
final BlobId blobId = BlobId.of(bucketName, GCS_BLOB_NAME);
final BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setContentType("text/plain").build();
final Blob createdFile = sourceOwnerStorageClient.create(blobInfo, GCS_BLOB_CONTENT.getBytes(StandardCharsets.UTF_8));
logger.info("Wrote blob {} to bucket", createdFile.getBlobId());
// Clone the bucket into another workspace
ControlledGcpResourceApi readerControlledResourceApi = ClientTestUtils.getControlledGcpResourceClient(getWorkspaceReader(), server);
testCloneBucket(bucket.getGcpBucket(), getWorkspaceReader(), readerControlledResourceApi);
// Delete file after successful clone so that source bucket deletion later is faster
sourceOwnerStorageClient.delete(blobId);
// Update the bucket metadata and parameters, and verify the response echoes the change.
final GcpGcsBucketResource updatedResource = updateBucketAttempt(resourceApi, resourceId, UPDATED_BUCKET_RESOURCE_NAME, UPDATED_BUCKET_RESOURCE_DESCRIPTION, BUCKET_UPDATE_PARAMETER_1);
logger.info("Updated resource name to {} and description to {}", updatedResource.getMetadata().getName(), updatedResource.getMetadata().getDescription());
assertEquals(UPDATED_BUCKET_RESOURCE_NAME, updatedResource.getMetadata().getName());
assertEquals(UPDATED_BUCKET_RESOURCE_DESCRIPTION, updatedResource.getMetadata().getDescription());
assertEquals(CloningInstructionsEnum.DEFINITION, updatedResource.getMetadata().getCloningInstructions());
// However, invalid updates are rejected.
String invalidName = "!!!invalid_name!!!";
ApiException invalidUpdateEx = assertThrows(ApiException.class, () -> updateBucketAttempt(resourceApi, resourceId, invalidName, /*updatedDescription=*/
null, /*updateParameters=*/
null));
assertEquals(HttpStatusCodes.STATUS_CODE_BAD_REQUEST, invalidUpdateEx.getCode());
// Confirm the update took effect on the actual GCP bucket, not just WSM metadata.
final Bucket retrievedUpdatedBucket = sourceOwnerStorageClient.get(bucketName, BucketGetOption.fields(BucketField.LIFECYCLE, BucketField.STORAGE_CLASS));
logger.info("Retrieved bucket {}", retrievedUpdatedBucket.toString());
assertEquals(StorageClass.NEARLINE, retrievedUpdatedBucket.getStorageClass());
final List<? extends LifecycleRule> lifecycleRules = retrievedUpdatedBucket.getLifecycleRules();
lifecycleRules.forEach(r -> logger.info("Lifecycle rule: {}", r.toString()));
assertThat(lifecycleRules, hasSize(1));
verifyUpdatedLifecycleRules(lifecycleRules);
// Update parameters only: name and description should be unchanged.
final GcpGcsBucketResource resource2 = updateBucketAttempt(resourceApi, resourceId, null, null, BUCKET_UPDATE_PARAMETERS_2);
final Bucket retrievedUpdatedBucket2 = sourceOwnerStorageClient.get(bucketName, BucketGetOption.fields(BucketField.LIFECYCLE, BucketField.STORAGE_CLASS));
assertEquals(StorageClass.COLDLINE, retrievedUpdatedBucket2.getStorageClass());
// no change
assertEquals(UPDATED_BUCKET_RESOURCE_NAME, resource2.getMetadata().getName());
assertEquals(UPDATED_BUCKET_RESOURCE_DESCRIPTION, // no change
resource2.getMetadata().getDescription());
// no change
verifyUpdatedLifecycleRules(retrievedUpdatedBucket2.getLifecycleRules());
// test without UpdateParameters: only the name changes; cloud attributes stay put.
final GcpGcsBucketResource resource3 = updateBucketAttempt(resourceApi, resourceId, UPDATED_BUCKET_RESOURCE_NAME_2, null, null);
final Bucket retrievedUpdatedBucket3 = sourceOwnerStorageClient.get(bucketName, BucketGetOption.fields(BucketField.LIFECYCLE, BucketField.STORAGE_CLASS));
assertEquals(UPDATED_BUCKET_RESOURCE_NAME_2, resource3.getMetadata().getName());
assertEquals(UPDATED_BUCKET_RESOURCE_DESCRIPTION, // no change
resource3.getMetadata().getDescription());
// no change
assertEquals(StorageClass.COLDLINE, retrievedUpdatedBucket3.getStorageClass());
// no change
verifyUpdatedLifecycleRules(retrievedUpdatedBucket3.getLifecycleRules());
// Enumerate the bucket as a workspace reader.
ResourceApi readerApi = ClientTestUtils.getResourceClient(getWorkspaceReader(), server);
ResourceList bucketList = readerApi.enumerateResources(getWorkspaceId(), 0, 5, ResourceType.GCS_BUCKET, StewardshipType.CONTROLLED);
assertEquals(1, bucketList.getResources().size());
MultiResourcesUtils.assertResourceType(ResourceType.GCS_BUCKET, bucketList);
// Owner can delete the bucket through WSM
GcsBucketUtils.deleteControlledGcsBucket(resourceId, getWorkspaceId(), resourceApi);
// verify the bucket was deleted from WSM metadata
ApiException bucketNotFound = assertThrows(ApiException.class, () -> resourceApi.getBucket(getWorkspaceId(), resourceId), "Incorrectly found a deleted bucket!");
assertEquals(HttpStatusCodes.STATUS_CODE_NOT_FOUND, bucketNotFound.getCode());
// also verify it was deleted from GCP (Storage.get returns null for a missing bucket)
Bucket maybeBucket = sourceOwnerStorageClient.get(bucketName);
assertNull(maybeBucket);
// Clear the field so cleanup does not try to delete the bucket again.
bucketName = null;
// Delete the cloud context. This is not required. Just some exercise for deleteCloudContext
CloudContextMaker.deleteGcpCloudContext(getWorkspaceId(), workspaceApi);
logger.info("Cloud context deleted. User Journey complete.");
}
Aggregations