Use of bio.terra.workspace.generated.model.ApiGcpGcsBucketCreationParameters in the project terra-workspace-manager by DataBiosphere.
From the class CreateGcsBucketStepTest, method testCreatesBucket.
@Test
public void testCreatesBucket() throws RetryException, InterruptedException {
  // Build the step under test around fixture creation parameters.
  ApiGcpGcsBucketCreationParameters params = getGoogleBucketCreationParameters();
  CreateGcsBucketStep step =
      new CreateGcsBucketStep(
          mockCrlService,
          ControlledResourceFixtures.getBucketResource(params.getName()),
          mockGcpCloudContextService);

  // Stage the creation parameters in an immutable input flight map served by the mock context.
  FlightMap inputs = new FlightMap();
  inputs.put(WorkspaceFlightMapKeys.ControlledResourceKeys.CREATION_PARAMETERS, params);
  inputs.makeImmutable();
  doReturn(inputs).when(mockFlightContext).getInputParameters();

  // Execute the step; it must report success.
  StepResult stepResult = step.doStep(mockFlightContext);
  assertThat(stepResult, equalTo(StepResult.getStepResultSuccess()));

  // The BucketInfo captured on its way to the storage client must reflect the fixture values.
  BucketInfo bucketInfo = bucketInfoCaptor.getValue();
  assertThat(bucketInfo.getName(), equalTo(params.getName()));
  assertThat(bucketInfo.getLocation(), equalTo(DEFAULT_REGION));
  assertThat(bucketInfo.getStorageClass(), equalTo(StorageClass.STANDARD));
  assertThat(bucketInfo.getLifecycleRules(), hasSize(equalTo(2)));

  // Rule 0: delete live ARCHIVE-class objects older than 64 days that have at least
  // two newer versions.
  LifecycleRule expectedDeleteRule =
      new LifecycleRule(
          LifecycleAction.newDeleteAction(),
          LifecycleCondition.newBuilder()
              .setAge(64)
              .setCreatedBefore(null)
              .setNumberOfNewerVersions(2)
              .setIsLive(true)
              .setMatchesStorageClass(Collections.singletonList(StorageClass.ARCHIVE))
              .build());
  assertEquals(expectedDeleteRule, bucketInfo.getLifecycleRules().get(0));

  // Rule 1: move STANDARD-class objects created before 2007-01-03 UTC to NEARLINE.
  LifecycleRule expectedStorageClassRule =
      new LifecycleRule(
          LifecycleAction.newSetStorageClassAction(StorageClass.NEARLINE),
          LifecycleCondition.newBuilder()
              .setAge(null)
              .setCreatedBefore(
                  toGoogleDateTime(OffsetDateTime.of(2007, 1, 3, 0, 0, 0, 0, ZoneOffset.UTC)))
              .setNumberOfNewerVersions(null)
              .setIsLive(null)
              .setMatchesStorageClass(Collections.singletonList(StorageClass.STANDARD))
              .build());
  assertEquals(expectedStorageClassRule, bucketInfo.getLifecycleRules().get(1));
}
Use of bio.terra.workspace.generated.model.ApiGcpGcsBucketCreationParameters in the project terra-workspace-manager by DataBiosphere.
From the class CreateGcsBucketStep, method doStep.
@Override
public StepResult doStep(FlightContext flightContext) throws InterruptedException, RetryException {
  // Creates the controlled GCS bucket in the workspace's GCP project. Safe to re-run on a
  // Stairway retry: finding the bucket already in our own project is treated as success.
  FlightMap inputParameters = flightContext.getInputParameters();
  ApiGcpGcsBucketCreationParameters creationParameters =
      inputParameters.get(CREATION_PARAMETERS, ApiGcpGcsBucketCreationParameters.class);
  String projectId = gcpCloudContextService.getRequiredGcpProject(resource.getWorkspaceId());

  // Fall back to the default region when the caller omits a location.
  String location = Optional.ofNullable(creationParameters.getLocation()).orElse(DEFAULT_REGION);
  BucketInfo.Builder bucketBuilder =
      BucketInfo.newBuilder(resource.getBucketName()).setLocation(location);

  // Remaining creation parameters are optional
  if (creationParameters.getDefaultStorageClass() != null) {
    bucketBuilder.setStorageClass(
        GcsApiConversions.toGcsApi(creationParameters.getDefaultStorageClass()));
  }
  bucketBuilder.setLifecycleRules(
      creationParameters.getLifecycle() == null
          ? Collections.emptyList()
          : GcsApiConversions.toGcsApiRulesList(creationParameters.getLifecycle()));

  // Every bucket WSM creates gets uniform bucket-level access.
  bucketBuilder.setIamConfiguration(
      BucketInfo.IamConfiguration.newBuilder().setIsUniformBucketLevelAccessEnabled(true).build());

  // Uniqueness within the project is already verified in WSM's DB earlier in this flight.
  try {
    Optional<Bucket> existingBucket = getBucket(resource.getBucketName());
    if (existingBucket.isEmpty()) {
      StorageCow storageCow = crlService.createStorageCow(projectId);
      storageCow.create(bucketBuilder.build());
    } else if (bucketInProject(existingBucket.get(), projectId)) {
      logger.info(
          "Bucket {} already exists in workspace project, this is a Stairway retry. Continuing.",
          resource.getBucketName());
    } else {
      throw new DuplicateResourceException(
          "The provided bucket name is already in use, please choose another.");
    }
  } catch (StorageException storageException) {
    // A 409 CONFLICT means the name is already taken somewhere
    // in GCP's global bucket namespace, even if we don't have permission to GET it.
    if (storageException.getCode() == HttpStatus.SC_CONFLICT) {
      throw new DuplicateResourceException(
          "The provided bucket name is already in use, please choose another.", storageException);
    }
    if (storageException.getCode() == HttpStatus.SC_BAD_REQUEST) {
      throw new BadRequestException(
          "Received 400 BAD_REQUEST exception when creating a new gcs-bucket", storageException);
    }
    // Other cloud errors are unexpected here, rethrow.
    throw storageException;
  }
  return StepResult.getStepResultSuccess();
}
Use of bio.terra.workspace.generated.model.ApiGcpGcsBucketCreationParameters in the project terra-workspace-manager by DataBiosphere.
From the class CopyGcsBucketDefinitionStep, method doStep.
@Override
public StepResult doStep(FlightContext flightContext) throws InterruptedException, RetryException {
// Builds the destination bucket's controlled-resource definition for a clone operation and
// creates it synchronously via the controlled-resource service. Input parameters may override
// the source bucket's name, description, bucket name, and cloning instructions.
final FlightMap inputParameters = flightContext.getInputParameters();
final FlightMap workingMap = flightContext.getWorkingMap();
// An override from the input parameters wins; otherwise use the source bucket's instructions.
final CloningInstructions cloningInstructions = Optional.ofNullable(inputParameters.get(ControlledResourceKeys.CLONING_INSTRUCTIONS, CloningInstructions.class)).orElse(sourceBucket.getCloningInstructions());
// future steps need the resolved cloning instructions
workingMap.put(ControlledResourceKeys.CLONING_INSTRUCTIONS, cloningInstructions);
// COPY_NOTHING: respond with a result that carries no destination bucket and succeed early.
if (CloningInstructions.COPY_NOTHING.equals(cloningInstructions)) {
final ApiClonedControlledGcpGcsBucket noOpResult = new ApiClonedControlledGcpGcsBucket().effectiveCloningInstructions(cloningInstructions.toApiModel()).bucket(null).sourceWorkspaceId(sourceBucket.getWorkspaceId()).sourceResourceId(sourceBucket.getResourceId());
FlightUtils.setResponse(flightContext, noOpResult, HttpStatus.OK);
return StepResult.getStepResultSuccess();
}
// todo: handle COPY_REFERENCE PF-811, PF-812
// Name/description come from the input parameters when present, else from working-map values
// carrying the source's previous name/description.
final String resourceName = FlightUtils.getInputParameterOrWorkingValue(flightContext, ResourceKeys.RESOURCE_NAME, ResourceKeys.PREVIOUS_RESOURCE_NAME, String.class);
final String description = FlightUtils.getInputParameterOrWorkingValue(flightContext, ResourceKeys.RESOURCE_DESCRIPTION, ResourceKeys.PREVIOUS_RESOURCE_DESCRIPTION, String.class);
// Caller-supplied destination bucket name, or a freshly generated random one.
final String bucketName = Optional.ofNullable(inputParameters.get(ControlledResourceKeys.DESTINATION_BUCKET_NAME, String.class)).orElseGet(this::randomBucketName);
// Private resources start INITIALIZING until assignment completes; shared resources have no
// private-state lifecycle.
final PrivateResourceState privateResourceState = sourceBucket.getAccessScope() == AccessScopeType.ACCESS_SCOPE_PRIVATE ? PrivateResourceState.INITIALIZING : PrivateResourceState.NOT_APPLICABLE;
// Store effective bucket name for destination
workingMap.put(ControlledResourceKeys.DESTINATION_BUCKET_NAME, bucketName);
final UUID destinationWorkspaceId = inputParameters.get(ControlledResourceKeys.DESTINATION_WORKSPACE_ID, UUID.class);
// bucket resource for create flight
// NOTE(review): the destination's cloningInstructions field is copied from
// sourceBucket.getCloningInstructions() rather than the resolved cloningInstructions computed
// above — confirm the override is not meant to propagate to the cloned resource.
ControlledResourceFields commonFields = ControlledResourceFields.builder().workspaceId(destinationWorkspaceId).resourceId(// random ID for new resource
UUID.randomUUID()).name(resourceName).description(description).cloningInstructions(sourceBucket.getCloningInstructions()).assignedUser(sourceBucket.getAssignedUser().orElse(null)).accessScope(sourceBucket.getAccessScope()).managedBy(sourceBucket.getManagedBy()).applicationId(sourceBucket.getApplicationId()).privateResourceState(privateResourceState).build();
ControlledGcsBucketResource destinationBucketResource = ControlledGcsBucketResource.builder().bucketName(bucketName).common(commonFields).build();
final ApiGcpGcsBucketCreationParameters destinationCreationParameters = getDestinationCreationParameters(inputParameters, workingMap);
final ControlledResourceIamRole iamRole = IamRoleUtils.getIamRoleForAccessScope(sourceBucket.getAccessScope());
// Launch a CreateControlledResourcesFlight to make the destination bucket
final ControlledGcsBucketResource clonedBucket = controlledResourceService.createControlledResourceSync(destinationBucketResource, iamRole, userRequest, destinationCreationParameters).castByEnum(WsmResourceType.CONTROLLED_GCP_GCS_BUCKET);
// Later steps (and undo) read the created resource definition from the working map.
workingMap.put(ControlledResourceKeys.CLONED_RESOURCE_DEFINITION, clonedBucket);
final ApiCreatedControlledGcpGcsBucket apiCreatedBucket = new ApiCreatedControlledGcpGcsBucket().gcpBucket(clonedBucket.toApiResource()).resourceId(destinationBucketResource.getResourceId());
final ApiClonedControlledGcpGcsBucket apiBucketResult = new ApiClonedControlledGcpGcsBucket().effectiveCloningInstructions(cloningInstructions.toApiModel()).bucket(apiCreatedBucket).sourceWorkspaceId(sourceBucket.getWorkspaceId()).sourceResourceId(sourceBucket.getResourceId());
workingMap.put(ControlledResourceKeys.CLONE_DEFINITION_RESULT, apiBucketResult);
// COPY_DEFINITION is complete at this point, so the flight response is set here; other
// instructions presumably finish in later steps.
if (cloningInstructions.equals(CloningInstructions.COPY_DEFINITION)) {
FlightUtils.setResponse(flightContext, apiBucketResult, HttpStatus.OK);
}
return StepResult.getStepResultSuccess();
}
Use of bio.terra.workspace.generated.model.ApiGcpGcsBucketCreationParameters in the project terra-workspace-manager by DataBiosphere.
From the class PrivateResourceCleanupServiceTest, method cleanupResourcesSuppressExceptions_cleansApplicationPrivateResource_succeeds.
@Test
@DisabledIfEnvironmentVariable(named = "TEST_ENV", matches = BUFFER_SERVICE_DISABLED_ENVS_REG_EX)
void cleanupResourcesSuppressExceptions_cleansApplicationPrivateResource_succeeds() {
// Integration test: when a user loses workspace access, the cleanup service revokes their Sam
// access to an application-managed private bucket, marks it ABANDONED in the DB, and leaves
// the owning application's access intact.
// Default user owns the workspace and group. Secondary user has workspace membership via group.
// Add second user to group
addUserToGroup(groupName, userAccessUtils.getSecondUserEmail(), ownerGroupApi);
// Add group to workspace as writer
SamRethrow.onInterrupted(() -> samService.grantWorkspaceRole(workspace.getWorkspaceId(), userAccessUtils.defaultUserAuthRequest(), WsmIamRole.WRITER, groupEmail), "grantWorkspaceRole");
// Enable the WSM test app in this workspace. This has a test user as the "service account" so
// we can delegate credentials normally.
App appConfig = getAppBySa(applicationAccessUtils.getApplicationSaEmail());
UUID appId = UUID.fromString(appConfig.getIdentifier());
AuthenticatedUserRequest appRequest = applicationAccessUtils.applicationSaAuthenticatedUserRequest();
wsmApplicationService.enableWorkspaceApplication(userAccessUtils.defaultUserAuthRequest(), workspace.getWorkspaceId(), appId);
// Create application private bucket assigned to second user.
ControlledResourceFields commonFields = ControlledResourceFixtures.makeDefaultControlledResourceFieldsBuilder().workspaceId(workspace.getWorkspaceId()).accessScope(AccessScopeType.ACCESS_SCOPE_PRIVATE).managedBy(ManagedByType.MANAGED_BY_APPLICATION).applicationId(appId).assignedUser(userAccessUtils.getSecondUserEmail()).build();
ControlledGcsBucketResource resource = ControlledGcsBucketResource.builder().common(commonFields).bucketName(ControlledResourceFixtures.uniqueBucketName()).build();
ApiGcpGcsBucketCreationParameters creationParameters = new ApiGcpGcsBucketCreationParameters().location("us-central1");
// Create resource as application.
controlledResourceService.createControlledResourceSync(resource, ControlledResourceIamRole.WRITER, appRequest, creationParameters);
// Verify second user can read the private resource in Sam.
SamRethrow.onInterrupted(() -> samService.checkAuthz(userAccessUtils.secondUserAuthRequest(), resource.getCategory().getSamResourceName(), resource.getResourceId().toString(), SamControlledResourceActions.READ_ACTION), "checkResourceAuth");
// Remove second user from workspace via group.
removeUserFromGroup(groupName, userAccessUtils.getSecondUserEmail(), ownerGroupApi);
// Verify second user is no longer in workspace, but still has resource access because cleanup
// hasn't run yet.
assertFalse(SamRethrow.onInterrupted(() -> samService.isAuthorized(userAccessUtils.secondUserAuthRequest(), SamResource.WORKSPACE, resource.getWorkspaceId().toString(), SamWorkspaceAction.READ), "checkResourceAuth"));
assertTrue(SamRethrow.onInterrupted(() -> samService.isAuthorized(userAccessUtils.secondUserAuthRequest(), resource.getCategory().getSamResourceName(), resource.getResourceId().toString(), SamControlledResourceActions.READ_ACTION), "checkResourceAuth"));
// Manually enable and run cleanup.
privateResourceCleanupConfiguration.setEnabled(true);
// Calling "cleanupResources" manually lets us skip waiting for the cronjob to trigger.
privateResourceCleanupService.cleanupResourcesSuppressExceptions();
// Verify second user can no longer read the resource.
assertFalse(SamRethrow.onInterrupted(() -> samService.isAuthorized(userAccessUtils.secondUserAuthRequest(), resource.getCategory().getSamResourceName(), resource.getResourceId().toString(), SamControlledResourceActions.READ_ACTION), "checkResourceAuth"));
// Verify resource is marked "abandoned"
// NOTE(review): Optional.get() without a presence check — an absent state fails with
// NoSuchElementException rather than an assertion message.
ControlledResource dbResource = resourceDao.getResource(resource.getWorkspaceId(), resource.getResourceId()).castToControlledResource();
assertEquals(PrivateResourceState.ABANDONED, dbResource.getPrivateResourceState().get());
// Application can still read the resource, because applications have EDITOR role on their
// application-private resources.
assertTrue(SamRethrow.onInterrupted(() -> samService.isAuthorized(appRequest, resource.getCategory().getSamResourceName(), resource.getResourceId().toString(), SamControlledResourceActions.READ_ACTION), "checkResourceAuth"));
}
Use of bio.terra.workspace.generated.model.ApiGcpGcsBucketCreationParameters in the project terra-workspace-manager by DataBiosphere.
From the class PrivateResourceCleanupServiceTest, method cleanupResourcesSuppressExceptions_cleansUserPrivateResource_succeeds.
@Test
@DisabledIfEnvironmentVariable(named = "TEST_ENV", matches = BUFFER_SERVICE_DISABLED_ENVS_REG_EX)
void cleanupResourcesSuppressExceptions_cleansUserPrivateResource_succeeds() {
// Integration test: when a user loses workspace access, the cleanup service revokes their Sam
// access to their user-managed private bucket and marks the resource ABANDONED in the DB.
// Default user owns the workspace and group. Secondary user has workspace membership via group.
// Add second user to group
addUserToGroup(groupName, userAccessUtils.getSecondUserEmail(), ownerGroupApi);
// Add group to workspace as writer
SamRethrow.onInterrupted(() -> samService.grantWorkspaceRole(workspace.getWorkspaceId(), userAccessUtils.defaultUserAuthRequest(), WsmIamRole.WRITER, groupEmail), "grantWorkspaceRole");
// Create private bucket as second user.
ControlledResourceFields commonFields = ControlledResourceFixtures.makeDefaultControlledResourceFieldsBuilder().workspaceId(workspace.getWorkspaceId()).accessScope(AccessScopeType.ACCESS_SCOPE_PRIVATE).managedBy(ManagedByType.MANAGED_BY_USER).assignedUser(userAccessUtils.getSecondUserEmail()).build();
ControlledGcsBucketResource resource = ControlledGcsBucketResource.builder().common(commonFields).bucketName(ControlledResourceFixtures.uniqueBucketName()).build();
ApiGcpGcsBucketCreationParameters creationParameters = new ApiGcpGcsBucketCreationParameters().location("us-central1");
// Resource creation is performed by the default user; the bucket is assigned to the second
// user via commonFields above.
controlledResourceService.createControlledResourceSync(resource, ControlledResourceIamRole.EDITOR, userAccessUtils.defaultUserAuthRequest(), creationParameters);
// Verify second user can read the private resource in Sam.
SamRethrow.onInterrupted(() -> samService.checkAuthz(userAccessUtils.secondUserAuthRequest(), resource.getCategory().getSamResourceName(), resource.getResourceId().toString(), SamControlledResourceActions.READ_ACTION), "checkResourceAuth");
// Remove second user from workspace via group
removeUserFromGroup(groupName, userAccessUtils.getSecondUserEmail(), ownerGroupApi);
// Verify second user is no longer in workspace, but still has resource access because cleanup
// hasn't run yet.
assertFalse(SamRethrow.onInterrupted(() -> samService.isAuthorized(userAccessUtils.secondUserAuthRequest(), SamResource.WORKSPACE, resource.getWorkspaceId().toString(), SamWorkspaceAction.READ), "checkResourceAuth"));
assertTrue(SamRethrow.onInterrupted(() -> samService.isAuthorized(userAccessUtils.secondUserAuthRequest(), resource.getCategory().getSamResourceName(), resource.getResourceId().toString(), SamControlledResourceActions.READ_ACTION), "checkResourceAuth"));
// Manually enable and run cleanup.
privateResourceCleanupConfiguration.setEnabled(true);
// Calling "cleanupResources" manually lets us skip waiting for the cronjob to trigger.
privateResourceCleanupService.cleanupResourcesSuppressExceptions();
// Verify second user can no longer read the resource.
assertFalse(SamRethrow.onInterrupted(() -> samService.isAuthorized(userAccessUtils.secondUserAuthRequest(), resource.getCategory().getSamResourceName(), resource.getResourceId().toString(), SamControlledResourceActions.READ_ACTION), "checkResourceAuth"));
// Verify resource is marked "abandoned"
// NOTE(review): Optional.get() without a presence check — an absent state fails with
// NoSuchElementException rather than an assertion message.
ControlledResource dbResource = resourceDao.getResource(resource.getWorkspaceId(), resource.getResourceId()).castToControlledResource();
assertEquals(PrivateResourceState.ABANDONED, dbResource.getPrivateResourceState().get());
}
Aggregations