Use of bio.terra.workspace.service.resource.exception.DuplicateResourceException in the project terra-workspace-manager by DataBiosphere.
Example from the class RawDaoTestFixture, method storeResource:
/**
 * Writes a resource row into the database from bare parts (test-fixture helper).
 *
 * <p>Inserts one row into the {@code resource} table using the raw column values supplied by the
 * caller; no validation or serialization is performed here. Note the column mapping: the
 * {@code resourceType} parameter populates {@code exact_resource_type}, while
 * {@code resourceFamily} populates {@code resource_type}.
 *
 * @throws DuplicateResourceException if a row with the same name or resource id already exists
 *     (translated from Spring's {@link DuplicateKeyException}, preserving the cause)
 */
@WriteTransaction
public void storeResource(
    String workspaceId,
    String cloudPlatform,
    String resourceId,
    String name,
    String description,
    String stewardshipType,
    String resourceType,
    String resourceFamily,
    String cloningInstructions,
    String attributes,
    String accessScope,
    String managedBy,
    String associatedApp,
    String assignedUser,
    String privateResourceState) {
  final String sql =
      "INSERT INTO resource (workspace_id, cloud_platform, resource_id, name, description, stewardship_type,"
          + " exact_resource_type, resource_type, cloning_instructions, attributes,"
          + " access_scope, managed_by, associated_app, assigned_user, private_resource_state)"
          + " VALUES (:workspace_id, :cloud_platform, :resource_id, :name, :description, :stewardship_type,"
          + " :exact_resource_type, :resource_type, :cloning_instructions, cast(:attributes AS jsonb),"
          + " :access_scope, :managed_by, :associated_app, :assigned_user, :private_resource_state)";
  final var params =
      new MapSqlParameterSource()
          .addValue("workspace_id", workspaceId)
          .addValue("cloud_platform", cloudPlatform)
          .addValue("resource_id", resourceId)
          .addValue("name", name)
          .addValue("description", description)
          .addValue("stewardship_type", stewardshipType)
          .addValue("exact_resource_type", resourceType)
          .addValue("resource_type", resourceFamily)
          .addValue("cloning_instructions", cloningInstructions)
          .addValue("attributes", attributes)
          .addValue("access_scope", accessScope)
          .addValue("managed_by", managedBy)
          .addValue("associated_app", associatedApp)
          .addValue("assigned_user", assignedUser)
          .addValue("private_resource_state", privateResourceState);
  try {
    jdbcTemplate.update(sql, params);
    logger.info("Inserted record for resource {} for workspace {}", resourceId, workspaceId);
  } catch (DuplicateKeyException e) {
    // Preserve the underlying constraint violation as the cause instead of dropping it,
    // so the original database error remains visible in logs and stack traces.
    throw new DuplicateResourceException(
        String.format(
            "A resource already exists in the workspace that has the same name (%s) or the same id (%s)",
            name, resourceId),
        e);
  }
}
Use of bio.terra.workspace.service.resource.exception.DuplicateResourceException in the project terra-workspace-manager by DataBiosphere.
Example from the class ResourceDao, method validateUniqueAzureVm:
/**
 * Rejects creation of a controlled Azure VM whose {@code vmName} is already present in the
 * resource table.
 *
 * @throws DuplicateResourceException if a controlled Azure VM row with the same vmName exists
 */
private void validateUniqueAzureVm(ControlledAzureVmResource resource) {
  // This should take into account azure uniqueness param, namely the fields in `AzureContext`
  final String countQuery =
      "SELECT COUNT(1) FROM resource WHERE resource_type = :resource_type"
          + " AND stewardship_type = :stewardship_type AND attributes->>'vmName' = :vm_name";
  final var params =
      new MapSqlParameterSource()
          .addValue("resource_type", AZURE_VM.toSql())
          .addValue("stewardship_type", CONTROLLED.toSql())
          .addValue("vm_name", resource.getVmName());
  final Integer count = jdbcTemplate.queryForObject(countQuery, params, Integer.class);
  // A null count means no rows were matched; only a positive count is a clash.
  if (count == null || count <= 0) {
    return;
  }
  throw new DuplicateResourceException(
      String.format("An Azure Vm with ID %s already exists", resource.getVmName()));
}
Use of bio.terra.workspace.service.resource.exception.DuplicateResourceException in the project terra-workspace-manager by DataBiosphere.
Example from the class ResourceDao, method validateUniqueBigQueryDataset:
/**
 * Rejects creation of a controlled BigQuery dataset whose {@code datasetName} already exists in
 * the same workspace.
 *
 * @throws DuplicateResourceException if a matching controlled dataset row exists
 */
private void validateUniqueBigQueryDataset(ControlledBigQueryDatasetResource datasetResource) {
  // Workspace ID is a proxy for project ID, which works because there is a permanent, 1:1
  // correspondence between workspaces and GCP projects.
  final String countQuery =
      "SELECT COUNT(1) FROM resource WHERE resource_type = :resource_type"
          + " AND stewardship_type = :stewardship_type AND workspace_id = :workspace_id"
          + " AND attributes->>'datasetName' = :dataset_name";
  final var params =
      new MapSqlParameterSource()
          .addValue("resource_type", BIG_QUERY_DATASET.toSql())
          .addValue("stewardship_type", CONTROLLED.toSql())
          .addValue("workspace_id", datasetResource.getWorkspaceId().toString())
          .addValue("dataset_name", datasetResource.getDatasetName());
  final Integer count = jdbcTemplate.queryForObject(countQuery, params, Integer.class);
  // A null count means no rows were matched; only a positive count is a clash.
  if (count == null || count <= 0) {
    return;
  }
  throw new DuplicateResourceException(
      String.format(
          "A BigQuery dataset with ID %s already exists", datasetResource.getDatasetName()));
}
Use of bio.terra.workspace.service.resource.exception.DuplicateResourceException in the project terra-workspace-manager by DataBiosphere.
Example from the class ResourceDao, method validateUniqueAzureNetwork:
/**
 * Rejects creation of a controlled Azure network whose {@code networkName} already exists in the
 * same workspace.
 *
 * @throws DuplicateResourceException if a matching controlled network row exists
 */
private void validateUniqueAzureNetwork(ControlledAzureNetworkResource resource) {
  final String countQuery =
      "SELECT COUNT(1) FROM resource WHERE resource_type = :resource_type"
          + " AND stewardship_type = :stewardship_type AND workspace_id = :workspace_id"
          + " AND attributes->>'networkName' = :network_name";
  final var params =
      new MapSqlParameterSource()
          .addValue("resource_type", AZURE_NETWORK.toSql())
          .addValue("stewardship_type", CONTROLLED.toSql())
          .addValue("workspace_id", resource.getWorkspaceId().toString())
          .addValue("network_name", resource.getNetworkName());
  final Integer count = jdbcTemplate.queryForObject(countQuery, params, Integer.class);
  // A null count means no rows were matched; only a positive count is a clash.
  if (count == null || count <= 0) {
    return;
  }
  throw new DuplicateResourceException(
      String.format("An Azure Network with ID %s already exists", resource.getNetworkName()));
}
Use of bio.terra.workspace.service.resource.exception.DuplicateResourceException in the project terra-workspace-manager by DataBiosphere.
Example from the class CreateGcsBucketStep, method doStep:
/**
 * Creates the controlled GCS bucket in the workspace's GCP project.
 *
 * <p>Idempotent across Stairway retries: if the bucket already exists in this workspace's
 * project, the step logs and succeeds without re-creating it. If the name is taken by a bucket
 * outside the project, the step fails with {@link DuplicateResourceException}.
 */
@Override
public StepResult doStep(FlightContext flightContext) throws InterruptedException, RetryException {
FlightMap inputMap = flightContext.getInputParameters();
ApiGcpGcsBucketCreationParameters creationParameters = inputMap.get(CREATION_PARAMETERS, ApiGcpGcsBucketCreationParameters.class);
String projectId = gcpCloudContextService.getRequiredGcpProject(resource.getWorkspaceId());
// Location falls back to DEFAULT_REGION when the caller did not specify one.
BucketInfo.Builder bucketInfoBuilder = BucketInfo.newBuilder(resource.getBucketName()).setLocation(Optional.ofNullable(creationParameters.getLocation()).orElse(DEFAULT_REGION));
// Remaining creation parameters are optional
Optional.ofNullable(creationParameters.getDefaultStorageClass()).map(GcsApiConversions::toGcsApi).ifPresent(bucketInfoBuilder::setStorageClass);
// No lifecycle rules specified means an empty rule list, not null.
bucketInfoBuilder.setLifecycleRules(Optional.ofNullable(creationParameters.getLifecycle()).map(GcsApiConversions::toGcsApiRulesList).orElse(Collections.emptyList()));
// Uniform bucket-level access is always enabled for buckets created by this step.
BucketInfo.IamConfiguration iamConfiguration = BucketInfo.IamConfiguration.newBuilder().setIsUniformBucketLevelAccessEnabled(true).build();
bucketInfoBuilder.setIamConfiguration(iamConfiguration);
// Uniqueness within the project is already verified in WSM's DB earlier in this flight.
try {
// NOTE(review): getBucket appears to probe the global bucket namespace — confirm against its
// definition; the branches below rely on it returning the bucket if it exists anywhere.
Optional<Bucket> existingBucket = getBucket(resource.getBucketName());
if (existingBucket.isEmpty()) {
StorageCow storageCow = crlService.createStorageCow(projectId);
storageCow.create(bucketInfoBuilder.build());
} else if (bucketInProject(existingBucket.get(), projectId)) {
logger.info("Bucket {} already exists in workspace project, this is a Stairway retry. Continuing.", resource.getBucketName());
} else {
// The name is held by a bucket outside this workspace's project — cannot proceed.
throw new DuplicateResourceException("The provided bucket name is already in use, please choose another.");
}
} catch (StorageException storageException) {
// A 409 CONFLICT from the create call means the bucket name is already taken somewhere
// in GCP's global bucket namespace, even if we don't have permission to GET it.
if (storageException.getCode() == HttpStatus.SC_CONFLICT) {
throw new DuplicateResourceException("The provided bucket name is already in use, please choose another.", storageException);
}
// A 400 indicates invalid creation parameters (e.g. a malformed bucket name).
if (storageException.getCode() == HttpStatus.SC_BAD_REQUEST) {
throw new BadRequestException("Received 400 BAD_REQUEST exception when creating a new gcs-bucket", storageException);
}
// Other cloud errors are unexpected here, rethrow.
throw storageException;
}
return StepResult.getStepResultSuccess();
}
Aggregations