Usage of bio.terra.workspace.generated.model.ApiGcpBigQueryDatasetCreationParameters in the terra-workspace-manager project by DataBiosphere.
From class ControlledResourceServiceTest, method updateBqDatasetDo.
@Test
@DisabledIfEnvironmentVariable(named = "TEST_ENV", matches = BUFFER_SERVICE_DISABLED_ENVS_REG_EX)
void updateBqDatasetDo() throws Exception {
  // Provision a controlled BigQuery dataset to exercise the update flight against.
  final String datasetId = ControlledResourceFixtures.uniqueDatasetId();
  final String location = "us-central1";
  final ApiGcpBigQueryDatasetCreationParameters creationParameters =
      new ApiGcpBigQueryDatasetCreationParameters().datasetId(datasetId).location(location);
  final ControlledBigQueryDatasetResource resource =
      ControlledResourceFixtures.makeDefaultControlledBigQueryBuilder(workspace.getWorkspaceId())
          .datasetName(datasetId)
          .build();
  final ControlledBigQueryDatasetResource createdDataset =
      controlledResourceService
          .createControlledResourceSync(
              resource, null, user.getAuthenticatedRequest(), creationParameters)
          .castByEnum(WsmResourceType.CONTROLLED_GCP_BIG_QUERY_DATASET);
  assertEquals(resource, createdDataset);

  // Force each dataset-specific step to fail once so the flight retries it,
  // proving the steps are idempotent.
  final Map<String, StepStatus> stepRetries = new HashMap<>();
  stepRetries.put(
      RetrieveBigQueryDatasetCloudAttributesStep.class.getName(),
      StepStatus.STEP_RESULT_FAILURE_RETRY);
  stepRetries.put(
      UpdateBigQueryDatasetStep.class.getName(), StepStatus.STEP_RESULT_FAILURE_RETRY);
  jobService.setFlightDebugInfoForTest(
      FlightDebugInfo.newBuilder().doStepFailures(stepRetries).build());

  // Issue an update that changes both WSM metadata and cloud-side lifetimes.
  final String newName = "NEW_updateBqDatasetDo";
  final String newDescription = "new resource description";
  final Integer newDefaultTableLifetime = 3600;
  final Integer newDefaultPartitionLifetime = 3601;
  final ApiGcpBigQueryDatasetUpdateParameters updateParameters =
      new ApiGcpBigQueryDatasetUpdateParameters()
          .defaultTableLifetime(newDefaultTableLifetime)
          .defaultPartitionLifetime(newDefaultPartitionLifetime);
  controlledResourceService.updateBqDataset(
      resource, updateParameters, user.getAuthenticatedRequest(), newName, newDescription);

  // Confirm the lifetimes were applied to the cloud dataset.
  validateBigQueryDatasetCloudMetadata(
      projectId,
      createdDataset.getDatasetName(),
      location,
      newDefaultTableLifetime,
      newDefaultPartitionLifetime);

  // Confirm the name/description were persisted in WSM.
  final ControlledBigQueryDatasetResource fetchedResource =
      controlledResourceService
          .getControlledResource(
              workspace.getWorkspaceId(), resource.getResourceId(), user.getAuthenticatedRequest())
          .castByEnum(WsmResourceType.CONTROLLED_GCP_BIG_QUERY_DATASET);
  assertEquals(newName, fetchedResource.getName());
  assertEquals(newDescription, fetchedResource.getDescription());
}
Usage of bio.terra.workspace.generated.model.ApiGcpBigQueryDatasetCreationParameters in the terra-workspace-manager project by DataBiosphere.
From class ControlledResourceServiceTest, method updateBqDatasetWithInvalidExpirationTimes.
@Test
@DisabledIfEnvironmentVariable(named = "TEST_ENV", matches = BUFFER_SERVICE_DISABLED_ENVS_REG_EX)
void updateBqDatasetWithInvalidExpirationTimes() throws Exception {
  // Create a dataset without specifying any expiration times.
  final String datasetId = ControlledResourceFixtures.uniqueDatasetId();
  final String location = "us-central1";
  final ApiGcpBigQueryDatasetCreationParameters creationParameters =
      new ApiGcpBigQueryDatasetCreationParameters().datasetId(datasetId).location(location);
  final ControlledBigQueryDatasetResource resource =
      ControlledResourceFixtures.makeDefaultControlledBigQueryBuilder(workspace.getWorkspaceId())
          .datasetName(datasetId)
          .build();
  final ControlledBigQueryDatasetResource createdDataset =
      controlledResourceService
          .createControlledResourceSync(
              resource, null, user.getAuthenticatedRequest(), creationParameters)
          .castByEnum(WsmResourceType.CONTROLLED_GCP_BIG_QUERY_DATASET);

  // A default table lifetime below 3600 seconds must be rejected up front.
  final ApiGcpBigQueryDatasetUpdateParameters badTableLifetime =
      new ApiGcpBigQueryDatasetUpdateParameters()
          .defaultTableLifetime(3000)
          .defaultPartitionLifetime(3601);
  assertThrows(
      BadRequestException.class,
      () ->
          controlledResourceService.updateBqDataset(
              resource, badTableLifetime, user.getAuthenticatedRequest(), null, null));
  // The rejected update must not have touched the cloud dataset: both
  // expiration times should still be unset.
  validateBigQueryDatasetCloudMetadata(
      projectId, createdDataset.getDatasetName(), location, null, null);

  // A negative default partition lifetime must likewise be rejected.
  final ApiGcpBigQueryDatasetUpdateParameters badPartitionLifetime =
      new ApiGcpBigQueryDatasetUpdateParameters()
          .defaultTableLifetime(3600)
          .defaultPartitionLifetime(-2);
  assertThrows(
      BadRequestException.class,
      () ->
          controlledResourceService.updateBqDataset(
              resource, badPartitionLifetime, user.getAuthenticatedRequest(), null, null));
  // Again, nothing on the cloud side should have changed.
  validateBigQueryDatasetCloudMetadata(
      projectId, createdDataset.getDatasetName(), location, null, null);
}
Usage of bio.terra.workspace.generated.model.ApiGcpBigQueryDatasetCreationParameters in the terra-workspace-manager project by DataBiosphere.
From class ControlledResourceServiceTest, method deleteBqDatasetDo.
@Test
@DisabledIfEnvironmentVariable(named = "TEST_ENV", matches = BUFFER_SERVICE_DISABLED_ENVS_REG_EX)
void deleteBqDatasetDo() throws Exception {
  // Provision a controlled BigQuery dataset that the test will delete.
  final String datasetId = ControlledResourceFixtures.uniqueDatasetId();
  final String location = "us-central1";
  final ApiGcpBigQueryDatasetCreationParameters creationParameters =
      new ApiGcpBigQueryDatasetCreationParameters().datasetId(datasetId).location(location);
  final ControlledBigQueryDatasetResource resource =
      ControlledResourceFixtures.makeDefaultControlledBigQueryBuilder(workspace.getWorkspaceId())
          .datasetName(datasetId)
          .build();
  final ControlledBigQueryDatasetResource createdDataset =
      controlledResourceService
          .createControlledResourceSync(
              resource, null, user.getAuthenticatedRequest(), creationParameters)
          .castByEnum(WsmResourceType.CONTROLLED_GCP_BIG_QUERY_DATASET);
  assertEquals(resource, createdDataset);

  // Fail each delete step once so the flight retries it, verifying idempotency.
  // lastStepFailure is deliberately not exercised: the delete flight has no
  // undo steps, only dismal failure.
  final Map<String, StepStatus> stepRetries = new HashMap<>();
  stepRetries.put(DeleteMetadataStep.class.getName(), StepStatus.STEP_RESULT_FAILURE_RETRY);
  stepRetries.put(
      DeleteBigQueryDatasetStep.class.getName(), StepStatus.STEP_RESULT_FAILURE_RETRY);
  jobService.setFlightDebugInfoForTest(
      FlightDebugInfo.newBuilder().doStepFailures(stepRetries).build());

  controlledResourceService.deleteControlledResourceSync(
      resource.getWorkspaceId(), resource.getResourceId(), user.getAuthenticatedRequest());

  // The dataset should be gone from GCP...
  final BigQueryCow bqCow = crlService.createWsmSaBigQueryCow();
  final GoogleJsonResponseException getException =
      assertThrows(
          GoogleJsonResponseException.class,
          () -> bqCow.datasets().get(projectId, resource.getDatasetName()).execute());
  assertEquals(HttpStatus.NOT_FOUND.value(), getException.getStatusCode());
  // ...and its metadata gone from WSM.
  assertThrows(
      ResourceNotFoundException.class,
      () ->
          controlledResourceService.getControlledResource(
              workspace.getWorkspaceId(),
              resource.getResourceId(),
              user.getAuthenticatedRequest()));
}
Usage of bio.terra.workspace.generated.model.ApiGcpBigQueryDatasetCreationParameters in the terra-workspace-manager project by DataBiosphere.
From class ControlledResourceServiceTest, method createBqDatasetUndo.
@Test
@DisabledIfEnvironmentVariable(named = "TEST_ENV", matches = BUFFER_SERVICE_DISABLED_ENVS_REG_EX)
void createBqDatasetUndo() throws Exception {
  // Build the creation request for a controlled BigQuery dataset.
  final String datasetId = ControlledResourceFixtures.uniqueDatasetId();
  final String location = "us-central1";
  final ApiGcpBigQueryDatasetCreationParameters creationParameters =
      new ApiGcpBigQueryDatasetCreationParameters().datasetId(datasetId).location(location);
  final ControlledBigQueryDatasetResource resource =
      ControlledResourceFixtures.makeDefaultControlledBigQueryBuilder(workspace.getWorkspaceId())
          .datasetName(datasetId)
          .build();

  // Fail the flight at its last step and additionally fail the dataset-specific
  // undo step once, verifying the undo path is idempotent.
  final Map<String, StepStatus> undoRetries = new HashMap<>();
  undoRetries.put(
      CreateBigQueryDatasetStep.class.getName(), StepStatus.STEP_RESULT_FAILURE_RETRY);
  jobService.setFlightDebugInfoForTest(
      FlightDebugInfo.newBuilder().lastStepFailure(true).undoStepFailures(undoRetries).build());

  // Service methods which wait for a flight to complete will throw an
  // InvalidResultStateException when that flight fails without a cause, which
  // occurs when a flight fails via debugInfo.
  assertThrows(
      InvalidResultStateException.class,
      () ->
          controlledResourceService.createControlledResourceSync(
              resource, null, user.getAuthenticatedRequest(), creationParameters));

  // After the undo, the dataset must not exist on the cloud...
  final BigQueryCow bqCow = crlService.createWsmSaBigQueryCow();
  final GoogleJsonResponseException getException =
      assertThrows(
          GoogleJsonResponseException.class,
          () -> bqCow.datasets().get(projectId, resource.getDatasetName()).execute());
  assertEquals(HttpStatus.NOT_FOUND.value(), getException.getStatusCode());
  // ...and no metadata for it may remain in WSM.
  assertThrows(
      ResourceNotFoundException.class,
      () ->
          controlledResourceService.getControlledResource(
              workspace.getWorkspaceId(),
              resource.getResourceId(),
              user.getAuthenticatedRequest()));
}
Usage of bio.terra.workspace.generated.model.ApiGcpBigQueryDatasetCreationParameters in the terra-workspace-manager project by DataBiosphere.
From class ControlledResourceServiceTest, method updateBqDatasetWithUndefinedExpirationTimes.
@Test
@DisabledIfEnvironmentVariable(named = "TEST_ENV", matches = BUFFER_SERVICE_DISABLED_ENVS_REG_EX)
void updateBqDatasetWithUndefinedExpirationTimes() throws Exception {
  // Create the dataset with both expiration times defined from the start.
  final String datasetId = ControlledResourceFixtures.uniqueDatasetId();
  final String location = "us-central1";
  final Integer initialDefaultTableLifetime = 4800;
  final Integer initialDefaultPartitionLifetime = 4801;
  final ApiGcpBigQueryDatasetCreationParameters creationParameters =
      new ApiGcpBigQueryDatasetCreationParameters()
          .datasetId(datasetId)
          .location(location)
          .defaultTableLifetime(initialDefaultTableLifetime)
          .defaultPartitionLifetime(initialDefaultPartitionLifetime);
  final ControlledBigQueryDatasetResource resource =
      ControlledResourceFixtures.makeDefaultControlledBigQueryBuilder(workspace.getWorkspaceId())
          .datasetName(datasetId)
          .build();
  final ControlledBigQueryDatasetResource createdDataset =
      controlledResourceService
          .createControlledResourceSync(
              resource, null, user.getAuthenticatedRequest(), creationParameters)
          .castByEnum(WsmResourceType.CONTROLLED_GCP_BIG_QUERY_DATASET);
  // Both expiration times should be present on the cloud dataset.
  validateBigQueryDatasetCloudMetadata(
      projectId,
      createdDataset.getDatasetName(),
      location,
      initialDefaultTableLifetime,
      initialDefaultPartitionLifetime);

  // Updating with lifetime 0 clears both expiration times.
  ApiGcpBigQueryDatasetUpdateParameters updateParameters =
      new ApiGcpBigQueryDatasetUpdateParameters()
          .defaultTableLifetime(0)
          .defaultPartitionLifetime(0);
  controlledResourceService.updateBqDataset(
      resource, updateParameters, user.getAuthenticatedRequest(), null, null);
  validateBigQueryDatasetCloudMetadata(
      projectId, createdDataset.getDatasetName(), location, null, null);

  // Restore only the table lifetime; the partition lifetime stays unset.
  final Integer newDefaultTableLifetime = 3600;
  updateParameters =
      new ApiGcpBigQueryDatasetUpdateParameters().defaultTableLifetime(newDefaultTableLifetime);
  controlledResourceService.updateBqDataset(
      resource, updateParameters, user.getAuthenticatedRequest(), null, null);
  validateBigQueryDatasetCloudMetadata(
      projectId, createdDataset.getDatasetName(), location, newDefaultTableLifetime, null);

  // Restore the partition lifetime as well; both should now be defined again.
  final Integer newDefaultPartitionLifetime = 3601;
  updateParameters =
      new ApiGcpBigQueryDatasetUpdateParameters()
          .defaultPartitionLifetime(newDefaultPartitionLifetime);
  controlledResourceService.updateBqDataset(
      resource, updateParameters, user.getAuthenticatedRequest(), null, null);
  validateBigQueryDatasetCloudMetadata(
      projectId,
      createdDataset.getDatasetName(),
      location,
      newDefaultTableLifetime,
      newDefaultPartitionLifetime);
}
Aggregations