Use of bio.terra.model.DatasetModel in the project jade-data-repo by DataBiosphere:
the class CreateDatasetAuthzBqJobUserStep, method doStep.
/**
 * Flight step: grants the BigQuery JobUser role on the dataset's data project to the
 * policy emails stored earlier in the flight's working map.
 */
@Override
public StepResult doStep(FlightContext context) throws InterruptedException {
  FlightMap map = context.getWorkingMap();
  UUID id = map.get(DatasetWorkingMapKeys.DATASET_ID, UUID.class);
  Map<IamRole, String> policyEmails = map.get(DatasetWorkingMapKeys.POLICY_EMAILS, Map.class);

  DatasetModel model = datasetService.retrieveModel(datasetService.retrieve(id));

  // The underlying service provides retries so we do not need to retry this operation
  resourceService.grantPoliciesBqJobUser(model.getDataProject(), policyEmails.values());
  return StepResult.getStepResultSuccess();
}
Use of bio.terra.model.DatasetModel in the project jade-data-repo by DataBiosphere:
the class BigQueryVisitor, method visitTable_expr.
/**
 * Rewrites a parsed table expression into a fully-qualified, backtick-quoted BigQuery
 * table reference with a generated alias:
 * {@code `<dataProject>.<PDAO_PREFIX + datasetName>.<tableName>` AS `<alias>`}.
 */
@Override
public String visitTable_expr(SQLParser.Table_exprContext ctx) {
  String datasetName = getNameFromContext(ctx.dataset_name());
  DatasetModel dataset = getDatasetByName(datasetName);
  String dataProjectId = dataset.getDataProject();
  // Reuse the dataset name extracted above instead of re-reading it from the parse context.
  String bqDatasetName = PdaoConstant.PDAO_PREFIX + datasetName;
  String tableName = getNameFromContext(ctx.table_name());
  String alias = generateAlias(bqDatasetName, tableName);
  return String.format("`%s.%s.%s` AS `%s`", dataProjectId, bqDatasetName, tableName, alias);
}
Use of bio.terra.model.DatasetModel in the project jade-data-repo by DataBiosphere:
the class DatasetConnectedTest, method testExcludeLockedFromDatasetLookups.
// Verifies that a dataset exclusively locked by an in-flight delete is hidden from
// retrieve and enumerate lookups. Uses a fault injection to pause the delete flight
// mid-step so the lock can be observed while it is held.
@Test
public void testExcludeLockedFromDatasetLookups() throws Exception {
// check that the dataset metadata row is unlocked
UUID datasetId = UUID.fromString(summaryModel.getId());
String exclusiveLock = datasetDao.getExclusiveLock(datasetId);
assertNull("dataset row is not exclusively locked", exclusiveLock);
String[] sharedLocks = datasetDao.getSharedLocks(datasetId);
assertEquals("dataset row has no shared lock", 0, sharedLocks.length);
// retrieve the dataset and check that it finds it
DatasetModel datasetModel = connectedOperations.getDataset(summaryModel.getId());
assertEquals("Lookup unlocked dataset succeeds", summaryModel.getName(), datasetModel.getName());
// enumerate datasets and check that this dataset is included in the set
EnumerateDatasetModel enumerateDatasetModel = connectedOperations.enumerateDatasets(summaryModel.getName());
List<DatasetSummaryModel> enumeratedDatasets = enumerateDatasetModel.getItems();
boolean foundDatasetWithMatchingId = false;
for (DatasetSummaryModel enumeratedDataset : enumeratedDatasets) {
if (enumeratedDataset.getId().equals(summaryModel.getId())) {
foundDatasetWithMatchingId = true;
break;
}
}
assertTrue("Unlocked included in enumeration", foundDatasetWithMatchingId);
// NO ASSERTS inside the block below where hang is enabled to reduce chance of failing before disabling the hang
// ====================================================
// enable hang in DeleteDatasetPrimaryDataStep
configService.setFault(ConfigEnum.DATASET_DELETE_LOCK_CONFLICT_STOP_FAULT.name(), true);
// kick off a request to delete the dataset. this should hang before unlocking the dataset object.
MvcResult deleteResult = mvc.perform(delete("/api/repository/v1/datasets/" + summaryModel.getId())).andReturn();
// give the flight time to launch
// NOTE(review): fixed sleep is timing-sensitive — presumably 5s is enough for the flight
// to acquire the lock on test hardware; confirm if this test ever flakes.
TimeUnit.SECONDS.sleep(5);
// check that the dataset metadata row has an exclusive lock
// note: asserts are below outside the hang block
exclusiveLock = datasetDao.getExclusiveLock(datasetId);
sharedLocks = datasetDao.getSharedLocks(datasetId);
// retrieve the dataset, should return not found
// note: asserts are below outside the hang block
MvcResult retrieveResult = mvc.perform(get("/api/repository/v1/datasets/" + datasetId)).andReturn();
// enumerate datasets, this dataset should not be included in the set
// note: asserts are below outside the hang block
MvcResult enumerateResult = connectedOperations.enumerateDatasetsRaw(summaryModel.getName());
// disable hang in DeleteDatasetPrimaryDataStep
configService.setFault(ConfigEnum.DATASET_DELETE_LOCK_CONFLICT_CONTINUE_FAULT.name(), true);
// ====================================================
// check that the dataset metadata row has an exclusive lock after kicking off the delete
assertNotNull("dataset row is exclusively locked", exclusiveLock);
assertEquals("dataset row has no shared lock", 0, sharedLocks.length);
// check that the retrieve request returned not found
connectedOperations.handleFailureCase(retrieveResult.getResponse(), HttpStatus.NOT_FOUND);
// check that the enumerate request returned successfully and that this dataset is not included in the set
enumerateDatasetModel = connectedOperations.handleSuccessCase(enumerateResult.getResponse(), EnumerateDatasetModel.class);
enumeratedDatasets = enumerateDatasetModel.getItems();
foundDatasetWithMatchingId = false;
for (DatasetSummaryModel enumeratedDataset : enumeratedDatasets) {
if (enumeratedDataset.getId().equals(summaryModel.getId())) {
foundDatasetWithMatchingId = true;
break;
}
}
assertFalse("Exclusively locked not included in enumeration", foundDatasetWithMatchingId);
// check the response from the delete request
MockHttpServletResponse deleteResponse = connectedOperations.validateJobModelAndWait(deleteResult);
DeleteResponseModel deleteResponseModel = connectedOperations.handleSuccessCase(deleteResponse, DeleteResponseModel.class);
assertEquals("Dataset delete returned successfully", DeleteResponseModel.ObjectStateEnum.DELETED, deleteResponseModel.getObjectState());
// try to fetch the dataset again and confirm nothing is returned
connectedOperations.getDatasetExpectError(summaryModel.getId(), HttpStatus.NOT_FOUND);
}
Use of bio.terra.model.DatasetModel in the project jade-data-repo by DataBiosphere:
the class DatasetConnectedTest, method testDuplicateName.
/** Creating a second dataset with the same name must be rejected as a bad request. */
@Test
public void testDuplicateName() throws Exception {
  assertNotNull("created dataset successfully the first time", summaryModel);
  String datasetId = summaryModel.getId();

  // Fetch the dataset back and verify the metadata matches the original request.
  DatasetModel fetched = connectedOperations.getDataset(datasetId);
  assertNotNull("fetched dataset successfully after creation", fetched);
  assertEquals("fetched dataset name matches request", datasetRequest.getName(), fetched.getName());

  // The metadata row should carry no exclusive lock once creation completes.
  assertNull("dataset row is unlocked", datasetDao.getExclusiveLock(UUID.fromString(datasetId)));

  // A second create with the identical request must fail with a name conflict.
  ErrorModel conflict = connectedOperations.createDatasetExpectError(datasetRequest, HttpStatus.BAD_REQUEST);
  assertThat("error message includes name conflict", conflict.getMessage(), containsString("Dataset name already exists"));

  // Delete the dataset and confirm a subsequent fetch returns nothing.
  connectedOperations.deleteTestDataset(datasetId);
  connectedOperations.getDatasetExpectError(datasetId, HttpStatus.NOT_FOUND);
}
Use of bio.terra.model.DatasetModel in the project jade-data-repo by DataBiosphere:
the class SnapshotTest, method snapshotRowIdsHappyPathTest.
/**
 * Happy path for creating a snapshot by explicit row ids: reads all row ids from the
 * ingested dataset's participant and sample tables, plugs them into the snapshot
 * request, and verifies the resulting snapshot's tables and relationships.
 */
@Test
public void snapshotRowIdsHappyPathTest() throws Exception {
  // fetch rowIds from the ingested dataset by querying the participant table
  DatasetModel dataset = dataRepoFixtures.getDataset(steward(), datasetId);
  String datasetProject = dataset.getDataProject();
  String bqDatasetName = PdaoConstant.PDAO_PREFIX + dataset.getName();
  String participantTable = "participant";
  String sampleTable = "sample";
  // Reuse datasetProject rather than calling dataset.getDataProject() a second time.
  BigQuery bigQuery = BigQueryFixtures.getBigQuery(datasetProject, stewardToken);
  List<String> participantIdList = queryRowIds(bigQuery, datasetProject, bqDatasetName, participantTable);
  List<String> sampleIdList = queryRowIds(bigQuery, datasetProject, bqDatasetName, sampleTable);
  // swap in these row ids in the request
  SnapshotRequestModel requestModel = jsonLoader.loadObject("ingest-test-snapshot-row-ids-test.json", SnapshotRequestModel.class);
  requestModel.getContents().get(0).getRowIdSpec().getTables().get(0).setRowIds(participantIdList);
  requestModel.getContents().get(0).getRowIdSpec().getTables().get(1).setRowIds(sampleIdList);
  SnapshotSummaryModel snapshotSummary = dataRepoFixtures.createSnapshotWithRequest(steward(), dataset.getName(), requestModel);
  TimeUnit.SECONDS.sleep(10);
  createdSnapshotIds.add(snapshotSummary.getId());
  SnapshotModel snapshot = dataRepoFixtures.getSnapshot(steward(), snapshotSummary.getId());
  assertEquals("new snapshot has been created", snapshot.getName(), requestModel.getName());
  assertEquals("new snapshot has the correct number of tables", requestModel.getContents().get(0).getRowIdSpec().getTables().size(), snapshot.getTables().size());
  // TODO: get the snapshot and make sure the number of rows matches with the row ids input
  assertThat("one relationship comes through", snapshot.getRelationships().size(), equalTo(1));
  assertThat("the right relationship comes through", snapshot.getRelationships().get(0).getName(), equalTo("sample_participants"));
}

/** Selects every datarepo row id from one BigQuery table and returns them as strings. */
private static List<String> queryRowIds(BigQuery bigQuery, String project, String bqDatasetName, String table) {
  String sql = String.format("SELECT %s FROM `%s.%s.%s`", PdaoConstant.PDAO_ROW_ID_COLUMN, project, bqDatasetName, table);
  TableResult result = BigQueryFixtures.query(sql, bigQuery);
  return StreamSupport.stream(result.getValues().spliterator(), false)
      .map(v -> v.get(0).getStringValue())
      .collect(Collectors.toList());
}
Aggregations