Use of bio.terra.model.FileModel in project jade-data-repo by DataBiosphere.
From the class DatasetConnectedTest, method testExcludeLockedFromFileLookups.
@Test
public void testExcludeLockedFromFileLookups() throws Exception {
    // check that the dataset metadata row is unlocked
    UUID datasetId = UUID.fromString(summaryModel.getId());
    String exclusiveLock = datasetDao.getExclusiveLock(datasetId);
    assertNull("dataset row is not exclusively locked", exclusiveLock);
    String[] sharedLocks = datasetDao.getSharedLocks(datasetId);
    assertEquals("dataset row has no shared lock", 0, sharedLocks.length);

    // ingest a file
    URI sourceUri = new URI("gs", "jade-testdata", "/fileloadprofiletest/1KBfile.txt", null, null);
    String targetPath1 = "/mm/" + Names.randomizeName("testdir") + "/testExcludeLockedFromFileLookups.txt";
    FileLoadModel fileLoadModel = new FileLoadModel()
        .sourcePath(sourceUri.toString())
        .description("testExcludeLockedFromFileLookups")
        .mimeType("text/plain")
        .targetPath(targetPath1)
        .profileId(billingProfile.getId());
    FileModel fileModel = connectedOperations.ingestFileSuccess(summaryModel.getId(), fileLoadModel);

    // look up the file by id and check that it's found
    FileModel fileModelFromIdLookup =
        connectedOperations.lookupFileSuccess(summaryModel.getId(), fileModel.getFileId());
    assertEquals("File found by id lookup",
        fileModel.getDescription(), fileModelFromIdLookup.getDescription());

    // look up the file by path and check that it's found
    FileModel fileModelFromPathLookup =
        connectedOperations.lookupFileByPathSuccess(summaryModel.getId(), fileModel.getPath(), -1);
    assertEquals("File found by path lookup",
        fileModel.getDescription(), fileModelFromPathLookup.getDescription());

    // NO ASSERTS inside the block below where the hang is enabled, to reduce the chance of
    // failing before the hang is disabled
    // ====================================================
    // enable hang in DeleteDatasetPrimaryDataStep
    configService.setFault(ConfigEnum.DATASET_DELETE_LOCK_CONFLICT_STOP_FAULT.name(), true);

    // kick off a request to delete the dataset; this should hang before unlocking the dataset object
    MvcResult deleteResult =
        mvc.perform(delete("/api/repository/v1/datasets/" + summaryModel.getId())).andReturn();
    // give the flight time to launch
    TimeUnit.SECONDS.sleep(5);

    // check that the dataset metadata row has an exclusive lock
    // note: asserts are below, outside the hang block
    exclusiveLock = datasetDao.getExclusiveLock(datasetId);
    sharedLocks = datasetDao.getSharedLocks(datasetId);

    // look up the file by id and check that it's NOT found
    // note: asserts are below, outside the hang block
    MockHttpServletResponse lookupFileByIdResponse =
        connectedOperations.lookupFileRaw(summaryModel.getId(), fileModel.getFileId());

    // look up the file by path and check that it's NOT found
    // note: asserts are below, outside the hang block
    MockHttpServletResponse lookupFileByPathResponse =
        connectedOperations.lookupFileByPathRaw(summaryModel.getId(), fileModel.getPath(), -1);

    // disable hang in DeleteDatasetPrimaryDataStep
    configService.setFault(ConfigEnum.DATASET_DELETE_LOCK_CONFLICT_CONTINUE_FAULT.name(), true);
    // ====================================================

    // check that the dataset metadata row had an exclusive lock and no shared locks after
    // kicking off the delete
    assertNotNull("dataset row is exclusively locked", exclusiveLock);
    assertEquals("dataset row has no shared lock", 0, sharedLocks.length);

    // check that the file lookup by id returned not found
    assertEquals("File NOT found by id lookup",
        HttpStatus.NOT_FOUND, HttpStatus.valueOf(lookupFileByIdResponse.getStatus()));
    // check that the file lookup by path returned not found
    assertEquals("File NOT found by path lookup",
        HttpStatus.NOT_FOUND, HttpStatus.valueOf(lookupFileByPathResponse.getStatus()));

    // check the response from the delete request
    MockHttpServletResponse deleteResponse = connectedOperations.validateJobModelAndWait(deleteResult);
    DeleteResponseModel deleteResponseModel =
        connectedOperations.handleSuccessCase(deleteResponse, DeleteResponseModel.class);
    assertEquals("Dataset delete returned successfully",
        DeleteResponseModel.ObjectStateEnum.DELETED, deleteResponseModel.getObjectState());

    // remove the file from the ConnectedOperations bookkeeping list
    connectedOperations.removeFile(summaryModel.getId(), fileModel.getFileId());

    // try to fetch the dataset again and confirm nothing is returned
    connectedOperations.getDatasetExpectError(summaryModel.getId(), HttpStatus.NOT_FOUND);
}
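One detail worth calling out: if an assert fired while the hang was enabled, the CONTINUE fault would never be set and the stuck flight could wedge the test run, which is why the test collects raw responses first and asserts afterward. Below is a minimal sketch of making the release unconditional with try/finally. It uses only the ConfigurationService#setFault call shown in the test above; the helper name and the Probes functional interface are hypothetical, not part of the test class.

// Hypothetical helper sketch: run probe code while DeleteDatasetPrimaryDataStep is
// hung, guaranteeing the continue fault is set even if a probe throws.
@FunctionalInterface
interface Probes {
    void run() throws Exception;
}

private void runWhileDeleteStepIsHung(Probes probes) throws Exception {
    // enable the hang (same stop fault the test sets)
    configService.setFault(ConfigEnum.DATASET_DELETE_LOCK_CONFLICT_STOP_FAULT.name(), true);
    try {
        probes.run(); // collect raw responses only; assert after the hang is released
    } finally {
        // always release the hang so a failing probe cannot wedge the flight
        configService.setFault(ConfigEnum.DATASET_DELETE_LOCK_CONFLICT_CONTINUE_FAULT.name(), true);
    }
}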
Use of bio.terra.model.FileModel in project jade-data-repo by DataBiosphere.
From the class FileTest, method fileParallelFailedLoadTest.
// DR-612 filesystem corruption test; use a nonexistent source file to make sure everything errors
// Do the file ingests in parallel, using a source path that will cause each load to fail
@Test
public void fileParallelFailedLoadTest() throws Exception {
    List<DataRepoResponse<JobModel>> responseList = new ArrayList<>();
    String gsPath = "gs://" + testConfiguration.getIngestbucket() + "/nonexistentfile";
    String filePath = "/foo" + UUID.randomUUID().toString() + "/bar";

    // launch all of the ingest jobs before waiting on any of them, so the loads overlap
    for (int i = 0; i < 20; i++) {
        DataRepoResponse<JobModel> launchResp =
            dataRepoFixtures.ingestFileLaunch(steward(), datasetId, profileId, gsPath, filePath + i);
        responseList.add(launchResp);
    }

    int failureCount = 0;
    for (DataRepoResponse<JobModel> resp : responseList) {
        DataRepoResponse<FileModel> response =
            dataRepoClient.waitForResponse(steward(), resp, FileModel.class);
        if (response.getStatusCode() == HttpStatus.NOT_FOUND) {
            System.out.println("Got expected not found");
        } else {
            System.out.println("Unexpected: " + response.getStatusCode().toString());
            if (response.getErrorObject().isPresent()) {
                ErrorModel errorModel = response.getErrorObject().get();
                System.out.println("Error: " + errorModel.getMessage());
            }
            failureCount++;
        }
    }
    assertThat("No unexpected failures", failureCount, equalTo(0));
}
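When the final count check fails, the println output is the only diagnostic. A small variation, sketched here under the assumption of the same DataRepoResponse accessors used above, accumulates the unexpected statuses so the assertion message itself reports what came back.

// Sketch: gather unexpected statuses so a failure is self-describing.
List<String> unexpectedStatuses = new ArrayList<>();
for (DataRepoResponse<JobModel> resp : responseList) {
    DataRepoResponse<FileModel> response =
        dataRepoClient.waitForResponse(steward(), resp, FileModel.class);
    if (response.getStatusCode() != HttpStatus.NOT_FOUND) {
        unexpectedStatuses.add(response.getStatusCode().toString());
    }
}
assertThat("No unexpected failures: " + unexpectedStatuses,
    unexpectedStatuses.size(), equalTo(0));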
Use of bio.terra.model.FileModel in project jade-data-repo by DataBiosphere.
From the class ConnectedOperations, method retryAcquireLockIngestFileSuccess.
public void retryAcquireLockIngestFileSuccess(
        boolean attemptRetry,
        String datasetId,
        FileLoadModel fileLoadModel,
        ConfigurationService configService,
        DatasetDao datasetDao) throws Exception {
    // insert fault into the shared lock
    ConfigEnum faultToInsert = attemptRetry
        ? ConfigEnum.FILE_INGEST_SHARED_LOCK_RETRY_FAULT
        : ConfigEnum.FILE_INGEST_SHARED_LOCK_FATAL_FAULT;
    configService.setFault(faultToInsert.name(), true);

    String jsonRequest = TestUtils.mapToJson(fileLoadModel);
    String url = "/api/repository/v1/datasets/" + datasetId + "/files";
    MvcResult result =
        mvc.perform(post(url).contentType(MediaType.APPLICATION_JSON).content(jsonRequest)).andReturn();

    // give the flight time to fail a couple of times
    TimeUnit.SECONDS.sleep(5);

    datasetDaoUtils = new DatasetDaoUtils();
    String[] sharedLocks = datasetDaoUtils.getSharedLocks(datasetDao, UUID.fromString(datasetId));

    // remove insertion of the shared lock fault
    configService.setFault(faultToInsert.name(), false);

    assertEquals("no shared locks after first call", 0, sharedLocks.length);

    MockHttpServletResponse response = validateJobModelAndWait(result);
    if (attemptRetry) {
        // check that the flight completed successfully; assume that if it did, then it was
        // able to retry and acquire the shared lock
        FileModel fileModel = handleSuccessCase(response, FileModel.class);
        checkSuccessfulFileLoad(fileLoadModel, fileModel, datasetId);
    } else {
        handleFailureCase(response);
    }
}
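A sketch of how a test might drive both branches of this helper. The calls below are illustrative only: makeFileLoadModel is a hypothetical builder, and each call gets its own FileLoadModel because reusing a target path would make the second ingest fail for the wrong reason.

// Hypothetical usage sketch (fixture names assumed from the surrounding tests).
FileLoadModel retryModel = makeFileLoadModel();  // hypothetical helper returning a fresh model
connectedOperations.retryAcquireLockIngestFileSuccess(
    true, datasetId, retryModel, configService, datasetDao);   // retryable fault: expect success

FileLoadModel fatalModel = makeFileLoadModel();
connectedOperations.retryAcquireLockIngestFileSuccess(
    false, datasetId, fatalModel, configService, datasetDao);  // fatal fault: expect failure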
Use of bio.terra.model.FileModel in project jade-data-repo by DataBiosphere.
From the class EncodeFileTest, method testSnapEnum.
private void testSnapEnum(Map<String, List<String>> dirmap, String snapshotId, String datasetPath, int inDepth)
        throws Exception {
    FileModel fsObj = connectedOperations.lookupSnapshotFileByPathSuccess(snapshotId, datasetPath, inDepth);
    int maxDepth = checkSnapEnum(dirmap, 0, fsObj);
    int depth = (inDepth == -1) ? MAX_DIRECTORY_DEPTH : inDepth;
    assertThat("Depth is correct", maxDepth, equalTo(depth));
}
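checkSnapEnum is not shown in this snippet. For orientation, here is a rough sketch of what such a recursive depth walk could look like, assuming dirmap maps each directory path to its expected child paths and that FileModel exposes enumerated directory contents through getDirectoryDetail().getContents(); the accessor names and the helper itself are assumptions, not the project's actual implementation.

// Sketch: verify each enumerated directory against the expected map and return
// the deepest level that was actually enumerated.
private int checkSnapEnumSketch(Map<String, List<String>> dirmap, int depth, FileModel fsObj) {
    if (fsObj.getDirectoryDetail() == null || fsObj.getDirectoryDetail().getContents() == null) {
        // a file, or a directory whose enumeration was cut off at this depth
        return depth;
    }
    List<String> expected = dirmap.get(fsObj.getPath());
    int maxDepth = depth;
    for (FileModel child : fsObj.getDirectoryDetail().getContents()) {
        // each child should appear in the reference map built by makeDirectoryMap
        assertTrue("expected child " + child.getPath(),
            expected != null && expected.contains(child.getPath()));
        maxDepth = Math.max(maxDepth, checkSnapEnumSketch(dirmap, depth + 1, child));
    }
    return maxDepth;
}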
Use of bio.terra.model.FileModel in project jade-data-repo by DataBiosphere.
From the class EncodeFileTest, method encodeFileTest.
// NOTES ABOUT THIS TEST: this test requires create access to the jade-testdata bucket in order to
// rewrite the JSON source data, replacing the gs paths with the Jade object ids.
@Test
public void encodeFileTest() throws Exception {
    DatasetSummaryModel datasetSummary =
        connectedOperations.createDataset(profileModel, "encodefiletest-dataset.json");

    // load all of the files into the dataset
    String targetPath = loadFiles(datasetSummary.getId(), false, false);
    String gsPath = "gs://" + testConfig.getIngestbucket() + "/" + targetPath;
    IngestRequestModel ingestRequest = new IngestRequestModel()
        .format(IngestRequestModel.FormatEnum.JSON)
        .table("file")
        .path(gsPath);
    connectedOperations.ingestTableSuccess(datasetSummary.getId(), ingestRequest);

    // delete the scratch blob
    Blob scratchBlob = storage.get(BlobId.of(testConfig.getIngestbucket(), targetPath));
    if (scratchBlob != null) {
        scratchBlob.delete();
    }

    // load the donor table successfully
    ingestRequest.table("donor").path("gs://" + testConfig.getIngestbucket() + "/encodetest/donor.json");
    connectedOperations.ingestTableSuccess(datasetSummary.getId(), ingestRequest);

    // at this point we have files and tabular data, so make a snapshot
    SnapshotSummaryModel snapshotSummary =
        connectedOperations.createSnapshot(datasetSummary, "encodefiletest-snapshot.json", "");
    String fileUri = getFileRefIdFromSnapshot(snapshotSummary);
    DrsId drsId = drsIdService.fromUri(fileUri);
    DRSObject drsObject = connectedOperations.drsGetObjectSuccess(drsId.toDrsObjectId(), false);
    String filePath = drsObject.getAliases().get(0);

    // look up the snapshot file by id and by path, and check that the results match
    FileModel fsObjById =
        connectedOperations.lookupSnapshotFileSuccess(snapshotSummary.getId(), drsId.getFsObjectId());
    FileModel fsObjByPath =
        connectedOperations.lookupSnapshotFileByPathSuccess(snapshotSummary.getId(), filePath, 0);
    assertThat("Retrieve snapshot file objects match", fsObjById, equalTo(fsObjByPath));
    assertThat("Load tag is stored", fsObjById.getFileDetail().getLoadTag(), equalTo(loadTag));

    // build the reference directory name map and enumerate at several depths
    String datasetPath = "/" + datasetSummary.getName();
    Map<String, List<String>> dirmap = makeDirectoryMap(datasetPath);
    testSnapEnum(dirmap, snapshotSummary.getId(), datasetPath, -1);
    testSnapEnum(dirmap, snapshotSummary.getId(), datasetPath, 0);
    testSnapEnum(dirmap, snapshotSummary.getId(), datasetPath, 6);
    testSnapEnum(dirmap, snapshotSummary.getId(), datasetPath, 3);

    // try to delete a file that a snapshot depends on; this should fail
    MvcResult result = mvc.perform(
        delete("/api/repository/v1/datasets/" + datasetSummary.getId() + "/files/" + drsId.getFsObjectId()))
        .andReturn();
    MockHttpServletResponse response = connectedOperations.validateJobModelAndWait(result);
    assertThat(response.getStatus(), equalTo(HttpStatus.BAD_REQUEST.value()));
    ErrorModel errorModel = connectedOperations.handleFailureCase(response);
    assertThat("correct dependency error message",
        errorModel.getMessage(), containsString("used by at least one snapshot"));
}
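The dependency error above is the expected terminal state for this test, but it also implies the cleanup order: the file delete can only succeed once no snapshot references the file. A follow-on sketch, using the delete endpoint and job helpers shown in the snippets above; deleteTestSnapshot is a hypothetical cleanup helper, not confirmed from the source.

// Hypothetical follow-on sketch: remove the snapshot dependency, then retry the delete.
connectedOperations.deleteTestSnapshot(snapshotSummary.getId()); // hypothetical cleanup helper
MvcResult retryResult = mvc.perform(
    delete("/api/repository/v1/datasets/" + datasetSummary.getId() + "/files/" + drsId.getFsObjectId()))
    .andReturn();
MockHttpServletResponse retryResponse = connectedOperations.validateJobModelAndWait(retryResult);
// with the snapshot gone, the file delete flight should complete successfully
DeleteResponseModel retryModel =
    connectedOperations.handleSuccessCase(retryResponse, DeleteResponseModel.class);
assertThat(retryModel.getObjectState(), equalTo(DeleteResponseModel.ObjectStateEnum.DELETED));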