Use of edu.harvard.iq.dataverse.engine.command.exception.CommandExecutionException in project dataverse by IQSS.
The class RestrictFileCommand, method executeImpl.
@Override
protected void executeImpl(CommandContext ctxt) throws CommandException {
    // check if public install & don't allow
    boolean defaultValue = false;
    boolean publicInstall = ctxt.settings().isTrueForKey(SettingsServiceBean.Key.PublicInstall, defaultValue);
    if (publicInstall) {
        throw new CommandExecutionException("Restricting files is not permitted on a public installation.", this);
    }
    if (file.getOwner() == null) {
        // this is a new file through upload, restrict
        file.getFileMetadata().setRestricted(restrict);
        file.setRestricted(restrict);
    } else {
        Dataset dataset = file.getOwner();
        DatasetVersion workingVersion = dataset.getEditVersion();
        // check if this file is already restricted or already unrestricted
        if ((restrict && file.getFileMetadata().isRestricted()) || (!restrict && !file.getFileMetadata().isRestricted())) {
            String text = restrict ? "restricted" : "unrestricted";
            throw new CommandExecutionException("File " + file.getDisplayName() + " is already " + text, this);
        }
        // check if this dataset is a draft (should be), then we can update restrict
        if (workingVersion.isDraft()) {
            // because we must update the working version metadata
            if (dataset.isReleased()) {
                for (FileMetadata fmw : workingVersion.getFileMetadatas()) {
                    if (file.equals(fmw.getDataFile())) {
                        fmw.setRestricted(restrict);
                        if (!file.isReleased()) {
                            file.setRestricted(restrict);
                        }
                    }
                }
            } else {
                file.getFileMetadata().setRestricted(restrict);
                if (!file.isReleased()) {
                    file.setRestricted(restrict);
                }
                if (file.getFileMetadata().isRestricted() != restrict) {
                    throw new CommandExecutionException("Failed to update the file metadata", this);
                }
            }
        } else {
            throw new CommandExecutionException("Working version must be a draft", this);
        }
    }
}
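For context, here is a minimal caller-side sketch of how this command might be submitted through the command engine. The constructor signature (file, request, restrict flag) and the commandEngine and dataverseRequest names are assumptions inferred from the snippet above, not verified against the current codebase. Since CommandExecutionException extends CommandException, the more specific catch block must come first.

try {
    // hypothetical invocation: mark a draft file as restricted (restrict = true)
    commandEngine.submit(new RestrictFileCommand(file, dataverseRequest, true));
} catch (CommandExecutionException ex) {
    // thrown above for public installs, redundant restricts, or a non-draft working version
    logger.warning("Restrict failed: " + ex.getMessage());
} catch (CommandException ex) {
    // any other command failure, e.g. a failed permission check
    logger.warning("Command failed: " + ex.getMessage());
}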
Use of edu.harvard.iq.dataverse.engine.command.exception.CommandExecutionException in project dataverse by IQSS.
The class ContainerManagerImpl, method deleteContainer.
@Override
public void deleteContainer(String uri, AuthCredentials authCredentials, SwordConfiguration sc) throws SwordError, SwordServerException, SwordAuthException {
    AuthenticatedUser user = swordAuth.auth(authCredentials);
    DataverseRequest dvRequest = new DataverseRequest(user, httpRequest);
    logger.fine("deleteContainer called with url: " + uri);
    urlManager.processUrl(uri);
    logger.fine("original url: " + urlManager.getOriginalUrl());
    if (!"edit".equals(urlManager.getServlet())) {
        throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "edit servlet expected, not " + urlManager.getServlet());
    }
    String targetType = urlManager.getTargetType();
    if (!targetType.isEmpty()) {
        logger.fine("operating on target type: " + urlManager.getTargetType());
        if ("dataverse".equals(targetType)) {
            throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Dataverses can not be deleted via the Data Deposit API but other Dataverse APIs may support this operation.");
        } else if ("study".equals(targetType)) {
            String globalId = urlManager.getTargetIdentifier();
            logger.fine("globalId: " + globalId);
            if (globalId != null) {
                Dataset dataset = datasetService.findByGlobalId(globalId);
                if (dataset != null) {
                    Dataverse dvThatOwnsDataset = dataset.getOwner();
                    /**
                     * We are checking if DeleteDatasetVersionCommand can be
                     * called even though DeleteDatasetCommand can be called
                     * when a dataset hasn't been published. They should be
                     * equivalent in terms of a permission check.
                     */
                    DeleteDatasetVersionCommand deleteDatasetVersionCommand = new DeleteDatasetVersionCommand(dvRequest, dataset);
                    if (!permissionService.isUserAllowedOn(user, deleteDatasetVersionCommand, dataset)) {
                        throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "User " + user.getDisplayInfo().getTitle() + " is not authorized to modify " + dvThatOwnsDataset.getAlias());
                    }
                    DatasetVersion.VersionState datasetVersionState = dataset.getLatestVersion().getVersionState();
                    if (dataset.isReleased()) {
                        if (datasetVersionState.equals(DatasetVersion.VersionState.DRAFT)) {
                            logger.info("destroying working copy version of dataset " + dataset.getGlobalId());
                            try {
                                engineSvc.submit(deleteDatasetVersionCommand);
                            } catch (CommandException ex) {
                                throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Can't delete dataset version for " + dataset.getGlobalId() + ": " + ex);
                            }
                            logger.info("dataset version deleted for dataset id " + dataset.getId());
                        } else if (datasetVersionState.equals(DatasetVersion.VersionState.RELEASED)) {
                            throw new SwordError(UriRegistry.ERROR_METHOD_NOT_ALLOWED, "Deaccessioning a dataset is no longer supported as of Data Deposit API version in URL (" + swordConfiguration.getBaseUrlPathV1() + "). Equivalent functionality is being developed at https://github.com/IQSS/dataverse/issues/778");
                        } else if (datasetVersionState.equals(DatasetVersion.VersionState.DEACCESSIONED)) {
                            throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Latest version of dataset " + dataset.getGlobalId() + " has already been deaccessioned.");
                        } else if (datasetVersionState.equals(DatasetVersion.VersionState.ARCHIVED)) {
                            throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Latest version of dataset " + dataset.getGlobalId() + " has been archived and can not be deleted or deaccessioned.");
                        } else {
                            throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Operation not valid for dataset " + dataset.getGlobalId() + " in state " + datasetVersionState);
                        }
                    } else {
                        // dataset has never been published, this is just a sanity check (should always be draft)
                        if (datasetVersionState.equals(DatasetVersion.VersionState.DRAFT)) {
                            try {
                                engineSvc.submit(new DeleteDatasetCommand(dvRequest, dataset));
                                logger.fine("dataset deleted");
                            } catch (CommandExecutionException ex) {
                                // internal error
                                throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Can't delete dataset: " + ex.getMessage());
                            } catch (CommandException ex) {
                                throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Can't delete dataset: " + ex.getMessage());
                            }
                        } else {
                            // we should never get here. throw an error explaining why
                            throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "dataset is in illegal state (not published yet not in draft)");
                        }
                    }
                } else {
                    throw new SwordError(404);
                }
            } else {
                throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Could not find dataset to delete from URL: " + uri);
            }
        } else {
            throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Unsupported delete target in URL: " + uri);
        }
    } else {
        throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "No target for deletion specified");
    }
}
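For orientation, a minimal client-side sketch of the SWORD DELETE request that routes into deleteContainer. The host, the persistent identifier, and the convention of sending an API token as the basic-auth username are illustrative assumptions; only the "edit/study" URL shape is taken from the servlet and target-type handling above. (Requires java.net.URL, java.net.HttpURLConnection, java.util.Base64, and java.nio.charset.StandardCharsets.)

// hypothetical endpoint; doi:10.5072/FK2/EXAMPLE stands in for a real persistent identifier
URL editUrl = new URL("https://demo.dataverse.org/dvn/api/data-deposit/v1.1/swordv2/edit/study/doi:10.5072/FK2/EXAMPLE");
HttpURLConnection conn = (HttpURLConnection) editUrl.openConnection();
conn.setRequestMethod("DELETE");
String apiToken = "xxxxxxxx"; // hypothetical API token, used as the basic-auth username with an empty password
String credentials = Base64.getEncoder().encodeToString((apiToken + ":").getBytes(StandardCharsets.UTF_8));
conn.setRequestProperty("Authorization", "Basic " + credentials);
int status = conn.getResponseCode(); // typically 204 on success; the SwordError paths above surface as 400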
Use of edu.harvard.iq.dataverse.engine.command.exception.CommandExecutionException in project dataverse by IQSS.
The class DeleteDataFileCommand, method executeImpl.
@Override
protected void executeImpl(CommandContext ctxt) throws CommandException {
    if (destroy) {
        // for now, if called as destroy, will check for superuser access
        if (doomed.getOwner().isReleased() && (!(getUser() instanceof AuthenticatedUser) || !getUser().isSuperuser())) {
            throw new PermissionException("Destroy can only be called by superusers.", this, Collections.singleton(Permission.DeleteDatasetDraft), doomed);
        }
    } else if (doomed.isReleased() || doomed.getFileMetadatas().size() > 1 || doomed.getFileMetadata().getDatasetVersion().isReleased()) {
        // confirm that neither the file nor its version is released
        throw new CommandException("Cannot delete file: the DataFile is published, is attached to more than one Dataset Version, or is attached to a released Dataset Version.", this);
    }
    // We need to delete a bunch of physical files, either from the file system,
    // or from some other storage medium where the datafile is stored,
    // via its StorageIO driver.
    // First we delete the derivative files, then try to delete the data
    // file itself; if that fails, we throw an exception and abort the command
    // without trying to remove the object from the database.
    // However, we do not attempt to do any deletes if this is a Harvested file.
    logger.log(Level.FINE, "Delete command called on an unpublished DataFile {0}", doomed.getId());
    if (!doomed.isHarvested() && !StringUtil.isEmpty(doomed.getStorageIdentifier())) {
        if (FileUtil.isPackageFile(doomed)) {
            try {
                String datasetDirectory = doomed.getOwner().getFileSystemDirectory().toString();
                Path datasetDirectoryPath = Paths.get(datasetDirectory, doomed.getStorageIdentifier());
                Files.walkFileTree(datasetDirectoryPath, new SimpleFileVisitor<Path>() {

                    @Override
                    public FileVisitResult visitFile(final Path file, final BasicFileAttributes attrs) throws IOException {
                        Files.delete(file);
                        return FileVisitResult.CONTINUE;
                    }

                    @Override
                    public FileVisitResult visitFileFailed(final Path file, final IOException e) {
                        return handleException(e);
                    }

                    private FileVisitResult handleException(final IOException e) {
                        logger.warning("Failed to delete file due to " + e.getMessage());
                        return FileVisitResult.TERMINATE;
                    }

                    @Override
                    public FileVisitResult postVisitDirectory(final Path dir, final IOException e) throws IOException {
                        if (e != null) {
                            return handleException(e);
                        }
                        Files.delete(dir);
                        return FileVisitResult.CONTINUE;
                    }
                });
            } catch (IOException ioex) {
                throw new CommandExecutionException("Failed to delete package file " + doomed.getStorageIdentifier(), ioex, this);
            }
            logger.info("Successfully deleted the package file " + doomed.getStorageIdentifier());
        } else {
            logger.log(Level.FINE, "Storage identifier for the file: {0}", doomed.getStorageIdentifier());
            StorageIO<DataFile> storageIO = null;
            try {
                storageIO = doomed.getStorageIO();
            } catch (IOException ioex) {
                throw new CommandExecutionException("Failed to initialize physical access driver.", ioex, this);
            }
            if (storageIO != null) {
                try {
                    storageIO.open();
                    storageIO.deleteAllAuxObjects();
                } catch (IOException ioex) {
                    // log the failure and proceed with deleting the database object
                    Logger.getLogger(DeleteDataFileCommand.class.getName()).log(Level.SEVERE, "Error deleting Auxiliary file(s) while deleting DataFile {0}", doomed.getStorageIdentifier());
                }
                // We only want to attempt to delete the main physical file
                // if it actually exists, on the filesystem or wherever it
                // is actually stored by its StorageIO:
                boolean physicalFileExists = false;
                try {
                    physicalFileExists = storageIO.exists();
                } catch (IOException ioex) {
                    // We'll assume that an exception here means that the file does not
                    // exist; so we can skip trying to delete it.
                    physicalFileExists = false;
                }
                if (physicalFileExists) {
                    try {
                        storageIO.delete();
                    } catch (IOException ex) {
                        // This we will treat as a fatal condition:
                        throw new CommandExecutionException("Error deleting physical file object while deleting DataFile " + doomed.getId() + " from the database.", ex, this);
                    }
                }
                logger.log(Level.FINE, "Successfully deleted physical storage object (file) for the DataFile {0}", doomed.getId());
                // Destroy the storageIO object - we will need to purge the
                // DataFile from the database (below), so we don't want to have any
                // objects in this transaction that reference it:
                storageIO = null;
            }
        }
    }
    DataFile doomedAndMerged = ctxt.em().merge(doomed);
    ctxt.em().remove(doomedAndMerged);
    /**
     * @todo consider adding an em.flush here (despite the performance
     * impact) if you need to operate on the dataset below. Without the
     * flush, the dataset still thinks it has the file that was just
     * deleted.
     */
    // ctxt.em().flush();
    /**
     * We *could* re-index the entire dataset but it's more efficient to
     * target individual files for deletion, which should always be drafts.
     *
     * See also https://redmine.hmdc.harvard.edu/issues/3786
     */
    String indexingResult = ctxt.index().removeSolrDocFromIndex(IndexServiceBean.solrDocIdentifierFile + doomed.getId() + "_draft");
    /**
     * @todo check indexing result for success or failure. Really, we need
     * an indexing queuing system:
     * https://redmine.hmdc.harvard.edu/issues/3643
     */
}
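A matching caller-side sketch for this command. The three-argument constructor (the doomed file, the request, a destroy flag) is an assumption inferred from the destroy field used above, not verified against the current codebase. Both PermissionException and CommandExecutionException extend CommandException, so the ordering of the catch blocks matters.

try {
    // hypothetical invocation: a regular (non-destroy) delete of a draft file
    commandEngine.submit(new DeleteDataFileCommand(dataFile, dataverseRequest, false));
} catch (CommandExecutionException ex) {
    // physical deletion failed: package files, the storage driver, or the main storage object
    logger.severe("Physical delete failed: " + ex.getMessage());
} catch (CommandException ex) {
    // covers the released-version check above and, as a subclass, the PermissionException
    logger.warning("Delete rejected: " + ex.getMessage());
}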