Use of edu.harvard.iq.dataverse.engine.command.exception.PermissionException in project dataverse by IQSS.
In the class DeleteDataFileCommand, the method executeImpl:
@Override
protected void executeImpl(CommandContext ctxt) throws CommandException {
    if (destroy) {
        // for now, if called as destroy, will check for superuser access
        if (doomed.getOwner().isReleased() && (!(getUser() instanceof AuthenticatedUser) || !getUser().isSuperuser())) {
            throw new PermissionException("Destroy can only be called by superusers.", this, Collections.singleton(Permission.DeleteDatasetDraft), doomed);
        }
    } else {
        // 3. confirm that the version is not released
        if (doomed.isReleased() || doomed.getFileMetadatas().size() > 1 || doomed.getFileMetadata().getDatasetVersion().isReleased()) {
            throw new CommandException("Cannot delete file: the DataFile is published, is attached to more than one Dataset Version, or is attached to a released Dataset Version.", this);
        }
    }
    // We need to delete a bunch of physical files, either from the file system,
    // or from some other storage medium where the datafile is stored,
    // via its StorageIO driver.
    // First we delete the derivative files, then try to delete the data file
    // itself; if that fails, we throw an exception and abort the command
    // without trying to remove the object from the database.
    // However, we do not attempt any deletes if this is a Harvested file.
    logger.log(Level.FINE, "Delete command called on an unpublished DataFile {0}", doomed.getId());
    if (!doomed.isHarvested() && !StringUtil.isEmpty(doomed.getStorageIdentifier())) {
        if (FileUtil.isPackageFile(doomed)) {
            try {
                String datasetDirectory = doomed.getOwner().getFileSystemDirectory().toString();
                Path datasetDirectoryPath = Paths.get(datasetDirectory, doomed.getStorageIdentifier());
                Files.walkFileTree(datasetDirectoryPath, new SimpleFileVisitor<Path>() {

                    @Override
                    public FileVisitResult visitFile(final Path file, final BasicFileAttributes attrs) throws IOException {
                        Files.delete(file);
                        return FileVisitResult.CONTINUE;
                    }

                    @Override
                    public FileVisitResult visitFileFailed(final Path file, final IOException e) {
                        return handleException(e);
                    }

                    private FileVisitResult handleException(final IOException e) {
                        logger.warning("Failed to delete file due to: " + e.getMessage());
                        return FileVisitResult.TERMINATE;
                    }

                    @Override
                    public FileVisitResult postVisitDirectory(final Path dir, final IOException e) throws IOException {
                        if (e != null) {
                            return handleException(e);
                        }
                        Files.delete(dir);
                        return FileVisitResult.CONTINUE;
                    }
                });
            } catch (IOException ioex) {
                throw new CommandExecutionException("Failed to delete package file " + doomed.getStorageIdentifier(), ioex, this);
            }
            logger.info("Successfully deleted the package file " + doomed.getStorageIdentifier());
        } else {
            logger.log(Level.FINE, "Storage identifier for the file: {0}", doomed.getStorageIdentifier());
            StorageIO<DataFile> storageIO = null;
            try {
                storageIO = doomed.getStorageIO();
            } catch (IOException ioex) {
                throw new CommandExecutionException("Failed to initialize physical access driver.", ioex, this);
            }
            if (storageIO != null) {
                // If the auxiliary files cannot be deleted, log the failure
                // and proceed with deleting the database object below.
                try {
                    storageIO.open();
                    storageIO.deleteAllAuxObjects();
                } catch (IOException ioex) {
                    logger.log(Level.SEVERE, "Error deleting Auxiliary file(s) while deleting DataFile {0}", doomed.getStorageIdentifier());
                }
                // We only want to attempt to delete the main physical file
                // if it actually exists, on the filesystem or wherever it
                // is actually stored by its StorageIO:
                boolean physicalFileExists = false;
                try {
                    physicalFileExists = storageIO.exists();
                } catch (IOException ioex) {
                    // We'll assume that an exception here means that the file
                    // does not exist, so we can skip trying to delete it.
                    physicalFileExists = false;
                }
                if (physicalFileExists) {
                    try {
                        storageIO.delete();
                    } catch (IOException ex) {
                        // This we will treat as a fatal condition:
                        throw new CommandExecutionException("Error deleting physical file object while deleting DataFile " + doomed.getId() + " from the database.", ex, this);
                    }
                }
                logger.log(Level.FINE, "Successfully deleted physical storage object (file) for the DataFile {0}", doomed.getId());
                // Destroy the storageIO object - we will need to purge the
                // DataFile from the database (below), so we don't want to have
                // any objects in this transaction that reference it:
                storageIO = null;
            }
        }
    }
    DataFile doomedAndMerged = ctxt.em().merge(doomed);
    ctxt.em().remove(doomedAndMerged);
    /**
     * @todo consider adding an em.flush here (despite the performance
     * impact) if you need to operate on the dataset below. Without the
     * flush, the dataset still thinks it has the file that was just
     * deleted.
     */
    // ctxt.em().flush();
    /**
     * We *could* re-index the entire dataset, but it's more efficient to
     * target individual files for deletion, which should always be drafts.
     *
     * See also https://redmine.hmdc.harvard.edu/issues/3786
     */
    String indexingResult = ctxt.index().removeSolrDocFromIndex(IndexServiceBean.solrDocIdentifierFile + doomed.getId() + "_draft");
    /**
     * @todo check the indexing result for success or failure. Really, we
     * need an indexing queuing system:
     * https://redmine.hmdc.harvard.edu/issues/3643
     */
}
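
The package-file branch above removes an entire directory tree with Files.walkFileTree. A minimal, self-contained sketch of that same java.nio.file pattern outside of Dataverse (the root path here is purely illustrative):

import java.io.IOException;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;

public class RecursiveDelete {

    public static void main(String[] args) throws IOException {
        // Illustrative path; in DeleteDataFileCommand this would be the
        // dataset directory joined with the package file's storage identifier.
        Path root = Paths.get("/tmp/example-package-dir");
        Files.walkFileTree(root, new SimpleFileVisitor<Path>() {

            @Override
            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                // Delete each regular file as it is visited.
                Files.delete(file);
                return FileVisitResult.CONTINUE;
            }

            @Override
            public FileVisitResult postVisitDirectory(Path dir, IOException e) throws IOException {
                // A directory can only be deleted once its contents are gone;
                // propagate any error encountered while walking it.
                if (e != null) {
                    throw e;
                }
                Files.delete(dir);
                return FileVisitResult.CONTINUE;
            }
        });
    }
}

Depth-first traversal is what makes this work: files are deleted on the way down, and each directory is deleted in postVisitDirectory only after everything inside it is gone.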
Use of edu.harvard.iq.dataverse.engine.command.exception.PermissionException in project dataverse by IQSS.
In the class MoveDatasetCommand, the method executeImpl:
@Override
public void executeImpl(CommandContext ctxt) throws CommandException {
    // first check if the user is a superuser
    if (!(getUser() instanceof AuthenticatedUser) || !getUser().isSuperuser()) {
        throw new PermissionException("Move Dataset can only be called by superusers.", this, Collections.singleton(Permission.DeleteDatasetDraft), moved);
    }
    // validate that the move makes sense
    if (moved.getOwner().equals(destination)) {
        throw new IllegalCommandException("Dataset is already in this Dataverse.", this);
    }
    if (moved.isReleased() && !destination.isReleased()) {
        throw new IllegalCommandException("A published Dataset may not be moved to an unpublished Dataverse. You may publish " + destination.getDisplayName() + " and re-try the move.", this);
    }
    // if the dataset's guestbook is not available in the new dataverse, remove it
    if (moved.getGuestbook() != null) {
        Guestbook gb = moved.getGuestbook();
        List<Guestbook> gbs = destination.getGuestbooks();
        boolean inheritGuestbooksValue = !destination.isGuestbookRoot();
        if (inheritGuestbooksValue && destination.getOwner() != null) {
            for (Guestbook pg : destination.getParentGuestbooks()) {
                gbs.add(pg);
            }
        }
        if (gbs == null || !gbs.contains(gb)) {
            if (force == null || !force) {
                throw new IllegalCommandException("Dataset guestbook is not in the target dataverse. Please use the parameter ?forceMove=true to complete the move. This will delete the guestbook from the Dataset.", this);
            }
            moved.setGuestbook(null);
        }
    }
    // OK, move
    moved.setOwner(destination);
    ctxt.em().merge(moved);
    try {
        boolean doNormalSolrDocCleanUp = true;
        ctxt.index().indexDataset(moved, doNormalSolrDocCleanUp);
    } catch (Exception e) {
        logger.log(Level.WARNING, "Exception while indexing: " + e.getMessage());
        throw new CommandException("Dataset could not be moved. Indexing failed.", this);
    }
}
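
Both commands open with the same superuser guard before doing any work. A minimal sketch of that pattern as a reusable helper inside a hypothetical command subclass; the helper name requireSuperuser is an assumption, not Dataverse API, and the PermissionException constructor arguments mirror the two calls above:

// Hypothetical helper, not part of the Dataverse codebase; it assumes the
// surrounding class extends the command base class that provides getUser().
protected void requireSuperuser(DvObject target) throws PermissionException {
    // GuestUser and other non-authenticated users fail the instanceof check;
    // an authenticated user must additionally carry the superuser flag.
    if (!(getUser() instanceof AuthenticatedUser) || !getUser().isSuperuser()) {
        throw new PermissionException("This command can only be called by superusers.",
                this, Collections.singleton(Permission.DeleteDatasetDraft), target);
    }
}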
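In Dataverse, commands like these are executed by submitting them to the command engine rather than by calling executeImpl directly. A hedged sketch of such a call site; the engineSvc and dataverseRequest variables and the exact constructor signatures are assumptions based on the fields the snippets reference (doomed, destroy, moved, destination, force), not verified API:

// Sketch of a call site; engineSvc and dataverseRequest are assumed to be
// available (e.g. injected) in the calling bean.
try {
    engineSvc.submit(new DeleteDataFileCommand(dataFile, dataverseRequest));
    engineSvc.submit(new MoveDatasetCommand(dataverseRequest, dataset, targetDataverse, true));
} catch (PermissionException pe) {
    // Thrown by the superuser guards above when the caller lacks permission.
    logger.warning("Permission denied: " + pe.getMessage());
} catch (CommandException ce) {
    logger.warning("Command failed: " + ce.getMessage());
}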