Example 81 with CommandException

Use of edu.harvard.iq.dataverse.engine.command.exception.CommandException in project dataverse by IQSS.

In the class CreateDatasetCommand, the method execute:

@Override
public Dataset execute(CommandContext ctxt) throws CommandException {
    SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd-hh.mm.ss");
    IdServiceBean idServiceBean = IdServiceBean.getBean(theDataset.getProtocol(), ctxt);
    if (theDataset.getIdentifier() == null || theDataset.getIdentifier().isEmpty()) {
        theDataset.setIdentifier(ctxt.datasets().generateDatasetIdentifier(theDataset, idServiceBean));
    }
    if ((importType != ImportType.MIGRATION && importType != ImportType.HARVEST) && !ctxt.datasets().isIdentifierUniqueInDatabase(theDataset.getIdentifier(), theDataset, idServiceBean)) {
        throw new IllegalCommandException(String.format("Dataset with identifier '%s', protocol '%s' and authority '%s' already exists", theDataset.getIdentifier(), theDataset.getProtocol(), theDataset.getAuthority()), this);
    }
    // If we are importing with the API, we don't want to create an editable version;
    // just save the version that is already in theDataset.
    DatasetVersion dsv = importType != null ? theDataset.getLatestVersion() : theDataset.getEditVersion();
    // validate
    // @todo for now we run through an initFields method that creates empty fields for
    // anything without a value, so that required fields can be validated
    dsv.setDatasetFields(dsv.initDatasetFields());
    Set<ConstraintViolation> constraintViolations = dsv.validate();
    if (!constraintViolations.isEmpty()) {
        StringBuilder validationFailed = new StringBuilder("Validation failed:");
        for (ConstraintViolation constraintViolation : constraintViolations) {
            validationFailed.append(" ").append(constraintViolation.getMessage());
            validationFailed.append(" Invalid value: '").append(constraintViolation.getInvalidValue()).append("'.");
        }
        throw new IllegalCommandException(validationFailed.toString(), this);
    }
    theDataset.setCreator((AuthenticatedUser) getRequest().getUser());
    theDataset.setCreateDate(new Timestamp(new Date().getTime()));
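    // strip blank values; if a dataset field is left with no values at all, drop it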
    Iterator<DatasetField> dsfIt = dsv.getDatasetFields().iterator();
    while (dsfIt.hasNext()) {
        if (dsfIt.next().removeBlankDatasetFieldValues()) {
            dsfIt.remove();
        }
    }
    for (DatasetField dsf : dsv.getDatasetFields()) {
        dsf.setValueDisplayOrder();
    }
    Timestamp createDate = new Timestamp(new Date().getTime());
    dsv.setCreateTime(createDate);
    dsv.setLastUpdateTime(createDate);
    theDataset.setModificationTime(createDate);
    for (DataFile dataFile : theDataset.getFiles()) {
        dataFile.setCreator((AuthenticatedUser) getRequest().getUser());
        dataFile.setCreateDate(theDataset.getCreateDate());
    }
    String nonNullDefaultIfKeyNotFound = "";
    String protocol = ctxt.settings().getValueForKey(SettingsServiceBean.Key.Protocol, nonNullDefaultIfKeyNotFound);
    String authority = ctxt.settings().getValueForKey(SettingsServiceBean.Key.Authority, nonNullDefaultIfKeyNotFound);
    String doiSeparator = ctxt.settings().getValueForKey(SettingsServiceBean.Key.DoiSeparator, nonNullDefaultIfKeyNotFound);
    String doiProvider = ctxt.settings().getValueForKey(SettingsServiceBean.Key.DoiProvider, nonNullDefaultIfKeyNotFound);
    if (theDataset.getProtocol() == null) {
        theDataset.setProtocol(protocol);
    }
    if (theDataset.getAuthority() == null) {
        theDataset.setAuthority(authority);
    }
    if (theDataset.getDoiSeparator() == null) {
        theDataset.setDoiSeparator(doiSeparator);
    }
    if (theDataset.getStorageIdentifier() == null) {
        try {
            DataAccess.createNewStorageIO(theDataset, "placeholder");
        } catch (IOException ioex) {
            // if setting the storage identifier through createNewStorageIO fails, dataset creation
            // does not have to fail. we just set the storage id to a default -SF
            String storageDriver = (System.getProperty("dataverse.files.storage-driver-id") != null) ? System.getProperty("dataverse.files.storage-driver-id") : "file";
            theDataset.setStorageIdentifier(storageDriver + "://" + theDataset.getAuthority() + theDataset.getDoiSeparator() + theDataset.getIdentifier());
            logger.info("Failed to create StorageIO. StorageIdentifier set to default. Not fatal." + "(" + ioex.getMessage() + ")");
        }
    }
    if (theDataset.getIdentifier() == null) {
        /*
            If this command is being executed to save a new dataset initialized
            by the Dataset page (in CREATE mode), it already has the persistent
            identifier.
            Same with a new harvested dataset - the imported metadata record
            must have contained a global identifier, for the harvester to be
            trying to save it permanently in the database.

            In some other cases, such as when a new dataset is created
            via the API, the identifier will need to be generated here.

                -- L.A. 4.6.2
         */
        theDataset.setIdentifier(ctxt.datasets().generateDatasetIdentifier(theDataset, idServiceBean));
    }
    logger.fine("Saving the files permanently.");
    ctxt.ingest().addFiles(dsv, theDataset.getFiles());
    logger.log(Level.FINE, "doiProvider={0} protocol={1}  importType={2}  GlobalIdCreateTime=={3}", new Object[] { doiProvider, protocol, importType, theDataset.getGlobalIdCreateTime() });
    // Attempt the registration if importing dataset through the API, or the app (but not harvest or migrate)
    if ((importType == null || importType.equals(ImportType.NEW)) && theDataset.getGlobalIdCreateTime() == null) {
        String doiRetString = "";
        idServiceBean = IdServiceBean.getBean(ctxt);
        try {
            logger.log(Level.FINE, "creating identifier");
            doiRetString = idServiceBean.createIdentifier(theDataset);
        } catch (Throwable e) {
            logger.log(Level.WARNING, "Exception while creating Identifier: " + e.getMessage(), e);
        }
        // Check return value to make sure registration succeeded
        if (!idServiceBean.registerWhenPublished() && doiRetString.contains(theDataset.getIdentifier())) {
            theDataset.setGlobalIdCreateTime(createDate);
        }
    } else if (theDataset.getLatestVersion().getVersionState().equals(VersionState.RELEASED)) {
        // no registration attempt is needed here (harvest/migrate import, or the
        // global id already exists), so just set the globalIdCreateTime to now
        theDataset.setGlobalIdCreateTime(new Date());
    }
    if (registrationRequired && theDataset.getGlobalIdCreateTime() == null) {
        throw new IllegalCommandException("Dataset could not be created.  Registration failed", this);
    }
    logger.log(Level.FINE, "after doi {0}", formatter.format(new Date().getTime()));
    Dataset savedDataset = ctxt.em().merge(theDataset);
    logger.log(Level.FINE, "after db update {0}", formatter.format(new Date().getTime()));
    // set the role to be default contributor role for its dataverse
    if (importType == null || importType.equals(ImportType.NEW)) {
        String privateUrlToken = null;
        ctxt.roles().save(new RoleAssignment(savedDataset.getOwner().getDefaultContributorRole(), getRequest().getUser(), savedDataset, privateUrlToken));
    }
    savedDataset.setPermissionModificationTime(new Timestamp(new Date().getTime()));
    savedDataset = ctxt.em().merge(savedDataset);
    if (template != null) {
        ctxt.templates().incrementUsageCount(template.getId());
    }
    logger.fine("Checking if rsync support is enabled.");
    if (DataCaptureModuleUtil.rsyncSupportEnabled(ctxt.settings().getValueForKey(SettingsServiceBean.Key.UploadMethods))) {
        try {
            ScriptRequestResponse scriptRequestResponse = ctxt.engine().submit(new RequestRsyncScriptCommand(getRequest(), savedDataset));
            logger.fine("script: " + scriptRequestResponse.getScript());
        } catch (RuntimeException ex) {
            logger.info("Problem getting rsync script: " + ex.getLocalizedMessage());
        }
    }
    logger.fine("Done with rsync request, if any.");
    try {
        /**
         * @todo Do something with the result. Did it succeed or fail?
         */
        boolean doNormalSolrDocCleanUp = true;
        ctxt.index().indexDataset(savedDataset, doNormalSolrDocCleanUp);
    } catch (Exception e) {
        logger.log(Level.WARNING, "Exception while indexing: " + e.getMessage(), e);
        /**
         * Even though the original intention appears to have been to allow the
         * dataset to be successfully created, even if an exception is thrown during
         * the indexing - in reality, a runtime exception there, even caught,
         * still forces the EJB transaction to be rolled back; hence the
         * dataset is NOT created... but the command completes and exits as if
         * it has been successful.
         * So I am going to throw a Command Exception here, to avoid this.
         * If we DO want to be able to create datasets even if they cannot
         * be immediately indexed, we'll have to figure out how to do that.
         * (Note that import is still possible when Solr is down - because indexDataset()
         * does NOT throw an exception if it is.)
         * -- L.A. 4.5
         */
        throw new CommandException("Dataset could not be created. Indexing failed", this);
    }
    logger.log(Level.FINE, "after index {0}", formatter.format(new Date().getTime()));
    // if we are not migrating, assign the user to this version
    if (importType == null || importType.equals(ImportType.NEW)) {
        DatasetVersionUser datasetVersionDataverseUser = new DatasetVersionUser();
        String id = getRequest().getUser().getIdentifier();
        id = id.startsWith("@") ? id.substring(1) : id;
        AuthenticatedUser au = ctxt.authentication().getAuthenticatedUser(id);
        datasetVersionDataverseUser.setAuthenticatedUser(au);
        datasetVersionDataverseUser.setLastUpdateDate(createDate);
        if (savedDataset.getLatestVersion().getId() == null) {
            logger.warning("CreateDatasetCommand: savedDataset version id is null");
        }
        datasetVersionDataverseUser.setDatasetVersion(savedDataset.getLatestVersion());
        ctxt.em().merge(datasetVersionDataverseUser);
    }
    logger.log(Level.FINE, "after create version user " + formatter.format(new Date().getTime()));
    return savedDataset;
}
Also used: ScriptRequestResponse (edu.harvard.iq.dataverse.datacapturemodule.ScriptRequestResponse), CommandException (edu.harvard.iq.dataverse.engine.command.exception.CommandException), IllegalCommandException (edu.harvard.iq.dataverse.engine.command.exception.IllegalCommandException), IOException (java.io.IOException), Timestamp (java.sql.Timestamp), AuthenticatedUser (edu.harvard.iq.dataverse.authorization.users.AuthenticatedUser), Date (java.util.Date), ConstraintViolation (javax.validation.ConstraintViolation), SimpleDateFormat (java.text.SimpleDateFormat)
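
For context, a caller typically submits this command through the Dataverse command engine and handles CommandException at the call site. A minimal sketch, assuming an injected engine and a CreateDatasetCommand(dataset, request) constructor (both are assumptions based on the snippets on this page, not a verbatim API):

// Hypothetical caller; commandEngine, newDataset, request and logger are assumed to be in scope.
try {
    Dataset created = commandEngine.submit(new CreateDatasetCommand(newDataset, request));
    logger.fine("Created dataset " + created.getId());
} catch (IllegalCommandException ice) {
    // validation failures, duplicate identifiers, failed registration (see above)
    logger.warning("Could not create dataset: " + ice.getMessage());
} catch (CommandException ce) {
    // infrastructure failures, e.g. the indexing failure thrown near the end of execute()
    logger.warning("Dataset creation failed: " + ce.getMessage());
}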

Example 82 with CommandException

Use of edu.harvard.iq.dataverse.engine.command.exception.CommandException in project dataverse by IQSS.

In the class DeleteDataFileCommand, the method executeImpl:

@Override
protected void executeImpl(CommandContext ctxt) throws CommandException {
    if (destroy) {
        // for now, if called as destroy, check for superuser access
        if (doomed.getOwner().isReleased() && (!(getUser() instanceof AuthenticatedUser) || !getUser().isSuperuser())) {
            throw new PermissionException("Destroy can only be called by superusers.", this, Collections.singleton(Permission.DeleteDatasetDraft), doomed);
        }
    } else if (doomed.isReleased() || doomed.getFileMetadatas().size() > 1 || doomed.getFileMetadata().getDatasetVersion().isReleased()) {
        // otherwise, confirm that the file is unpublished and attached to exactly one,
        // unreleased, dataset version
        throw new CommandException("Cannot delete file: the DataFile is published, is attached to more than one Dataset Version, or is attached to a released Dataset Version.", this);
    }
    // We need to delete a bunch of physical files, either from the file system,
    // or from some other storage medium where the datafile is stored,
    // via its StorageIO driver.
    // First we delete the derivative files, then try to delete the data
    // file itself; if that
    // fails, we throw an exception and abort the command without
    // trying to remove the object from the database.
    // However, we do not attempt to do any deletes if this is a Harvested
    // file.
    logger.log(Level.FINE, "Delete command called on an unpublished DataFile {0}", doomed.getId());
    if (!doomed.isHarvested() && !StringUtil.isEmpty(doomed.getStorageIdentifier())) {
        if (FileUtil.isPackageFile(doomed)) {
            try {
                String datasetDirectory = doomed.getOwner().getFileSystemDirectory().toString();
                Path datasetDirectoryPath = Paths.get(datasetDirectory, doomed.getStorageIdentifier());
                Files.walkFileTree(datasetDirectoryPath, new SimpleFileVisitor<Path>() {

                    @Override
                    public FileVisitResult visitFile(final Path file, final BasicFileAttributes attrs) throws IOException {
                        Files.delete(file);
                        return FileVisitResult.CONTINUE;
                    }

                    @Override
                    public FileVisitResult visitFileFailed(final Path file, final IOException e) {
                        return handleException(e);
                    }

                    private FileVisitResult handleException(final IOException e) {
                        logger.warning("Failed to delete file due to " + e.getMessage());
                        return FileVisitResult.TERMINATE;
                    }

                    @Override
                    public FileVisitResult postVisitDirectory(final Path dir, final IOException e) throws IOException {
                        if (e != null)
                            return handleException(e);
                        Files.delete(dir);
                        return FileVisitResult.CONTINUE;
                    }
                });
            } catch (IOException ioex) {
                throw new CommandExecutionException("Failed to delete package file " + doomed.getStorageIdentifier(), ioex, this);
            }
            logger.info("Successfully deleted the package file " + doomed.getStorageIdentifier());
        } else {
            logger.log(Level.FINE, "Storage identifier for the file: {0}", doomed.getStorageIdentifier());
            StorageIO<DataFile> storageIO = null;
            try {
                storageIO = doomed.getStorageIO();
            } catch (IOException ioex) {
                throw new CommandExecutionException("Failed to initialize physical access driver.", ioex, this);
            }
            if (storageIO != null) {
                // if deleting the auxiliary files fails, log the failure and proceed
                // with deleting the database object:
                try {
                    storageIO.open();
                    storageIO.deleteAllAuxObjects();
                } catch (IOException ioex) {
                    logger.log(Level.SEVERE, "Error deleting Auxiliary file(s) while deleting DataFile {0}", doomed.getStorageIdentifier());
                }
                // We only want to attempt to delete the main physical file
                // if it actually exists, on the filesystem or wherever it
                // is actually stored by its StorageIO:
                boolean physicalFileExists = false;
                try {
                    physicalFileExists = storageIO.exists();
                } catch (IOException ioex) {
                    // We'll assume that an exception here means that the file does not
                    // exist; so we can skip trying to delete it.
                    physicalFileExists = false;
                }
                if (physicalFileExists) {
                    try {
                        storageIO.delete();
                    } catch (IOException ex) {
                        // This we will treat as a fatal condition:
                        throw new CommandExecutionException("Error deleting physical file object while deleting DataFile " + doomed.getId() + " from the database.", ex, this);
                    }
                }
                logger.log(Level.FINE, "Successfully deleted physical storage object (file) for the DataFile {0}", doomed.getId());
                // Destroy the storageIO object - we will need to purge the
                // DataFile from the database (below), so we don't want to have any
                // objects in this transaction that reference it:
                storageIO = null;
            }
        }
    }
    DataFile doomedAndMerged = ctxt.em().merge(doomed);
    ctxt.em().remove(doomedAndMerged);
    /**
     * @todo consider adding an em.flush here (despite the performance
     * impact) if you need to operate on the dataset below. Without the
     * flush, the dataset still thinks it has the file that was just
     * deleted.
     */
    // ctxt.em().flush();
    /**
     * We *could* re-index the entire dataset but it's more efficient to
     * target individual files for deletion, which should always be drafts.
     *
     * See also https://redmine.hmdc.harvard.edu/issues/3786
     */
    String indexingResult = ctxt.index().removeSolrDocFromIndex(IndexServiceBean.solrDocIdentifierFile + doomed.getId() + "_draft");
    /**
     * @todo check indexing result for success or failure. Really, we need
     * an indexing queuing system:
     * https://redmine.hmdc.harvard.edu/issues/3643
     */
}
Also used: PermissionException (edu.harvard.iq.dataverse.engine.command.exception.PermissionException), Path (java.nio.file.Path), FileVisitResult (java.nio.file.FileVisitResult), CommandException (edu.harvard.iq.dataverse.engine.command.exception.CommandException), CommandExecutionException (edu.harvard.iq.dataverse.engine.command.exception.CommandExecutionException), IOException (java.io.IOException), AuthenticatedUser (edu.harvard.iq.dataverse.authorization.users.AuthenticatedUser), DataFile (edu.harvard.iq.dataverse.DataFile), BasicFileAttributes (java.nio.file.attribute.BasicFileAttributes)
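
The package-file branch above is, at its core, a bottom-up recursive delete. As a standalone illustration of the same Files.walkFileTree pattern (plain JDK, independent of Dataverse; here a failure aborts the walk by rethrowing instead of terminating quietly):

import java.io.IOException;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;

public class RecursiveDelete {

    // Delete a directory tree bottom-up: each file first, then the emptied directories.
    static void deleteTree(Path root) throws IOException {
        Files.walkFileTree(root, new SimpleFileVisitor<Path>() {
            @Override
            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                Files.delete(file);
                return FileVisitResult.CONTINUE;
            }

            @Override
            public FileVisitResult postVisitDirectory(Path dir, IOException e) throws IOException {
                if (e != null) {
                    throw e; // something below this directory could not be visited; abort
                }
                Files.delete(dir);
                return FileVisitResult.CONTINUE;
            }
        });
    }

    public static void main(String[] args) throws IOException {
        deleteTree(Paths.get(args[0]));
    }
}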

Example 83 with CommandException

Use of edu.harvard.iq.dataverse.engine.command.exception.CommandException in project dataverse by IQSS.

In the class FinalizeDatasetPublicationCommand, the method publicizeExternalIdentifier:

private void publicizeExternalIdentifier(Dataset dataset, CommandContext ctxt) throws CommandException {
    // use the dataset parameter consistently (the caller passes theDataset here anyway)
    String protocol = dataset.getProtocol();
    IdServiceBean idServiceBean = IdServiceBean.getBean(protocol, ctxt);
    if (idServiceBean != null) {
        try {
            idServiceBean.publicizeIdentifier(dataset);
        } catch (Throwable e) {
            throw new CommandException(BundleUtil.getStringFromBundle("dataset.publish.error", idServiceBean.getProviderInformation()), this);
        }
    }
}
Also used: IdServiceBean (edu.harvard.iq.dataverse.IdServiceBean), CommandException (edu.harvard.iq.dataverse.engine.command.exception.CommandException), IllegalCommandException (edu.harvard.iq.dataverse.engine.command.exception.IllegalCommandException)
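
One nuance: the catch above drops the original Throwable, so its stack trace never reaches the server log. CommandExecutionException in Example 82 is constructed with a (message, cause, command) signature; assuming CommandException offers the same three-argument constructor, the cause could be chained:

    try {
        idServiceBean.publicizeIdentifier(dataset);
    } catch (Throwable e) {
        // chaining e preserves the stack trace; the three-argument constructor is an
        // assumption here, by analogy with CommandExecutionException in Example 82
        throw new CommandException(BundleUtil.getStringFromBundle("dataset.publish.error",
                idServiceBean.getProviderInformation()), e, this);
    }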

Example 84 with CommandException

Use of edu.harvard.iq.dataverse.engine.command.exception.CommandException in project dataverse by IQSS.

In the class FinalizeDatasetPublicationCommand, the method execute:

@Override
public Dataset execute(CommandContext ctxt) throws CommandException {
    registerExternalIdentifier(theDataset, ctxt);
    if (theDataset.getPublicationDate() == null) {
        theDataset.setReleaseUser((AuthenticatedUser) getUser());
        theDataset.setPublicationDate(new Timestamp(new Date().getTime()));
    }
    // update metadata
    Timestamp updateTime = new Timestamp(new Date().getTime());
    theDataset.getEditVersion().setReleaseTime(updateTime);
    theDataset.getEditVersion().setLastUpdateTime(updateTime);
    theDataset.setModificationTime(updateTime);
    theDataset.setFileAccessRequest(theDataset.getLatestVersion().getTermsOfUseAndAccess().isFileAccessRequest());
    updateFiles(updateTime, ctxt);
    // 
    // TODO: Not sure if this .merge() is necessary here - ?
    // I'm moving a bunch of code from PublishDatasetCommand here; and this .merge()
    // comes from there. There's a chance that the final merge, at the end of this
    // command, would be sufficient. -- L.A. Sep. 6 2017
    theDataset = ctxt.em().merge(theDataset);
    DatasetVersionUser ddu = ctxt.datasets().getDatasetVersionUser(theDataset.getLatestVersion(), getUser());
    if (ddu == null) {
        ddu = new DatasetVersionUser();
        ddu.setDatasetVersion(theDataset.getLatestVersion());
        String id = getUser().getIdentifier();
        id = id.startsWith("@") ? id.substring(1) : id;
        AuthenticatedUser au = ctxt.authentication().getAuthenticatedUser(id);
        ddu.setAuthenticatedUser(au);
    }
    ddu.setLastUpdateDate(updateTime);
    ctxt.em().merge(ddu);
    updateParentDataversesSubjectsField(theDataset, ctxt);
    publicizeExternalIdentifier(theDataset, ctxt);
    PrivateUrl privateUrl = ctxt.engine().submit(new GetPrivateUrlCommand(getRequest(), theDataset));
    if (privateUrl != null) {
        ctxt.engine().submit(new DeletePrivateUrlCommand(getRequest(), theDataset));
    }
    theDataset.getEditVersion().setVersionState(DatasetVersion.VersionState.RELEASED);
    exportMetadata(ctxt.settings());
    boolean doNormalSolrDocCleanUp = true;
    ctxt.index().indexDataset(theDataset, doNormalSolrDocCleanUp);
    ctxt.solrIndex().indexPermissionsForOneDvObject(theDataset);
    // Remove locks
    ctxt.engine().submit(new RemoveLockCommand(getRequest(), theDataset, DatasetLock.Reason.Workflow));
    if (theDataset.isLockedFor(DatasetLock.Reason.InReview)) {
        ctxt.engine().submit(new RemoveLockCommand(getRequest(), theDataset, DatasetLock.Reason.InReview));
    }
    ctxt.workflows().getDefaultWorkflow(TriggerType.PostPublishDataset).ifPresent(wf -> {
        try {
            ctxt.workflows().start(wf, buildContext(doiProvider, TriggerType.PostPublishDataset));
        } catch (CommandException ex) {
            logger.log(Level.SEVERE, "Error invoking post-publish workflow: " + ex.getMessage(), ex);
        }
    });
    Dataset resultSet = ctxt.em().merge(theDataset);
    if (resultSet != null) {
        notifyUsersDatasetPublish(ctxt, theDataset);
    }
    return resultSet;
}
Also used: DatasetVersionUser (edu.harvard.iq.dataverse.DatasetVersionUser), PrivateUrl (edu.harvard.iq.dataverse.privateurl.PrivateUrl), Dataset (edu.harvard.iq.dataverse.Dataset), CommandException (edu.harvard.iq.dataverse.engine.command.exception.CommandException), IllegalCommandException (edu.harvard.iq.dataverse.engine.command.exception.IllegalCommandException), Timestamp (java.sql.Timestamp), AuthenticatedUser (edu.harvard.iq.dataverse.authorization.users.AuthenticatedUser), Date (java.util.Date)
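
The same identifier normalization (stripping the leading '@' from the user identifier) appears verbatim in Examples 81 and 84. A tiny hypothetical helper would remove the duplication (the method name is my own, not from the Dataverse codebase):

// Hypothetical helper: Dataverse user identifiers carry an '@' prefix.
static String rawIdentifier(String id) {
    return id.startsWith("@") ? id.substring(1) : id;
}

// usage, replacing the two-line pattern above:
AuthenticatedUser au = ctxt.authentication().getAuthenticatedUser(rawIdentifier(getUser().getIdentifier()));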

Example 85 with CommandException

Use of edu.harvard.iq.dataverse.engine.command.exception.CommandException in project dataverse by IQSS.

In the class ImportFromFileSystemCommand, the method execute:

@Override
public JsonObject execute(CommandContext ctxt) throws CommandException {
    JsonObjectBuilder bld = jsonObjectBuilder();
    /**
     * batch import as-individual-datafiles is disabled in this iteration;
     * only the import-as-a-package is allowed. -- L.A. Feb 2 2017
     */
    String fileMode = FileRecordWriter.FILE_MODE_PACKAGE_FILE;
    try {
        /**
         * Current constraints: 1. only supports merge and replace mode 2.
         * valid dataset 3. valid dataset directory 4. valid user & user has
         * edit dataset permission 5. only one dataset version 6. dataset
         * version is draft
         */
        if (!mode.equalsIgnoreCase("MERGE") && !mode.equalsIgnoreCase("REPLACE")) {
            String error = "Import mode: " + mode + " is not currently supported.";
            logger.info(error);
            throw new IllegalCommandException(error, this);
        }
        if (!fileMode.equals(FileRecordWriter.FILE_MODE_INDIVIDUAL_FILES) && !fileMode.equals(FileRecordWriter.FILE_MODE_PACKAGE_FILE)) {
            String error = "File import mode: " + fileMode + " is not supported.";
            logger.info(error);
            throw new IllegalCommandException(error, this);
        }
        File directory = new File(System.getProperty("dataverse.files.directory") + File.separator + dataset.getAuthority() + File.separator + dataset.getIdentifier());
        if (!isValidDirectory(directory)) {
            String error = "Dataset directory is invalid. " + directory;
            logger.info(error);
            throw new IllegalCommandException(error, this);
        }
        if (Strings.isNullOrEmpty(uploadFolder)) {
            String error = "No uploadFolder specified";
            logger.info(error);
            throw new IllegalCommandException(error, this);
        }
        File uploadDirectory = new File(System.getProperty("dataverse.files.directory") + File.separator + dataset.getAuthority() + File.separator + dataset.getIdentifier() + File.separator + uploadFolder);
        if (!isValidDirectory(uploadDirectory)) {
            String error = "Upload folder is not a valid directory.";
            logger.info(error);
            throw new IllegalCommandException(error, this);
        }
        if (dataset.getVersions().size() != 1) {
            String error = "Error creating FilesystemImportJob with dataset with ID: " + dataset.getId() + " - Dataset has more than one version.";
            logger.info(error);
            throw new IllegalCommandException(error, this);
        }
        if (dataset.getLatestVersion().getVersionState() != DatasetVersion.VersionState.DRAFT) {
            String error = "Error creating FilesystemImportJob with dataset with ID: " + dataset.getId() + " - Dataset isn't in DRAFT mode.";
            logger.info(error);
            throw new IllegalCommandException(error, this);
        }
        try {
            Properties props = new Properties();
            props.setProperty("datasetId", dataset.getId().toString());
            props.setProperty("userId", getUser().getIdentifier().replace("@", ""));
            props.setProperty("mode", mode);
            props.setProperty("fileMode", fileMode);
            props.setProperty("uploadFolder", uploadFolder);
            if (totalSize != null && totalSize > 0) {
                props.setProperty("totalSize", totalSize.toString());
            }
            JobOperator jo = BatchRuntime.getJobOperator();
            long jid = jo.start("FileSystemImportJob", props);
            if (jid > 0) {
                bld.add("executionId", jid).add("message", "FileSystemImportJob in progress");
                return bld.build();
            } else {
                String error = "Error creating FilesystemImportJob with dataset with ID: " + dataset.getId();
                logger.info(error);
                throw new CommandException(error, this);
            }
        } catch (JobStartException | JobSecurityException ex) {
            String error = "Error creating FilesystemImportJob with dataset with ID: " + dataset.getId() + " - " + ex.getMessage();
            logger.info(error);
            throw new IllegalCommandException(error, this);
        }
    } catch (Exception e) {
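        // note: this broad catch also intercepts the IllegalCommandExceptions thrown
        // above, so they are reported as a JSON message rather than propagated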
        bld.add("message", "Import Exception - " + e.getMessage());
        return bld.build();
    }
}
Also used: JobSecurityException (javax.batch.operations.JobSecurityException), JobStartException (javax.batch.operations.JobStartException), JobOperator (javax.batch.operations.JobOperator), IllegalCommandException (edu.harvard.iq.dataverse.engine.command.exception.IllegalCommandException), CommandException (edu.harvard.iq.dataverse.engine.command.exception.CommandException), JsonObjectBuilder (javax.json.JsonObjectBuilder), Properties (java.util.Properties), File (java.io.File)
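
The job launch at the core of this command uses the standard JSR-352 JobOperator API. A minimal standalone sketch (the job name must match a job XML on the classpath, e.g. META-INF/batch-jobs/FileSystemImportJob.xml, and a batch runtime must be available):

import java.util.Properties;
import javax.batch.operations.JobOperator;
import javax.batch.runtime.BatchRuntime;

public class JobLauncher {

    public static void main(String[] args) {
        Properties props = new Properties();
        // job parameters are exposed to the job as injectable batch properties
        props.setProperty("mode", "MERGE");
        JobOperator jo = BatchRuntime.getJobOperator();
        // start() returns the execution id of the newly started job instance
        long executionId = jo.start("FileSystemImportJob", props);
        System.out.println("Started FileSystemImportJob, execution id " + executionId);
    }
}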

Aggregations

CommandException (edu.harvard.iq.dataverse.engine.command.exception.CommandException): 86 usages
Dataset (edu.harvard.iq.dataverse.Dataset): 21
AuthenticatedUser (edu.harvard.iq.dataverse.authorization.users.AuthenticatedUser): 20
IllegalCommandException (edu.harvard.iq.dataverse.engine.command.exception.IllegalCommandException): 19
Test (org.junit.Test): 16
PermissionException (edu.harvard.iq.dataverse.engine.command.exception.PermissionException): 15
EJBException (javax.ejb.EJBException): 13
DataverseRequest (edu.harvard.iq.dataverse.engine.command.DataverseRequest): 12
DataFile (edu.harvard.iq.dataverse.DataFile): 11
Dataverse (edu.harvard.iq.dataverse.Dataverse): 9
UpdateDatasetCommand (edu.harvard.iq.dataverse.engine.command.impl.UpdateDatasetCommand): 9
ConstraintViolation (javax.validation.ConstraintViolation): 9
DatasetThumbnail (edu.harvard.iq.dataverse.dataset.DatasetThumbnail): 8
IOException (java.io.IOException): 8
Timestamp (java.sql.Timestamp): 8
ArrayList (java.util.ArrayList): 8
Date (java.util.Date): 8
FacesMessage (javax.faces.application.FacesMessage): 7
DatasetVersion (edu.harvard.iq.dataverse.DatasetVersion): 6
SwordError (org.swordapp.server.SwordError): 6