Use of org.akaza.openclinica.bean.extract.ExtractPropertyBean in project OpenClinica by OpenClinica.
Class: CoreResources — method: findExtractProperties.
/**
 * Reads the numbered "extract.N.*" entries from the extract properties file and builds one
 * ExtractPropertyBean per configured extract format.
 * <p>
 * For each slot N (1..max) that declares an xslt file, the bean is populated with the file
 * names, display texts, output location, zip/delete flags and messages, plus an optional
 * post-processing function ("sql", "pdf", "sas", or a custom processor key).
 *
 * @return the list of configured extract property beans (possibly empty)
 * @throws OpenClinicaSystemException if a declared xslt file does not exist, or if the
 *         comma-separated file-name and export-file-name lists differ in length
 */
private ArrayList<ExtractPropertyBean> findExtractProperties() throws OpenClinicaSystemException {
    ArrayList<ExtractPropertyBean> ret = new ArrayList<ExtractPropertyBean>();
    int maxExtractOption = getMaxExtractCounterValue();
    for (int i = 1; i <= maxExtractOption; i++) {
        // Skip slots with no xslt file entry — they are not configured.
        if (getExtractField("extract." + i + ".file").equals("")) {
            continue;
        }
        ExtractPropertyBean epbean = new ExtractPropertyBean();
        // id is used by the front end to find this bean later (see findExtractPropertyBeanById).
        epbean.setId(i);
        // Comma-separated xslt stylesheet file names; abort system start if any file is missing.
        String[] xsltFileNames = getExtractFields("extract." + i + ".file");
        checkForFile(xsltFileNames);
        epbean.setFileName(xsltFileNames);
        // description of the choice of format
        epbean.setFiledescription(getExtractField("extract." + i + ".fileDescription"));
        // help text, currently shown in the alt-text of the link
        epbean.setHelpText(getExtractField("extract." + i + ".helpText"));
        // link text of the choice of format
        epbean.setLinkText(getExtractField("extract." + i + ".linkText"));
        // destination of the copied files
        epbean.setFileLocation(getExtractField("extract." + i + ".location"));
        epbean.setOdmType(getExtractField("extract." + i + ".odmType"));
        // formatting choice, currently permanently set at oc1.3
        epbean.setFormat("oc1.3");
        // destination file name(s) of the copied files (comma-separated, parallel to file names)
        epbean.setExportFileName(getExtractFields("extract." + i + ".exportname"));
        // post-processing event after the creation
        String whichFunction = getExtractField("extract." + i + ".post").toLowerCase();
        // zip format comes from extract properties; returns true by default
        epbean.setZipFormat(getExtractFieldBoolean("extract." + i + ".zip"));
        epbean.setDeleteOld(getExtractFieldBoolean("extract." + i + ".deleteOld"));
        epbean.setSuccessMessage(getExtractField("extract." + i + ".success"));
        epbean.setFailureMessage(getExtractField("extract." + i + ".failure"));
        epbean.setZipName(getExtractField("extract." + i + ".zipName"));
        // Each xslt file must have a matching export file name, 1:1.
        if (epbean.getFileName().length != epbean.getExportFileName().length) {
            throw new OpenClinicaSystemException(
                    "The comma separated values of file names and export file names should correspond 1 on 1 for the property number " + i);
        }
        if ("sql".equals(whichFunction)) {
            // set the bean within, so that we can access the file locations etc
            SqlProcessingFunction function = new SqlProcessingFunction(epbean);
            String whichSettings = getExtractField("xsl.post." + i + ".sql");
            if (!"".equals(whichSettings)) {
                function.setDatabaseType(getExtractFieldNoRep(whichSettings + ".dataBase").toLowerCase());
                function.setDatabaseUrl(getExtractFieldNoRep(whichSettings + ".url"));
                function.setDatabaseUsername(getExtractFieldNoRep(whichSettings + ".username"));
                function.setDatabasePassword(getExtractFieldNoRep(whichSettings + ".password"));
            } else {
                // no named settings block: fall back to the default db settings
                function.setDatabaseType(getField("dataBase"));
                function.setDatabaseUrl(getField("url"));
                function.setDatabaseUsername(getField("username"));
                function.setDatabasePassword(getField("password"));
            }
            epbean.setPostProcessing(function);
        } else if ("pdf".equals(whichFunction)) {
            epbean.setPostProcessing(new PdfProcessingFunction());
        } else if ("sas".equals(whichFunction)) {
            epbean.setPostProcessing(new SasProcessingFunction());
        } else if (!whichFunction.isEmpty()) {
            // Custom processor key: look up its declared postProcessor type.
            String postProcessorName = getExtractField(whichFunction + ".postProcessor");
            if (postProcessorName.equals("pdf")) {
                epbean.setPostProcessing(new PdfProcessingFunction());
                epbean.setPostProcDeleteOld(getExtractFieldBoolean(whichFunction + ".deleteOld"));
                epbean.setPostProcZip(getExtractFieldBoolean(whichFunction + ".zip"));
                epbean.setPostProcLocation(getExtractField(whichFunction + ".location"));
                epbean.setPostProcExportName(getExtractField(whichFunction + ".exportname"));
            } else {
                // since the database is the last option TODO: think about custom post processing options
                SqlProcessingFunction function = new SqlProcessingFunction(epbean);
                function.setDatabaseType(getExtractFieldNoRep(whichFunction + ".dataBase").toLowerCase());
                function.setDatabaseUrl(getExtractFieldNoRep(whichFunction + ".url"));
                function.setDatabaseUsername(getExtractFieldNoRep(whichFunction + ".username"));
                function.setDatabasePassword(getExtractFieldNoRep(whichFunction + ".password"));
                epbean.setPostProcessing(function);
            }
        } else {
            // no post-processing configured for this slot
            epbean.setPostProcessing(null);
        }
        ret.add(epbean);
    }
    return ret;
}
Use of org.akaza.openclinica.bean.extract.ExtractPropertyBean in project OpenClinica by OpenClinica.
Class: CoreResources — method: findExtractPropertyBeanById.
// JN: by using static, when you click the same export link from 2 different datasets the first
// one stays intact and is saved in there — hence this lookup is instance-level, not static.
/**
 * Finds the extract property bean with the given id and tags it with the requesting dataset id.
 *
 * @param id the extract slot id (matches ExtractPropertyBean.getId())
 * @param datasetId the dataset id to stamp onto the matching bean
 * @return the matching bean with its dataset id set, or {@code null} if no bean has that id
 */
public ExtractPropertyBean findExtractPropertyBeanById(int id, String datasetId) {
    for (ExtractPropertyBean epbean : findExtractProperties()) {
        if (epbean.getId() == id) {
            epbean.setDatasetId(datasetId);
            return epbean;
        }
    }
    return null;
}
Use of org.akaza.openclinica.bean.extract.ExtractPropertyBean in project OpenClinica by OpenClinica.
Class: XsltTransformJob — method: executeInternal.
/**
 * Quartz job entry point: generates an ODM XML extract for a dataset, runs it through one or
 * more XSLT stylesheets, optionally zips the results and runs a configured post-processing
 * function (SQL load, PDF, SAS, ...), then emails the requesting user a success/failure report.
 * All job parameters (user, study, dataset, paths, ExtractPropertyBean) come from the merged
 * JobDataMap. Intermediate files are cleaned up according to the deleteOld/zip flags, and the
 * dataset's archive state is reset in the finally block.
 */
@Override
protected void executeInternal(JobExecutionContext context) {
logger.info("Job " + context.getJobDetail().getFullName() + " started.");
initDependencies(context.getScheduler());
// need to generate a Locale for emailing users with i18n
// TODO make dynamic?
// NOTE(review): "en-US" is an IETF language tag, not a Locale language code; this yields a
// Locale with language "en-us". Probably intended: new Locale("en", "US") — confirm.
Locale locale = new Locale("en-US");
ResourceBundleProvider.updateLocale(locale);
ResourceBundle pageMessages = ResourceBundleProvider.getPageMessagesBundle();
// Files produced along the way that may be deleted at the end of the run.
List<File> markForDelete = new LinkedList<File>();
Boolean zipped = true;
Boolean deleteOld = true;
Boolean exceptions = false;
JobDataMap dataMap = context.getMergedJobDataMap();
String localeStr = dataMap.getString(LOCALE);
String[] doNotDeleteUntilExtract = new String[4];
int cnt = dataMap.getInt("count");
DatasetBean datasetBean = null;
if (localeStr != null) {
// A locale was supplied with the job: switch the message bundle over to it.
locale = new Locale(localeStr);
ResourceBundleProvider.updateLocale(locale);
pageMessages = ResourceBundleProvider.getPageMessagesBundle();
}
// get the file information from the job
String alertEmail = dataMap.getString(EMAIL);
java.io.InputStream in = null;
FileOutputStream endFileStream = null;
UserAccountBean userBean = null;
try {
// init all fields from the data map
int userAccountId = dataMap.getInt(USER_ID);
int studyId = dataMap.getInt(STUDY_ID);
String outputPath = dataMap.getString(POST_FILE_PATH);
// get all user info, generate xml
logger.debug("found output path: " + outputPath);
String generalFileDir = dataMap.getString(XML_FILE_PATH);
int dsId = dataMap.getInt(DATASET_ID);
// JN: Change from earlier versions, cannot get static reference as
// static references don't work. Reason being for example there could be
// datasetId as a variable which is different for each dataset and
// that needs to be loaded dynamically
ExtractPropertyBean epBean = (ExtractPropertyBean) dataMap.get(EP_BEAN);
// Snapshot the XML dir contents so pre-existing files survive cleanup on failure.
File doNotDelDir = new File(generalFileDir);
if (doNotDelDir.isDirectory()) {
doNotDeleteUntilExtract = doNotDelDir.list();
}
zipped = epBean.getZipFormat();
deleteOld = epBean.getDeleteOld();
long sysTimeBegin = System.currentTimeMillis();
userBean = (UserAccountBean) userAccountDao.findByPK(userAccountId);
StudyBean currentStudy = (StudyBean) studyDao.findByPK(studyId);
StudyBean parentStudy = (StudyBean) studyDao.findByPK(currentStudy.getParentStudyId());
String successMsg = epBean.getSuccessMessage();
String failureMsg = epBean.getFailureMessage();
final long start = System.currentTimeMillis();
datasetBean = (DatasetBean) datasetDao.findByPK(dsId);
ExtractBean eb = generateFileService.generateExtractBean(datasetBean, currentStudy, parentStudy);
// generate file directory for file service
datasetBean.setName(datasetBean.getName().replaceAll(" ", "_"));
logger.debug("--> job starting: ");
// Generate the ODM XML file; the returned map is file name -> archived file id.
HashMap<String, Integer> answerMap = odmFileCreation.createODMFile(epBean.getFormat(), sysTimeBegin, generalFileDir, datasetBean, currentStudy, "", eb, currentStudy.getId(), currentStudy.getParentStudyId(), "99", (Boolean) dataMap.get(ZIPPED), false, (Boolean) dataMap.get(DELETE_OLD), epBean.getOdmType(), userBean);
// won't save a record of the XML to db
// won't be a zipped file, so that we can submit it for
// transformation
// this will have to be toggled by the export data format? no, the
// export file will have to be zipped/not zipped
String ODMXMLFileName = "";
int fId = 0;
// Pick up the generated file name/id. NOTE(review): if the map has several entries,
// only the last iterated entry is kept — presumably the map holds one entry; confirm.
Iterator<Entry<String, Integer>> it = answerMap.entrySet().iterator();
while (it.hasNext()) {
JobTerminationMonitor.check();
Entry<String, Integer> entry = it.next();
String key = entry.getKey();
Integer value = entry.getValue();
// JN: Since there is a logic to
ODMXMLFileName = key;
// delete all the intermittent
// files, this file could be a zip
// file.
Integer fileID = value;
fId = fileID.intValue();
logger.debug("found " + fId + " and " + ODMXMLFileName);
}
logger.info("Finished ODM generation of job " + context.getJobDetail().getFullName());
// create dirs
File output = new File(outputPath);
if (!output.isDirectory()) {
output.mkdirs();
}
// Run every configured stylesheet over the generated ODM XML.
int numXLS = epBean.getFileName().length;
int fileCntr = 0;
String xmlFilePath = new File(generalFileDir + ODMXMLFileName).toURI().toURL().toExternalForm();
String endFile = null;
File oldFilesPath = new File(generalFileDir);
while (fileCntr < numXLS) {
JobTerminationMonitor.check();
String xsltPath = dataMap.getString(XSLT_PATH) + File.separator + epBean.getFileName()[fileCntr];
in = new java.io.FileInputStream(xsltPath);
Transformer transformer = transformerFactory.newTransformer(new StreamSource(in));
endFile = outputPath + File.separator + epBean.getExportFileName()[fileCntr];
endFileStream = new FileOutputStream(endFile);
transformer.transform(new StreamSource(xmlFilePath), new StreamResult(endFileStream));
// JN...CLOSE THE STREAM...HMMMM
in.close();
endFileStream.close();
fileCntr++;
JobTerminationMonitor.check();
}
if (oldFilesPath.isDirectory()) {
markForDelete = Arrays.asList(oldFilesPath.listFiles());
// logic to prevent deleting the file being created.
}
final double done = setFormat(new Double(System.currentTimeMillis() - start) / 1000);
logger.info("--> job completed in " + done + " ms");
// run post processing
ProcessingFunction function = epBean.getPostProcessing();
String subject = "";
String jobName = dataMap.getString(XsltTriggerService.JOB_NAME);
// Build the notification email body as we go.
StringBuffer emailBuffer = new StringBuffer("");
emailBuffer.append("<p>" + pageMessages.getString("email_header_1") + " " + EmailEngine.getAdminEmail() + " " + pageMessages.getString("email_header_2") + " Job Execution " + pageMessages.getString("email_header_3") + "</p>");
emailBuffer.append("<P>Dataset: " + datasetBean.getName() + "</P>");
emailBuffer.append("<P>Study: " + currentStudy.getName() + "</P>");
if (function != null && function.getClass().equals(org.akaza.openclinica.bean.service.SqlProcessingFunction.class)) {
// For SQL post-processing, include target database details in the email.
String dbUrl = ((org.akaza.openclinica.bean.service.SqlProcessingFunction) function).getDatabaseUrl();
int lastIndex = dbUrl.lastIndexOf('/');
String schemaName = dbUrl.substring(lastIndex);
int HostIndex = dbUrl.substring(0, lastIndex).indexOf("//");
String Host = dbUrl.substring(HostIndex, lastIndex);
emailBuffer.append("<P>Database: " + ((org.akaza.openclinica.bean.service.SqlProcessingFunction) function).getDatabaseType() + "</P>");
emailBuffer.append("<P>Schema: " + schemaName.replace("/", "") + "</P>");
emailBuffer.append("<P>Host: " + Host.replace("//", "") + "</P>");
}
emailBuffer.append("<p>" + pageMessages.getString("html_email_body_1") + datasetBean.getName() + pageMessages.getString("html_email_body_2_2") + "</p>");
if (function != null) {
// Post-processing branch: configure and run the function, then record the result file.
function.setTransformFileName(outputPath + File.separator + dataMap.getString(POST_FILE_NAME));
function.setODMXMLFileName(endFile);
function.setXslFileName(dataMap.getString(XSL_FILE_PATH));
function.setDeleteOld((Boolean) dataMap.get(POST_PROC_DELETE_OLD));
function.setZip((Boolean) dataMap.get(POST_PROC_ZIP));
function.setLocation(dataMap.getString(POST_PROC_LOCATION));
function.setExportFileName(dataMap.getString(POST_PROC_EXPORT_NAME));
File[] oldFiles = getOldFiles(outputPath, dataMap.getString(POST_PROC_LOCATION));
function.setOldFiles(oldFiles);
File[] intermediateFiles = getInterFiles(dataMap.getString(POST_FILE_PATH));
ProcessingResultType message = function.run();
// Delete these files only in case when there is no failure (code 2 = failure)
if (message.getCode().intValue() != 2) {
deleteOldFiles(intermediateFiles);
}
final long done2 = System.currentTimeMillis() - start;
logger.info("--> postprocessing completed in " + done2 + " ms, found result type " + message.getCode());
// NOTE(review): the line below duplicates the log statement above — likely unintentional.
logger.info("--> postprocessing completed in " + done2 + " ms, found result type " + message.getCode());
if (!function.getClass().equals(org.akaza.openclinica.bean.service.SqlProcessingFunction.class)) {
// Non-SQL processors produce a downloadable file; register it for the AccessFile link.
String archivedFile = dataMap.getString(POST_FILE_NAME) + "." + function.getFileType();
// download the zip file
if (function.isZip()) {
archivedFile = archivedFile + ".zip";
}
// post processing as well.
if (function.getClass().equals(org.akaza.openclinica.bean.service.PdfProcessingFunction.class)) {
archivedFile = function.getArchivedFileName();
}
ArchivedDatasetFileBean fbFinal = generateFileRecord(archivedFile, outputPath, datasetBean, done, new File(outputPath + File.separator + archivedFile).length(), ExportFormatBean.PDFFILE, userAccountId);
if (successMsg.contains("$linkURL")) {
successMsg = successMsg.replace("$linkURL", "<a href=\"" + CoreResources.getField("sysURL.base") + "AccessFile?fileId=" + fbFinal.getId() + "\">" + CoreResources.getField("sysURL.base") + "AccessFile?fileId=" + fbFinal.getId() + " </a>");
}
emailBuffer.append("<p>" + successMsg + "</p>");
logger.debug("System time begining.." + sysTimeBegin);
logger.debug("System time end.." + System.currentTimeMillis());
double sysTimeEnd = setFormat((System.currentTimeMillis() - sysTimeBegin) / 1000);
logger.debug("difference" + sysTimeEnd);
if (fbFinal != null) {
fbFinal.setFileSize((int) bytesToKilo(new File(archivedFile).length()));
fbFinal.setRunTime(sysTimeEnd);
}
}
// Pick the email subject from the processing result code (1=success, 2=failure, 3=update).
if (message.getCode().intValue() == 1) {
if (jobName != null) {
subject = "Success: " + jobName;
} else {
subject = "Success: " + datasetBean.getName();
}
} else if (message.getCode().intValue() == 2) {
if (jobName != null) {
subject = "Failure: " + jobName;
} else {
subject = "Failure: " + datasetBean.getName();
}
if (failureMsg != null && !failureMsg.isEmpty()) {
emailBuffer.append(failureMsg);
}
emailBuffer.append("<P>").append(message.getDescription());
postErrorMessage(message.getDescription(), context);
} else if (message.getCode().intValue() == 3) {
if (jobName != null) {
subject = "Update: " + jobName;
} else {
subject = "Update: " + datasetBean.getName();
}
}
} else {
// extract ran but no post-processing - we send an email with
// success and url to link to
// generate archived dataset file bean here, and use the id to
// build the URL
String archivedFilename = dataMap.getString(POST_FILE_NAME);
// the zip file
if (zipped) {
archivedFilename = dataMap.getString(POST_FILE_NAME) + ".zip";
}
// delete old files now
List<File> intermediateFiles = generateFileService.getOldFiles();
String[] dontDelFiles = epBean.getDoNotDelFiles();
// JN: The following is the code for zipping up the files, in case of more than one xsl being provided.
if (dontDelFiles.length > 1 && zipped) {
// Several outputs: bundle them all into one zip and only keep that archive.
logger.debug("count =====" + cnt + "dontDelFiles length==---" + dontDelFiles.length);
logger.debug("Entering this?" + cnt + "dontDelFiles" + dontDelFiles);
String path = outputPath + File.separator;
logger.debug("path = " + path);
logger.debug("zipName?? = " + epBean.getZipName());
String zipName = epBean.getZipName() == null || epBean.getZipName().isEmpty() ? endFile + ".zip" : path + epBean.getZipName() + ".zip";
archivedFilename = new File(zipName).getName();
zipAll(path, epBean.getDoNotDelFiles(), zipName);
String[] tempArray = { archivedFilename };
dontDelFiles = tempArray;
endFile = archivedFilename;
} else if (zipped) {
// Single output: zip it and drop the now-redundant xml files.
markForDelete = zipxmls(markForDelete, endFile);
endFile = endFile + ".zip";
String[] temp = new String[dontDelFiles.length];
int i = 0;
while (i < dontDelFiles.length) {
temp[i] = dontDelFiles[i] + ".zip";
i++;
}
dontDelFiles = temp;
// Actually deleting all the xml files which are produced
// since its zipped
FilenameFilter xmlFilter = new XMLFileFilter();
File tempFile = new File(generalFileDir);
deleteOldFiles(tempFile.listFiles(xmlFilter));
}
ArchivedDatasetFileBean fbFinal = generateFileRecord(archivedFilename, outputPath, datasetBean, done, new File(outputPath + File.separator + archivedFilename).length(), ExportFormatBean.TXTFILE, userAccountId);
if (jobName != null) {
subject = "Job Ran: " + jobName;
} else {
subject = "Job Ran: " + datasetBean.getName();
}
if (successMsg == null || successMsg.isEmpty()) {
logger.info("email buffer??" + emailBuffer);
} else {
if (successMsg.contains("$linkURL")) {
successMsg = successMsg.replace("$linkURL", "<a href=\"" + CoreResources.getField("sysURL.base") + "AccessFile?fileId=" + fbFinal.getId() + "\">" + CoreResources.getField("sysURL.base") + "AccessFile?fileId=" + fbFinal.getId() + " </a>");
}
emailBuffer.append("<p>" + successMsg + "</p>");
}
if (deleteOld) {
deleteIntermFiles(intermediateFiles, endFile, dontDelFiles);
deleteIntermFiles(markForDelete, endFile, dontDelFiles);
}
}
// email the message to the user
emailBuffer.append("<p>" + pageMessages.getString("html_email_body_5") + "</p>");
try {
// @pgawade 19-April-2011 Log the event into audit_event table
if (null != dataMap.get("job_type") && ((String) dataMap.get("job_type")).equalsIgnoreCase("exportJob")) {
String extractName = (String) dataMap.get(XsltTriggerService.JOB_NAME);
TriggerBean triggerBean = new TriggerBean();
triggerBean.setDataset(datasetBean);
triggerBean.setUserAccount(userBean);
triggerBean.setFullName(extractName);
String actionMsg = "You may access the " + (String) dataMap.get(XsltTriggerService.EXPORT_FORMAT) + " file by changing your study/site to " + currentStudy.getName() + " and selecting the Export Data icon for " + datasetBean.getName() + " dataset on the View Datasets page.";
auditEventDAO.createRowForExtractDataJobSuccess(triggerBean, actionMsg);
}
mailSender.sendEmail(alertEmail, EmailEngine.getAdminEmail(), subject, emailBuffer.toString(), true);
} catch (OpenClinicaSystemException ose) {
// Do Nothing, In the future we might want to have an email
// status added to system.
logger.info("exception sending mail: " + ose.getMessage());
logger.error("exception sending mail: " + ose.getMessage());
}
logger.info("just sent email to " + alertEmail + ", from " + EmailEngine.getAdminEmail());
if (successMsg == null) {
successMsg = " ";
}
postSuccessMessage(successMsg, context);
} catch (JobInterruptedException e) {
// User cancelled the job; no error email is sent for this case.
logger.info("Job was cancelled by the user");
exceptions = true;
} catch (TransformerConfigurationException e) {
sendErrorEmail(e.getMessage(), context, alertEmail);
postErrorMessage(e.getMessage(), context);
logger.error("Error executing extract", e);
exceptions = true;
} catch (FileNotFoundException e) {
sendErrorEmail(e.getMessage(), context, alertEmail);
postErrorMessage(e.getMessage(), context);
logger.error("Error executing extract", e);
exceptions = true;
} catch (TransformerFactoryConfigurationError e) {
sendErrorEmail(e.getMessage(), context, alertEmail);
postErrorMessage(e.getMessage(), context);
logger.error("Error executing extract", e);
exceptions = true;
} catch (TransformerException e) {
sendErrorEmail(e.getMessage(), context, alertEmail);
postErrorMessage(e.getMessage(), context);
logger.error("Error executing extract", e);
exceptions = true;
} catch (Exception ee) {
sendErrorEmail(ee.getMessage(), context, alertEmail);
postErrorMessage(ee.getMessage(), context);
logger.error("Error executing extract", ee);
exceptions = true;
// Record the failure in the audit trail for export jobs.
if (null != dataMap.get("job_type") && ((String) dataMap.get("job_type")).equalsIgnoreCase("exportJob")) {
TriggerBean triggerBean = new TriggerBean();
triggerBean.setUserAccount(userBean);
triggerBean.setFullName((String) dataMap.get(XsltTriggerService.JOB_NAME));
auditEventDAO.createRowForExtractDataJobFailure(triggerBean);
}
} finally {
// Always close streams, clean up files left over from a failed run, and reset the
// dataset's archive state so the UI no longer shows it as in-progress.
if (in != null)
try {
in.close();
} catch (IOException e) {
logger.error("Error executing extract", e);
}
if (endFileStream != null)
try {
endFileStream.close();
} catch (IOException e) {
logger.error("Error executing extract", e);
}
if (exceptions) {
logger.debug("EXCEPTIONS... EVEN TEHN DELETING OFF OLD FILES");
String generalFileDir = dataMap.getString(XML_FILE_PATH);
File oldFilesPath = new File(generalFileDir);
if (oldFilesPath.isDirectory()) {
markForDelete = Arrays.asList(oldFilesPath.listFiles());
}
logger.debug("deleting the old files reference from archive dataset");
if (deleteOld) {
// Files that pre-existed the extract (doNotDeleteUntilExtract) are spared.
deleteIntermFiles(markForDelete, "", doNotDeleteUntilExtract);
}
}
if (datasetBean != null)
resetArchiveDataset(datasetBean.getId());
logger.info("Job " + context.getJobDetail().getFullName() + " finished.");
}
}
Use of org.akaza.openclinica.bean.extract.ExtractPropertyBean in project OpenClinica by OpenClinica.
Class: SystemController — method: getExtractModule.
/**
* @api {get} /pages/auth/api/v1/system/extract Retrieve Extract Properties
* @apiName getExtractProperties
* @apiPermission Authenticate using api-key. admin
* @apiVersion 3.8.0
* @apiGroup System
* @apiDescription Retrieves Extract Properties
* @apiSuccessExample {json} Success-Response: HTTP/1.1 200 OK
* {
* "extract.properties": {
* "extract.number": {
* "extract.number": "99"
* },
* "extract.1": {
* "zip": "true",
* "failure": "",
* "fileDescription": "CDISC ODM XML 1.3 Full with OpenClinica extensions",
* "linkText": "Run Now",
* "deleteOld": "true",
* "location": "$exportFilePath/$datasetName/ODM_1.3_Full",
* "file": "[copyXML.xsl]",
* "helpText": "CDISC ODM XML 1.3 Full with OpenClinica extensions. Includes discrepancy notes
* and audit trails.",
* "exportname": "[odm1.3_full$datasetName_$dateTime.xml]",
* "success": "The extract completed successfully. The file is available for download $linkURL.",
* "odmType": "full"
* },
* "extract.2": {
* "zip": "true",
* "failure": "",
* "fileDescription": "CDISC ODM XML 1.3 Clinical Data with OpenClinica extensions",
* "linkText": "Run Now",
* "deleteOld": "true",
* "location": "$exportFilePath/$datasetName/ODM_1.3_Extensions",
* "file": "[copyXML.xsl]",
* "helpText": "CDISC ODM XML 1.3 Clinical Data with OpenClinica extensions. Does not include
* discrepancy notes or audit trails.",
* "exportname": "[odm1.3_clinical_ext_$datasetName_$dateTime.xml]",
* "success": "Your extract job completed successfully. The file is available for download
* $linkURL.",
* "odmType": "clinical_data"
* }
* ...
* }
* }
*/
@RequestMapping(value = "/extract", method = RequestMethod.GET)
public ResponseEntity<HashMap> getExtractModule() throws Exception {
    // Fix: new Locale("en_US") creates an invalid locale with language "en_us";
    // language and country must be passed separately.
    ResourceBundleProvider.updateLocale(new Locale("en", "US"));
    HashMap<String, Object> map = new HashMap<>();
    HashMap<String, Object> extractMap = new HashMap<>();
    // One "extract.N" entry per configured extract format, mirroring extract.properties.
    ArrayList<ExtractPropertyBean> extracts = CoreResources.getExtractProperties();
    int n = 0;
    for (ExtractPropertyBean extract : extracts) {
        n++;
        HashMap<String, String> extractEntry = new HashMap<>();
        extractEntry.put("odmType", extract.getOdmType());
        extractEntry.put("file", Arrays.toString(extract.getFileName()));
        extractEntry.put("fileDescription", extract.getFiledescription());
        extractEntry.put("linkText", extract.getLinkText());
        extractEntry.put("helpText", extract.getHelpText());
        extractEntry.put("location", extract.getFileLocation());
        extractEntry.put("exportname", Arrays.toString(extract.getExportFileName()));
        extractEntry.put("zip", String.valueOf(extract.getZipFormat()));
        extractEntry.put("deleteOld", String.valueOf(extract.getDeleteOld()));
        extractEntry.put("success", extract.getSuccessMessage());
        extractEntry.put("failure", extract.getFailureMessage());
        extractMap.put("extract." + n, extractEntry);
    }
    // Datamart connection details come from the db1.* extract properties.
    HashMap<String, String> datamartRole = new HashMap<>();
    String username = CoreResources.getExtractField("db1.username");
    String password = CoreResources.getExtractField("db1.password");
    String url = CoreResources.getExtractField("db1.url");
    HashMap<String, String> extractNumber = new HashMap<>();
    extractNumber.put("extract.number", CoreResources.getExtractField("extract.number"));
    extractMap.put("extract.number", extractNumber);
    // Probe the datamart connection; the result map is currently not returned
    // (see the commented-out "Datamart Facts" entry below).
    HashMap<String, String> datamartMap = new HashMap<>();
    try (Connection conn = DriverManager.getConnection(url, username, password)) {
        datamartRole = getDbRoleProperties(conn, datamartRole, username);
        datamartMap.put("connection", "Open");
    } catch (Exception e) {
        datamartMap.put("connection", "Close");
    }
    // map.put("Datamart Facts", datamartMap);
    map.put("extract.properties", extractMap);
    return new ResponseEntity<HashMap>(map, org.springframework.http.HttpStatus.OK);
}
Use of org.akaza.openclinica.bean.extract.ExtractPropertyBean in project OpenClinica by OpenClinica.
Class: SystemController — method: getDatamartModule.
/**
 * Builds the "Datamart" status entry for the system status API: whether a Datamart
 * extract format is configured, whether the db1 connection is reachable, and the
 * db1 connection metadata (including role properties when the connection succeeds).
 */
public HashMap<String, Object> getDatamartModule(StudyBean studyBean) {
    HashMap<String, String> roleProperties = new HashMap<>();
    String dbUser = CoreResources.getExtractField("db1.username");
    String dbPassword = CoreResources.getExtractField("db1.password");
    String dbUrl = CoreResources.getExtractField("db1.url");

    // Datamart is "enabled" when any configured extract carries that file description.
    boolean datamartConfigured = false;
    for (ExtractPropertyBean extract : CoreResources.getExtractProperties()) {
        if (extract.getFiledescription().equalsIgnoreCase("Datamart")) {
            datamartConfigured = true;
            break;
        }
    }
    String enabled = datamartConfigured ? "True" : "False";

    // Probe the connection; role properties stay empty when it cannot be opened.
    String status;
    try (Connection conn = DriverManager.getConnection(dbUrl, dbUser, dbPassword)) {
        roleProperties = getDbRoleProperties(conn, roleProperties, dbUser);
        status = "ACTIVE";
    } catch (Exception e) {
        status = "INACTIVE";
    }

    HashMap<String, Object> metadata = new HashMap<>();
    metadata.put("db1.username", dbUser);
    metadata.put("db1.url", dbUrl);
    metadata.put("db1.dataBase", CoreResources.getExtractField("db1.dataBase"));
    metadata.put("Role Properties", roleProperties);

    HashMap<String, Object> datamart = new HashMap<>();
    datamart.put("enabled", enabled);
    datamart.put("status", status);
    datamart.put("metadata", metadata);

    HashMap<String, Object> module = new HashMap<>();
    module.put("Datamart", datamart);
    return module;
}
Aggregations