Use of org.akaza.openclinica.dao.extract.DatasetDAO in the OpenClinica project (by OpenClinica): the processSubmit method of the ExtractController class.
/**
 * Processes the extract request: looks up the extract property bean and the dataset,
 * resolves the variable placeholders in all configured file names/paths, and schedules
 * a Quartz job that runs immediately to generate the extract output.
 *
 * @param id the id of the extract properties bean, resolved through CoreResources
 * @param datasetId the id of the dataset, resolved through DatasetDAO
 * @param request http request; must carry an authenticated "userBean" session attribute
 * @param response used only to redirect to the main menu when authentication fails
 * @return an empty model map (the real output is produced by the scheduled Quartz job),
 *         or null when the user is not allowed to proceed
 */
@RequestMapping(method = RequestMethod.GET)
public ModelMap processSubmit(@RequestParam("id") String id, @RequestParam("datasetId") String datasetId, HttpServletRequest request, HttpServletResponse response) {
    if (!mayProceed(request)) {
        try {
            response.sendRedirect(request.getContextPath() + "/MainMenu?message=authentication_failed");
        } catch (Exception e) {
            // best-effort redirect; log instead of printing to stderr
            logger.error("Failed to redirect unauthenticated user to the main menu", e);
        }
        return null;
    }
    ModelMap map = new ModelMap();
    ResourceBundleProvider.updateLocale(LocaleResolver.getLocale(request));
    logger.debug("found both id " + id + " and dataset " + datasetId);
    ExtractUtils extractUtils = new ExtractUtils();
    datasetDao = new DatasetDAO(dataSource);
    UserAccountBean userBean = (UserAccountBean) request.getSession().getAttribute("userBean");
    CoreResources cr = new CoreResources();
    // Integer.parseInt replaces the deprecated new Integer(...).intValue() idiom
    ExtractPropertyBean epBean = cr.findExtractPropertyBeanById(Integer.parseInt(id), datasetId);
    DatasetBean dsBean = (DatasetBean) datasetDao.findByPK(Integer.parseInt(datasetId));
    // set the job in motion
    String[] files = epBean.getFileName();
    String exportFileName;
    int fileSize = files.length;
    int cnt = 0;
    SimpleTrigger simpleTrigger = null;
    // TODO: if files and export names size is not same... throw an error
    dsBean.setName(dsBean.getName().replaceAll(" ", "_"));
    String[] exportFiles = epBean.getExportFileName();
    // per-run output directory: yyyy/MM/dd/HHmmssSSS/
    String pattern = "yyyy" + File.separator + "MM" + File.separator + "dd" + File.separator + "HHmmssSSS" + File.separator;
    SimpleDateFormat sdfDir = new SimpleDateFormat(pattern);
    int i = 0;
    String[] temp = new String[exportFiles.length];
    // JN: The following logic is for comma separated variables, to avoid the
    // second file being treated as an old file and deleted.
    while (i < exportFiles.length) {
        temp[i] = resolveVars(exportFiles[i], dsBean, sdfDir, SQLInitServlet.getField("filePath"), extractUtils);
        i++;
    }
    epBean.setDoNotDelFiles(temp);
    epBean.setExportFileName(temp);
    XsltTriggerService xsltService = new XsltTriggerService();
    String generalFileDir = SQLInitServlet.getField("filePath");
    generalFileDir = generalFileDir + "datasets" + File.separator + dsBean.getId() + File.separator + sdfDir.format(new java.util.Date());
    exportFileName = epBean.getExportFileName()[cnt];
    logger.debug("found odm xml file path " + generalFileDir);
    // JN: all the properties need to have their variables resolved before scheduling
    String xsltPath = SQLInitServlet.getField("filePath") + "xslt" + File.separator + files[cnt];
    String endFilePath = epBean.getFileLocation();
    endFilePath = getEndFilePath(endFilePath, dsBean, sdfDir, SQLInitServlet.getField("filePath"), extractUtils);
    if (epBean.getPostProcExportName() != null) {
        String preProcExportPathName = resolveVars(epBean.getPostProcExportName(), dsBean, sdfDir, SQLInitServlet.getField("filePath"), extractUtils);
        epBean.setPostProcExportName(preProcExportPathName);
    }
    if (epBean.getPostProcLocation() != null) {
        String prePocLoc = getEndFilePath(epBean.getPostProcLocation(), dsBean, sdfDir, SQLInitServlet.getField("filePath"), extractUtils);
        epBean.setPostProcLocation(prePocLoc);
    }
    setAllProps(epBean, dsBean, sdfDir, extractUtils);
    logger.debug("found xslt file name " + xsltPath);
    simpleTrigger = xsltService.generateXsltTrigger(scheduler, xsltPath, // xml_file_path
            generalFileDir, endFilePath + File.separator, exportFileName, dsBean.getId(), epBean, userBean, LocaleResolver.getLocale(request).getLanguage(), cnt, SQLInitServlet.getField("filePath") + "xslt", this.TRIGGER_GROUP_NAME);
    cnt++;
    ApplicationContext context = null;
    try {
        context = (ApplicationContext) scheduler.getContext().get("applicationContext");
    } catch (SchedulerException e) {
        logger.error("Could not retrieve application context from scheduler", e);
    }
    if (context == null) {
        // Without the Spring context we cannot build the job detail; bail out
        // instead of failing with a NullPointerException below.
        logger.error("applicationContext unavailable; extract job was not scheduled");
        return map;
    }
    JobDetailFactoryBean jobDetailFactoryBean = context.getBean(JobDetailFactoryBean.class, simpleTrigger, this.TRIGGER_GROUP_NAME);
    try {
        Date dateStart = scheduler.scheduleJob(jobDetailFactoryBean.getObject(), simpleTrigger);
        logger.debug("== found job date: " + dateStart.toString());
    } catch (SchedulerException se) {
        logger.error("Failed to schedule extract job", se);
    }
    request.setAttribute("datasetId", datasetId);
    // Store the job name/group in the session so later requests can ping the
    // scheduler and report progress for this extract.
    if (jobDetailFactoryBean != null)
        request.getSession().setAttribute("jobName", jobDetailFactoryBean.getObject().getKey().getName());
    if (simpleTrigger != null)
        request.getSession().setAttribute("groupName", this.TRIGGER_GROUP_NAME);
    request.getSession().setAttribute("datasetId", Integer.valueOf(dsBean.getId()));
    return map;
}
Use of org.akaza.openclinica.dao.extract.DatasetDAO in the OpenClinica project (by OpenClinica): the processRequest method of the AccessFileServlet class.
/**
 * Streams a previously generated dataset extract file back to the browser.
 * Looks up the archived file by the "fileId" request parameter, verifies the
 * current user's study may access the dataset, sets download headers based on
 * the file extension, and forwards to the generated-file JSP.
 *
 * @throws InsufficientPermissionException when the dataset belongs neither to
 *         the current study nor to its parent study
 */
@Override
public void processRequest() throws Exception {
    FormProcessor fp = new FormProcessor(request);
    int fileId = fp.getInt("fileId");
    ArchivedDatasetFileDAO asdfdao = new ArchivedDatasetFileDAO(sm.getDataSource());
    DatasetDAO dsDao = new DatasetDAO(sm.getDataSource());
    ArchivedDatasetFileBean asdfBean = (ArchivedDatasetFileBean) asdfdao.findByPK(fileId);
    StudyDAO studyDao = new StudyDAO(sm.getDataSource());
    DatasetBean dsBean = (DatasetBean) dsDao.findByPK(asdfBean.getDatasetId());
    int parentId = currentStudy.getParentStudyId();
    if (parentId == 0) {
        // Logged in at study (not site) level: compare against the parent
        // study of the study the dataset was created under.
        StudyBean studyBean = (StudyBean) studyDao.findByPK(dsBean.getStudyId());
        parentId = studyBean.getParentStudyId();
    }
    // Access is allowed only when the dataset belongs to the current study
    // either directly or through its parent study. (Equivalent to the original
    // braceless nested ifs, now a single guarded condition.)
    if (parentId != currentStudy.getId() && dsBean.getStudyId() != currentStudy.getId()) {
        addPageMessage(respage.getString("no_have_correct_privilege_current_study") + respage.getString("change_study_contact_sysadmin"));
        throw new InsufficientPermissionException(Page.MENU_SERVLET, resexception.getString("not_allowed_access_extract_data_servlet"), "1");
    }
    Page finalTarget = Page.EXPORT_DATA_CUSTOM;
    logger.debug("found file reference: " + asdfBean.getFileReference() + " and file name: " + asdfBean.getName());
    setDownloadHeaders(asdfBean);
    finalTarget.setFileName("/WEB-INF/jsp/extract/generatedFileDataset.jsp");
    request.setAttribute("generate", asdfBean.getFileReference());
    response.setHeader("Pragma", "public");
    forwardPage(finalTarget);
}

/**
 * Sets Content-Disposition and Content-Type on the response based on the
 * extension of the archived file's reference. Unknown extensions set no
 * headers — the historical behavior for plain-text files shown in the browser
 * (not needed anymore? tbh 10/2010).
 */
private void setDownloadHeaders(ArchivedDatasetFileBean asdfBean) {
    String fileReference = asdfBean.getFileReference();
    String attachment = "attachment; filename=\"" + asdfBean.getName() + "\";";
    if (fileReference.endsWith(".zip")) {
        response.setHeader("Content-disposition", attachment);
        response.setContentType("application/zip");
    } else if (fileReference.endsWith(".pdf")) {
        response.setHeader("Content-disposition", attachment);
        response.setContentType("application/pdf");
    } else if (fileReference.endsWith(".csv")) {
        response.setHeader("Content-disposition", attachment);
        response.setContentType("text/csv");
    } else if (fileReference.endsWith(".xml")) {
        response.setHeader("Content-disposition", attachment);
        response.setContentType("text/xml");
    } else if (fileReference.endsWith(".html")) {
        // html is rendered inline, so no "attachment" disposition
        response.setHeader("Content-disposition", "filename=\"" + asdfBean.getName() + "\";");
        response.setContentType("text/html; charset=utf-8");
    }
}
Aggregations