Use of org.apache.commons.vfs2.UserAuthenticationData.Type in project pentaho-kettle by pentaho.
The class CsvInput, method readOneRow.
/**
* Read a single row of data from the file...
*
* @param skipRow whether the row should be skipped: a header row, or part of a row during a parallel read
* @param ignoreEnclosures whether enclosures should be ignored, e.g. when we need to skip part of the row during a
* parallel read
* @return a row of data...
* @throws KettleException in case of a conversion error or an unexpected I/O problem
*/
private Object[] readOneRow(boolean skipRow, boolean ignoreEnclosures) throws KettleException {
try {
Object[] outputRowData = RowDataUtil.allocateRowData(data.outputRowMeta.size());
int outputIndex = 0;
boolean newLineFound = false;
boolean endOfBuffer = false;
List<Exception> conversionExceptions = null;
List<ValueMetaInterface> exceptionFields = null;
// Set file format to mixed if empty
if (StringUtils.isBlank(meta.getFileFormat())) {
meta.setFileFormat("mixed");
}
//
while (!newLineFound && outputIndex < data.fieldsMapping.size()) {
if (data.resizeBufferIfNeeded()) {
// there is no end of line delimiter
if (outputRowData != null) {
// return the partial row; fields not yet parsed remain null
if (outputIndex > 0) {
//
if (meta.isIncludingFilename() && !Utils.isEmpty(meta.getFilenameField())) {
if (meta.isLazyConversionActive()) {
outputRowData[data.filenameFieldIndex] = data.binaryFilename;
} else {
outputRowData[data.filenameFieldIndex] = data.filenames[data.filenr - 1];
}
}
if (data.isAddingRowNumber) {
outputRowData[data.rownumFieldIndex] = data.rowNumber++;
}
incrementLinesInput();
return outputRowData;
}
}
// nothing more to read, call it a day.
return null;
}
// OK, at this point we should have data in the byteBuffer and we should be able to scan for the next
// delimiter (;)
// So let's look for a delimiter.
// Also skip over the enclosures ("); this does NOT take escaped enclosures into account.
// Later we can add an option for having escaped or double enclosures in the file. <sigh>
//
boolean delimiterFound = false;
boolean enclosureFound = false;
boolean doubleLineEnd = false;
int escapedEnclosureFound = 0;
boolean ignoreEnclosuresInField = ignoreEnclosures;
while (!delimiterFound && !newLineFound && !endOfBuffer) {
//
if (data.delimiterFound()) {
delimiterFound = true;
} else if ((!meta.isNewlinePossibleInFields() || outputIndex == data.fieldsMapping.size() - 1) && data.newLineFound()) {
// Perhaps we found a (premature) new line?
//
// In case we are not using an enclosure and fields may contain new lines,
// we need to make sure that we check the newlines-possible flag.
// If the flag is enabled, we skip newline checking except for the last field in the row.
// For that field we can't support newlines without an enclosure (handled below).
//
newLineFound = true;
// Skip new line character
for (int i = 0; i < data.encodingType.getLength(); i++) {
data.moveEndBufferPointer();
}
// Re-check for double new line (\r\n)...
if (data.newLineFound()) {
// Found another one, need to skip it later
doubleLineEnd = true;
}
} else if (data.enclosureFound() && !ignoreEnclosuresInField) {
int enclosurePosition = data.getEndBuffer();
int fieldFirstBytePosition = data.getStartBuffer();
if (fieldFirstBytePosition == enclosurePosition) {
// Perhaps we need to skip over an enclosed part?
// We always expect exactly one enclosure character
// If we find the enclosure doubled, we consider it escaped.
// --> "" is converted to " later on.
//
enclosureFound = true;
boolean keepGoing;
do {
if (data.moveEndBufferPointer()) {
enclosureFound = false;
break;
}
keepGoing = !data.enclosureFound();
if (!keepGoing) {
// Read another byte...
if (!data.endOfBuffer() && data.moveEndBufferPointer()) {
break;
}
if (data.enclosure.length > 1) {
data.moveEndBufferPointer();
}
// If this character is also an enclosure, we can consider the enclosure "escaped".
// As such, if this is an enclosure, we keep going...
//
keepGoing = data.enclosureFound();
if (keepGoing) {
escapedEnclosureFound++;
}
}
} while (keepGoing);
//
if (data.endOfBuffer()) {
endOfBuffer = true;
break;
}
} else {
// Ignoring enclosure if it's not at the field start
ignoreEnclosuresInField = true;
}
} else {
if (data.moveEndBufferPointer()) {
endOfBuffer = true;
break;
}
}
}
// If we're still here, we found a delimiter...
// Since the starting point never really changed, we can just grab the range:
//
// [startBuffer, endBuffer)
//
// This is the part we want, starting at data.byteBuffer[data.startBuffer].
//
byte[] field = data.getField(delimiterFound, enclosureFound, newLineFound, endOfBuffer);
//
if (escapedEnclosureFound > 0) {
if (log.isRowLevel()) {
logRowlevel("Escaped enclosures found in " + new String(field));
}
field = data.removeEscapedEnclosures(field, escapedEnclosureFound);
}
final int currentFieldIndex = outputIndex++;
final int actualFieldIndex = data.fieldsMapping.fieldMetaIndex(currentFieldIndex);
if (actualFieldIndex != FieldsMapping.FIELD_DOES_NOT_EXIST) {
if (!skipRow) {
if (meta.isLazyConversionActive()) {
outputRowData[actualFieldIndex] = field;
} else {
// We're not lazy so we convert the data right here and now.
// The convert object uses binary storage as such we just have to ask the native type from it.
// That will do the actual conversion.
//
ValueMetaInterface sourceValueMeta = data.convertRowMeta.getValueMeta(actualFieldIndex);
try {
outputRowData[actualFieldIndex] = sourceValueMeta.convertBinaryStringToNativeType(field);
} catch (KettleValueException e) {
// There was a conversion error,
//
outputRowData[actualFieldIndex] = null;
if (conversionExceptions == null) {
conversionExceptions = new ArrayList<Exception>();
exceptionFields = new ArrayList<ValueMetaInterface>();
}
conversionExceptions.add(e);
exceptionFields.add(sourceValueMeta);
}
}
} else {
// nothing for the header, no conversions here.
outputRowData[actualFieldIndex] = null;
}
}
// empty column at the end of the row (see the Jira case for details)
if ((!newLineFound && outputIndex < data.fieldsMapping.size()) || (newLineFound && doubleLineEnd)) {
int i = 0;
while ((!data.newLineFound() && (i < data.delimiter.length))) {
data.moveEndBufferPointer();
i++;
}
switch(meta.getFileFormatTypeNr()) {
case TextFileInputMeta.FILE_FORMAT_DOS:
if (data.newLineFound()) {
if (doubleLineEnd) {
data.moveEndBufferPointerXTimes(data.encodingType.getLength());
} else {
// Re-check for a new Line
data.moveEndBufferPointerXTimes(data.encodingType.getLength());
if (!data.newLineFound()) {
throw new KettleFileException(BaseMessages.getString(PKG, "TextFileInput.Log.SingleLineFound"));
}
}
}
break;
case TextFileInputMeta.FILE_FORMAT_MIXED:
if (data.isCarriageReturn() || doubleLineEnd) {
data.moveEndBufferPointerXTimes(data.encodingType.getLength());
}
break;
}
}
data.setStartBuffer(data.getEndBuffer());
}
//
if (!newLineFound && !data.resizeBufferIfNeeded()) {
do {
data.moveEndBufferPointer();
if (data.resizeBufferIfNeeded()) {
// nothing more to read.
break;
}
// TODO: if we're using quoting we might be dealing with a very dirty file with quoted newlines in trailing
// fields. (imagine that)
// In that particular case we want to use the same logic we use above (refactored a bit) to skip these fields.
} while (!data.newLineFound());
if (!data.resizeBufferIfNeeded()) {
while (data.newLineFound()) {
data.moveEndBufferPointer();
if (data.resizeBufferIfNeeded()) {
// nothing more to read.
break;
}
}
}
// Make sure we start at the right position the next time around.
data.setStartBuffer(data.getEndBuffer());
}
//
if (meta.isIncludingFilename() && !Utils.isEmpty(meta.getFilenameField())) {
if (meta.isLazyConversionActive()) {
outputRowData[data.filenameFieldIndex] = data.binaryFilename;
} else {
outputRowData[data.filenameFieldIndex] = data.filenames[data.filenr - 1];
}
}
if (data.isAddingRowNumber) {
outputRowData[data.rownumFieldIndex] = data.rowNumber++;
}
if (!ignoreEnclosures) {
incrementLinesInput();
}
if (conversionExceptions != null && conversionExceptions.size() > 0) {
//
throw new KettleConversionException("There were " + conversionExceptions.size() + " conversion errors on line " + getLinesInput(), conversionExceptions, exceptionFields, outputRowData);
}
return outputRowData;
} catch (KettleConversionException e) {
throw e;
} catch (IOException e) {
throw new KettleFileException("Exception reading line using NIO", e);
}
}
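The loop above only counts doubled enclosures (escapedEnclosureFound); the actual collapsing of "" into " happens afterwards in data.removeEscapedEnclosures. As a rough standalone sketch of that unescaping step (the class and helper below are hypothetical, not Kettle's actual implementation):

import java.io.ByteArrayOutputStream;

public final class EnclosureUnescapeSketch {

    // Collapse doubled enclosure bytes ("" -> ") in a raw field, mirroring
    // what CsvInputData.removeEscapedEnclosures is expected to do.
    static byte[] removeEscapedEnclosures(byte[] field, byte enclosure) {
        ByteArrayOutputStream out = new ByteArrayOutputStream(field.length);
        for (int i = 0; i < field.length; i++) {
            out.write(field[i]);
            // If this byte starts an escaped pair, skip its twin.
            if (field[i] == enclosure && i + 1 < field.length && field[i + 1] == enclosure) {
                i++;
            }
        }
        return out.toByteArray();
    }

    public static void main(String[] args) {
        byte[] raw = "say \"\"hi\"\" there".getBytes();
        System.out.println(new String(removeEscapedEnclosures(raw, (byte) '"')));
        // prints: say "hi" there
    }
}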
Use of org.apache.commons.vfs2.UserAuthenticationData.Type in project pentaho-kettle by pentaho.
The class CsvInput, method processRow.
public boolean processRow(StepMetaInterface smi, StepDataInterface sdi) throws KettleException {
meta = (CsvInputMeta) smi;
data = (CsvInputData) sdi;
if (first) {
first = false;
data.outputRowMeta = new RowMeta();
meta.getFields(data.outputRowMeta, getStepname(), null, null, this, repository, metaStore);
if (data.filenames == null) {
// We're expecting the list of filenames from the previous step(s)...
//
getFilenamesFromPreviousSteps();
}
// We only run in parallel if we have at least one file to process
// AND if we have more than one step copy running...
//
data.parallel = meta.isRunningInParallel() && data.totalNumberOfSteps > 1;
// The conversion logic for when lazy conversion is turned off is simple:
// Pretend it's a lazy conversion object anyway and get the native type during conversion.
//
data.convertRowMeta = data.outputRowMeta.clone();
for (ValueMetaInterface valueMeta : data.convertRowMeta.getValueMetaList()) {
valueMeta.setStorageType(ValueMetaInterface.STORAGE_TYPE_BINARY_STRING);
}
// Calculate the indexes for the filename and row number fields
//
data.filenameFieldIndex = -1;
if (!Utils.isEmpty(meta.getFilenameField()) && meta.isIncludingFilename()) {
data.filenameFieldIndex = meta.getInputFields().length;
}
data.rownumFieldIndex = -1;
if (!Utils.isEmpty(meta.getRowNumField())) {
data.rownumFieldIndex = meta.getInputFields().length;
if (data.filenameFieldIndex >= 0) {
data.rownumFieldIndex++;
}
}
//
if (data.parallel) {
prepareToRunInParallel();
}
//
if (!openNextFile()) {
setOutputDone();
// nothing to see here, move along...
return false;
}
}
//
if (data.parallel) {
if (data.totalBytesRead >= data.blockToRead) {
// stop reading
setOutputDone();
return false;
}
}
try {
// get row, set busy!
Object[] outputRowData = readOneRow(false, false);
// no more input to be expected...
if (outputRowData == null) {
if (openNextFile()) {
// try again on the next loop...
return true;
} else {
// last file, end here
setOutputDone();
return false;
}
} else {
// copy row to possible alternate rowset(s).
putRow(data.outputRowMeta, outputRowData);
if (checkFeedback(getLinesInput())) {
if (log.isBasic()) {
logBasic(BaseMessages.getString(PKG, "CsvInput.Log.LineNumber", Long.toString(getLinesInput())));
}
}
}
} catch (KettleConversionException e) {
if (getStepMeta().isDoingErrorHandling()) {
StringBuilder errorDescriptions = new StringBuilder(100);
StringBuilder errorFields = new StringBuilder(50);
for (int i = 0; i < e.getCauses().size(); i++) {
if (i > 0) {
errorDescriptions.append(", ");
errorFields.append(", ");
}
errorDescriptions.append(e.getCauses().get(i).getMessage());
errorFields.append(e.getFields().get(i).toStringMeta());
}
putError(data.outputRowMeta, e.getRowData(), e.getCauses().size(), errorDescriptions.toString(), errorFields.toString(), "CSVINPUT001");
} else {
//
throw new KettleException(e.getMessage(), e.getCauses().get(0));
}
}
return true;
}
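The comment about pretending lazy conversion is active describes the core trick of this step: clone the output row meta, force every field to binary-string storage, and let convertBinaryStringToNativeType do the real decoding per field. A minimal sketch of that idea, using only Kettle calls already visible above (the convertRow helper itself is hypothetical):

import org.pentaho.di.core.exception.KettleValueException;
import org.pentaho.di.core.row.RowMetaInterface;
import org.pentaho.di.core.row.ValueMetaInterface;

public final class LazyConversionSketch {

    // Decode one row of raw field bytes through a binary-storage clone of the output row meta.
    static Object[] convertRow(RowMetaInterface outputRowMeta, byte[][] rawFields)
            throws KettleValueException {
        RowMetaInterface convertRowMeta = outputRowMeta.clone();
        for (ValueMetaInterface valueMeta : convertRowMeta.getValueMetaList()) {
            valueMeta.setStorageType(ValueMetaInterface.STORAGE_TYPE_BINARY_STRING);
        }
        Object[] row = new Object[outputRowMeta.size()];
        for (int i = 0; i < rawFields.length; i++) {
            // Each value meta knows how to turn its binary representation into the native type.
            row[i] = convertRowMeta.getValueMeta(i).convertBinaryStringToNativeType(rawFields[i]);
        }
        return row;
    }
}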
Use of org.apache.commons.vfs2.UserAuthenticationData.Type in project pentaho-kettle by pentaho.
The class ParGzipCsvInput, method readOneRow.
/**
* Read a single row of data from the file...
*
* @param doConversions
* whether to perform data conversions; set to false for the header row.
* @return a row of data...
* @throws KettleException in case of an error reading or converting the data
*/
private Object[] readOneRow(boolean doConversions) throws KettleException {
//
if (data.totalBytesRead > data.blockSize) {
//
return null;
}
try {
Object[] outputRowData = RowDataUtil.allocateRowData(data.outputRowMeta.size());
int outputIndex = 0;
boolean newLineFound = false;
int newLines = 0;
//
while (!newLineFound && outputIndex < meta.getInputFields().length) {
if (checkBufferSize()) {
// there is no end of line delimiter
if (outputRowData != null) {
// return the partial row; fields not yet parsed remain null
if (outputIndex > 0) {
return outputRowData;
}
}
// nothing more to read, call it a day.
return null;
}
// OK, at this point we should have data in the byteBuffer and we should be able to scan for the next
// delimiter (;)
// So let's look for a delimiter.
// Also skip over the enclosures ("); this does NOT take escaped enclosures into account.
// Later we can add an option for having escaped or double enclosures in the file. <sigh>
//
boolean delimiterFound = false;
boolean enclosureFound = false;
int escapedEnclosureFound = 0;
while (!delimiterFound) {
//
if (data.byteBuffer[data.endBuffer] == data.delimiter[0]) {
delimiterFound = true;
} else if (data.byteBuffer[data.endBuffer] == '\n' || data.byteBuffer[data.endBuffer] == '\r') {
// Perhaps we found a new line?
// "\n\r".getBytes()
//
data.endBuffer++;
data.totalBytesRead++;
newLines = 1;
if (!checkBufferSize()) {
// re-check for a double new line (\r\n)...
if (data.byteBuffer[data.endBuffer] == '\n' || data.byteBuffer[data.endBuffer] == '\r') {
data.endBuffer++;
data.totalBytesRead++;
newLines = 2;
checkBufferSize();
}
}
newLineFound = true;
delimiterFound = true;
} else if (data.enclosure != null && data.byteBuffer[data.endBuffer] == data.enclosure[0]) {
// Perhaps we need to skip over an enclosed part?
// We always expect exactly one enclosure character
// If we find the enclosure doubled, we consider it escaped.
// --> "" is converted to " later on.
//
enclosureFound = true;
boolean keepGoing;
do {
data.endBuffer++;
if (checkBufferSize()) {
enclosureFound = false;
break;
}
keepGoing = data.byteBuffer[data.endBuffer] != data.enclosure[0];
if (!keepGoing) {
// We found an enclosure character.
// Read another byte...
//
data.endBuffer++;
if (checkBufferSize()) {
enclosureFound = false;
break;
}
// If this character is also an enclosure, we can consider the enclosure "escaped".
// As such, if this is an enclosure, we keep going...
//
keepGoing = data.byteBuffer[data.endBuffer] == data.enclosure[0];
if (keepGoing) {
escapedEnclosureFound++;
}
}
} while (keepGoing);
//
if (data.endBuffer >= data.bufferSize) {
// consider it a newline to break out of the upper while loop
newLineFound = true;
// to remove the enclosures in case the newline is missing on the last line.
newLines += 2;
break;
}
} else {
data.endBuffer++;
data.totalBytesRead++;
if (checkBufferSize()) {
if (data.endBuffer >= data.bufferSize) {
newLineFound = true;
break;
}
}
}
}
// If we're still here, we found a delimiter...
// Since the starting point never really changed, we can just grab the range:
//
// [startBuffer, endBuffer)
//
// This is the part we want.
//
int length = data.endBuffer - data.startBuffer;
if (newLineFound) {
length -= newLines;
if (length <= 0) {
length = 0;
}
}
if (enclosureFound) {
data.startBuffer++;
length -= 2;
if (length <= 0) {
length = 0;
}
}
if (length <= 0) {
length = 0;
}
byte[] field = new byte[length];
System.arraycopy(data.byteBuffer, data.startBuffer, field, 0, length);
//
if (escapedEnclosureFound > 0) {
if (log.isRowLevel()) {
logRowlevel("Escaped enclosures found in " + new String(field));
}
field = data.removeEscapedEnclosures(field, escapedEnclosureFound);
}
if (doConversions) {
if (meta.isLazyConversionActive()) {
outputRowData[outputIndex++] = field;
} else {
// We're not lazy so we convert the data right here and now.
// The convert object uses binary storage as such we just have to ask the native type from it.
// That will do the actual conversion.
//
ValueMetaInterface sourceValueMeta = data.convertRowMeta.getValueMeta(outputIndex);
outputRowData[outputIndex++] = sourceValueMeta.convertBinaryStringToNativeType(field);
}
} else {
// nothing for the header, no conversions here.
outputRowData[outputIndex++] = null;
}
// OK, move on to the next field...
if (!newLineFound) {
data.endBuffer++;
data.totalBytesRead++;
}
data.startBuffer = data.endBuffer;
}
//
if (!newLineFound && !checkBufferSize()) {
do {
data.endBuffer++;
data.totalBytesRead++;
if (checkBufferSize()) {
// nothing more to read.
break;
}
// TODO: if we're using quoting we might be dealing with a very dirty file with quoted newlines in trailing
// fields. (imagine that)
// In that particular case we want to use the same logic we use above (refactored a bit) to skip these fields.
} while (data.byteBuffer[data.endBuffer] != '\n' && data.byteBuffer[data.endBuffer] != '\r');
if (!checkBufferSize()) {
while (data.byteBuffer[data.endBuffer] == '\n' || data.byteBuffer[data.endBuffer] == '\r') {
data.endBuffer++;
data.totalBytesRead++;
if (checkBufferSize()) {
// nothing more to read.
break;
}
}
}
// Make sure we start at the right position the next time around.
data.startBuffer = data.endBuffer;
}
//
if (meta.isIncludingFilename() && !Utils.isEmpty(meta.getFilenameField())) {
if (meta.isLazyConversionActive()) {
outputRowData[data.filenameFieldIndex] = data.binaryFilename;
} else {
outputRowData[data.filenameFieldIndex] = data.filenames[data.filenr - 1];
}
}
if (data.isAddingRowNumber) {
outputRowData[data.rownumFieldIndex] = Long.valueOf(data.rowNumber++);
}
incrementLinesInput();
return outputRowData;
} catch (Exception e) {
throw new KettleFileException("Exception reading line of data", e);
}
}
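Stripped of the gzip streaming, buffer refills, and enclosure handling, the heart of this method is the byte scan that System.arraycopy finishes: copy each half-open range [startBuffer, endBuffer) out of the buffer. A minimal sketch of just that scan over an in-memory line (the helper is hypothetical, plain Java):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public final class FieldScanSketch {

    // Split one buffered line into fields on a single-byte delimiter,
    // stopping at the first newline byte. No enclosures, no refills.
    static List<byte[]> splitLine(byte[] buffer, byte delimiter) {
        List<byte[]> fields = new ArrayList<>();
        int start = 0;
        for (int end = 0; end <= buffer.length; end++) {
            boolean atEnd = end == buffer.length;
            boolean newLine = !atEnd && (buffer[end] == '\n' || buffer[end] == '\r');
            if (atEnd || newLine || buffer[end] == delimiter) {
                // Grab the half-open range [start, end), like the arraycopy above.
                fields.add(Arrays.copyOfRange(buffer, start, end));
                if (atEnd || newLine) {
                    break;
                }
                start = end + 1;
            }
        }
        return fields;
    }

    public static void main(String[] args) {
        for (byte[] field : splitLine("a;b;c\n".getBytes(), (byte) ';')) {
            System.out.println(new String(field));
        }
        // prints: a, b and c on separate lines
    }
}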
Use of org.apache.commons.vfs2.UserAuthenticationData.Type in project pentaho-kettle by pentaho.
The class FileInputList, method createFolderList.
public static FileInputList createFolderList(VariableSpace space, String[] folderName, String[] folderRequired) {
FileInputList fileInputList = new FileInputList();
// Replace possible environment variables...
final String[] realfolder = space.environmentSubstitute(folderName);
for (int i = 0; i < realfolder.length; i++) {
final String onefile = realfolder[i];
final boolean onerequired = YES.equalsIgnoreCase(folderRequired[i]);
final boolean subdirs = true;
final FileTypeFilter filter = FileTypeFilter.ONLY_FOLDERS;
if (Utils.isEmpty(onefile)) {
continue;
}
FileObject directoryFileObject = null;
try {
// Find all folder names in this directory
//
directoryFileObject = KettleVFS.getFileObject(onefile, space);
if (directoryFileObject != null && directoryFileObject.getType() == FileType.FOLDER) {
// it's a directory
FileObject[] fileObjects = directoryFileObject.findFiles(new AllFileSelector() {
@Override
public boolean traverseDescendents(FileSelectInfo info) {
return (info.getDepth() == 0 || subdirs) && // Check if one has permission to list this folder
hasAccess(info.getFile());
}
private boolean hasAccess(FileObject fileObject) {
try {
if (fileObject instanceof LocalFile) {
// fileObject.isReadable() wrongly returns true on Windows file systems even when the file is not readable
return Files.isReadable(Paths.get((new File(fileObject.getName().getPath())).toURI()));
}
return fileObject.isReadable();
} catch (FileSystemException e) {
// Something went wrong... well, let's assume "no access"!
return false;
}
}
@Override
public boolean includeFile(FileSelectInfo info) {
// Never return the parent directory of a file list.
if (info.getDepth() == 0) {
return false;
}
FileObject fileObject = info.getFile();
try {
return (fileObject != null && // Is this an allowed type?
filter.isFileTypeAllowed(fileObject.getType()) && // Check if one has permission to access it
hasAccess(fileObject));
} catch (IOException ex) {
// Upon error don't process the file.
return false;
}
}
});
if (fileObjects != null) {
for (int j = 0; j < fileObjects.length; j++) {
if (fileObjects[j].exists()) {
fileInputList.addFile(fileObjects[j]);
}
}
}
if (Utils.isEmpty(fileObjects)) {
if (onerequired) {
fileInputList.addNonAccessibleFile(directoryFileObject);
}
}
// Sort the list: quicksort, only for regular files
fileInputList.sortFiles();
} else {
if (onerequired && !directoryFileObject.exists()) {
fileInputList.addNonExistantFile(directoryFileObject);
}
}
} catch (Exception e) {
log.logError(Const.getStackTracker(e));
} finally {
try {
if (directoryFileObject != null) {
directoryFileObject.close();
}
directoryFileObject = null;
} catch (Exception e) {
// Ignore
}
}
}
return fileInputList;
}
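For context, a small usage sketch of createFolderList: resolve two folder roots, with only the second marked required, then walk whatever sub-folders passed the readability filter. The paths are illustrative, and the sketch assumes the Kettle environment has been initialized so VFS resolution works:

import org.apache.commons.vfs2.FileObject;
import org.pentaho.di.core.KettleEnvironment;
import org.pentaho.di.core.fileinput.FileInputList;
import org.pentaho.di.core.variables.Variables;

public final class FolderListSketch {
    public static void main(String[] args) throws Exception {
        KettleEnvironment.init();
        Variables space = new Variables();
        // Only the second root is required ("Y"); a missing optional root is skipped silently.
        FileInputList folders = FileInputList.createFolderList(
                space,
                new String[] { "/tmp/data", "/opt/etl/input" },
                new String[] { "N", "Y" });
        for (FileObject folder : folders.getFiles()) {
            System.out.println(folder.getName().getURI());
        }
    }
}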
Use of org.apache.commons.vfs2.UserAuthenticationData.Type in project pentaho-kettle by pentaho.
The class AddExportServlet, method doGet.
/**
* <div id="mindtouch">
* <h1>/kettle/addExport</h1>
* <a name="POST"></a>
* <h2>POST</h2>
* <p>Uploads and executes a previously exported job or transformation.
* The uploaded zip file contains the job or transformation to be executed; the method relies
* on the input parameters to find the entity to be executed. The archive is
* transferred within the request body.</p>
*
* <p>The <code>file url</code> of the executed entity will be returned in the response object,
* or a <code>message</code> describing the error that occurred. To determine whether the call was
* successful, rely on the <code>result</code> parameter in the response.</p>
*
* <p><b>Example Request:</b><br />
* <pre function="syntax.xml">
* POST /kettle/addExport/?type=job&load=dummy_job.kjb
* </pre>
* The request body should contain a zip file prepared for Carte execution.
* </p>
* <h3>Parameters</h3>
* <table class="pentaho-table">
* <tbody>
* <tr>
* <th>name</th>
* <th>description</th>
* <th>type</th>
* </tr>
* <tr>
* <td>type</td>
* <td>The type of the entity to be executed either <code>job</code> or <code>trans</code>.</td>
* <td>query</td>
* </tr>
* <tr>
* <td>load</td>
* <td>The name of the entity within archive to be executed.</td>
* <td>query</td>
* </tr>
* </tbody>
* </table>
*
* <h3>Response Body</h3>
*
* <table class="pentaho-table">
* <tbody>
* <tr>
* <td align="right">element:</td>
* <td>(custom)</td>
* </tr>
* <tr>
* <td align="right">media types:</td>
* <td>application/xml</td>
* </tr>
* </tbody>
* </table>
* <p>The response wraps the file url of the entity that was executed, or the error stack trace if an error occurred.
* The response has <code>result</code> OK if there were no errors; otherwise it returns ERROR.</p>
*
* <p><b>Example Response:</b></p>
* <pre function="syntax.xml">
* <?xml version="1.0" encoding="UTF-8"?>
* <webresult>
* <result>OK</result>
* <message>zip:file:///temp/export_ee2a67de-6a72-11e4-82c0-4701a2bac6a5.zip!dummy_job.kjb</message>
* <id>74cf4219-c881-4633-a71a-2ed16b7db7b8</id>
* </webresult>
* </pre>
*
* <h3>Status Codes</h3>
* <table class="pentaho-table">
* <tbody>
* <tr>
* <th>code</th>
* <th>description</th>
* </tr>
* <tr>
* <td>200</td>
* <td>Request was processed and XML response is returned.</td>
* </tr>
* <tr>
* <td>500</td>
* <td>Internal server error occurs during request processing.</td>
* </tr>
* </tbody>
*</table>
*</div>
*/
public void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
if (isJettyMode() && !request.getRequestURI().startsWith(CONTEXT_PATH)) {
return;
}
if (log.isDebug()) {
logDebug("Addition of export requested");
}
PrintWriter out = response.getWriter();
// read from the client
InputStream in = request.getInputStream();
if (log.isDetailed()) {
logDetailed("Encoding: " + request.getCharacterEncoding());
}
boolean isJob = TYPE_JOB.equalsIgnoreCase(request.getParameter(PARAMETER_TYPE));
// the resource to load
String load = request.getParameter(PARAMETER_LOAD);
response.setContentType("text/xml");
out.print(XMLHandler.getXMLHeader());
response.setStatus(HttpServletResponse.SC_OK);
OutputStream outputStream = null;
try {
FileObject tempFile = KettleVFS.createTempFile("export", ".zip", System.getProperty("java.io.tmpdir"));
outputStream = KettleVFS.getOutputStream(tempFile, false);
// Pass the input directly to a temporary file
//
// int size = 0;
int c;
while ((c = in.read()) != -1) {
outputStream.write(c);
// size++;
}
outputStream.flush();
outputStream.close();
// don't close it twice
outputStream = null;
String archiveUrl = tempFile.getName().toString();
String fileUrl = null;
String carteObjectId = null;
SimpleLoggingObject servletLoggingObject = new SimpleLoggingObject(CONTEXT_PATH, LoggingObjectType.CARTE, null);
//
if (!Utils.isEmpty(load)) {
fileUrl = "zip:" + archiveUrl + "!" + load;
if (isJob) {
// Open the job from inside the ZIP archive
//
KettleVFS.getFileObject(fileUrl);
// never with a repository
JobMeta jobMeta = new JobMeta(fileUrl, null);
// Also read the execution configuration information
//
String configUrl = "zip:" + archiveUrl + "!" + Job.CONFIGURATION_IN_EXPORT_FILENAME;
Document configDoc = XMLHandler.loadXMLFile(configUrl);
JobExecutionConfiguration jobExecutionConfiguration = new JobExecutionConfiguration(XMLHandler.getSubNode(configDoc, JobExecutionConfiguration.XML_TAG));
carteObjectId = UUID.randomUUID().toString();
servletLoggingObject.setContainerObjectId(carteObjectId);
servletLoggingObject.setLogLevel(jobExecutionConfiguration.getLogLevel());
Job job = new Job(null, jobMeta, servletLoggingObject);
//
if (jobExecutionConfiguration.isExpandingRemoteJob()) {
job.addDelegationListener(new CarteDelegationHandler(getTransformationMap(), getJobMap()));
}
// store it all in the map...
//
getJobMap().addJob(job.getJobname(), carteObjectId, job, new JobConfiguration(jobMeta, jobExecutionConfiguration));
// Apply the execution configuration...
//
log.setLogLevel(jobExecutionConfiguration.getLogLevel());
job.setArguments(jobExecutionConfiguration.getArgumentStrings());
jobMeta.injectVariables(jobExecutionConfiguration.getVariables());
// Also copy the parameters over...
//
Map<String, String> params = jobExecutionConfiguration.getParams();
for (Map.Entry<String, String> entry : params.entrySet()) {
jobMeta.setParameterValue(entry.getKey(), entry.getValue());
}
} else {
// Open the transformation from inside the ZIP archive
//
TransMeta transMeta = new TransMeta(fileUrl);
// Also read the execution configuration information
//
String configUrl = "zip:" + archiveUrl + "!" + Trans.CONFIGURATION_IN_EXPORT_FILENAME;
Document configDoc = XMLHandler.loadXMLFile(configUrl);
TransExecutionConfiguration executionConfiguration = new TransExecutionConfiguration(XMLHandler.getSubNode(configDoc, TransExecutionConfiguration.XML_TAG));
carteObjectId = UUID.randomUUID().toString();
servletLoggingObject.setContainerObjectId(carteObjectId);
servletLoggingObject.setLogLevel(executionConfiguration.getLogLevel());
Trans trans = new Trans(transMeta, servletLoggingObject);
// store it all in the map...
//
getTransformationMap().addTransformation(trans.getName(), carteObjectId, trans, new TransConfiguration(transMeta, executionConfiguration));
}
} else {
fileUrl = archiveUrl;
}
out.println(new WebResult(WebResult.STRING_OK, fileUrl, carteObjectId));
} catch (Exception ex) {
out.println(new WebResult(WebResult.STRING_ERROR, Const.getStackTracker(ex)));
} finally {
if (outputStream != null) {
outputStream.close();
}
}
}
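From the client side, this servlet expects the export zip in the request body plus the type and load query parameters described in the javadoc. A minimal upload sketch with plain HttpURLConnection (host, port, file name, and the lack of authentication are assumptions; a real Carte server typically requires basic auth):

import java.io.InputStream;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Paths;

public final class AddExportClientSketch {
    public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:8080/kettle/addExport/?type=job&load=dummy_job.kjb");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("POST");
        conn.setDoOutput(true);
        conn.setRequestProperty("Content-Type", "application/octet-stream");
        // If the server is secured, add basic auth here, e.g. (default Carte credentials assumed):
        // conn.setRequestProperty("Authorization", "Basic "
        //     + java.util.Base64.getEncoder().encodeToString("cluster:cluster".getBytes()));
        try (OutputStream out = conn.getOutputStream()) {
            // Stream the previously exported archive as the raw request body.
            Files.copy(Paths.get("export_dummy_job.zip"), out);
        }
        try (InputStream in = conn.getInputStream()) {
            // The reply is the <webresult> XML shown in the javadoc above.
            System.out.write(in.readAllBytes());
            System.out.flush();
        }
    }
}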