Use of org.pentaho.di.core.ResultFile in project pentaho-kettle by pentaho.
The class YamlInput, method addFileToResultFilesname.
private void addFileToResultFilesname( FileObject file ) throws Exception {
  if ( meta.addResultFile() ) {
    // Add this to the result file names...
    ResultFile resultFile = new ResultFile( ResultFile.FILE_TYPE_GENERAL, file,
        getTransMeta().getName(), getStepname() );
    resultFile.setComment( BaseMessages.getString( PKG, "YamlInput.Log.FileAddedResult" ) );
    addResultFile( resultFile );
  }
}
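For context, a minimal sketch of how files registered this way can be read back once a transformation has finished, e.g. from Trans.getResult(). This is not part of the Kettle sources; the class name ResultFileDump is invented here, but Result.getResultFiles(), ResultFile.getTypeDesc() and ResultFile.getComment() are the real API.

import java.util.Map;

import org.pentaho.di.core.Result;
import org.pentaho.di.core.ResultFile;

public class ResultFileDump {
  /** Prints every file that steps registered through addResultFile(). */
  public static void dump( Result result ) {
    Map<String, ResultFile> resultFiles = result.getResultFiles();
    for ( ResultFile resultFile : resultFiles.values() ) {
      // getFile() returns the Apache VFS FileObject the step registered.
      System.out.println( resultFile.getFile().getName().getURI()
          + " [" + resultFile.getTypeDesc() + "] " + resultFile.getComment() );
    }
  }
}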
Use of org.pentaho.di.core.ResultFile in project pentaho-kettle by pentaho.
The class ZipFile, method addFilenameToResult.
private void addFilenameToResult() throws FileSystemException {
  if ( meta.isaddTargetFileNametoResult() ) {
    // Add this to the result file names...
    ResultFile resultFile = new ResultFile( ResultFile.FILE_TYPE_GENERAL, data.zipFile,
        getTransMeta().getName(), getStepname() );
    resultFile.setComment( BaseMessages.getString( PKG, "ZipFile.Log.FileAddedResult" ) );
    addResultFile( resultFile );
    if ( log.isDetailed() ) {
      log.logDetailed( toString(),
          BaseMessages.getString( PKG, "ZipFile.Log.FilenameAddResult", data.sourceFile.toString() ) );
    }
  }
}
Use of org.pentaho.di.core.ResultFile in project pentaho-kettle by pentaho.
The class TextFileInput, method init.
@Override
public boolean init( StepMetaInterface smi, StepDataInterface sdi ) {
  meta = (TextFileInputMeta) smi;
  data = (TextFileInputData) sdi;
  if ( super.init( smi, sdi ) ) {
    initErrorHandling();
    initReplayFactory();
    data.setFiles( meta.getTextFileList( this ) );
    data.filterProcessor = new TextFileFilterProcessor( meta.getFilter() );

    // If there are missing files, fail if we don't ignore errors
    //
    Result previousResult = getTrans().getPreviousResult();
    Map<String, ResultFile> resultFiles = ( previousResult != null ) ? previousResult.getResultFiles() : null;
    if ( ( previousResult == null || resultFiles == null || resultFiles.size() == 0 )
        && data.getFiles().nrOfMissingFiles() > 0 && !meta.isAcceptingFilenames() && !meta.isErrorIgnored() ) {
      logError( BaseMessages.getString( PKG, "TextFileInput.Log.Error.NoFilesSpecified" ) );
      return false;
    }

    String clusterSize = getVariable( Const.INTERNAL_VARIABLE_CLUSTER_SIZE );
    if ( !Utils.isEmpty( clusterSize ) && Integer.valueOf( clusterSize ) > 1 ) {
      // TODO: add metadata to configure this.
      String nr = getVariable( Const.INTERNAL_VARIABLE_SLAVE_SERVER_NUMBER );
      if ( log.isDetailed() ) {
        logDetailed( "Running on slave server #" + nr
            + " : assuming that each slave reads a dedicated part of the same file(s)." );
      }
    }

    // If no nullif field is supplied, take the default.
    // String null_value = nullif;
    // if (null_value == null) {
    //   // value = "";
    // }
    // String null_cmp = Const.rightPad(new StringBuilder(null_value), pol.length());

    // Calculate the file format type in advance so we can use a switch
    data.fileFormatType = meta.getFileFormatTypeNr();

    // Calculate the file type in advance: CSV or Fixed?
    data.fileType = meta.getFileTypeNr();

    // Handle the possibility of a variable substitution
    data.separator = environmentSubstitute( meta.getSeparator() );
    data.enclosure = environmentSubstitute( meta.getEnclosure() );
    data.escapeCharacter = environmentSubstitute( meta.getEscapeCharacter() );

    // Add additional fields
    if ( !Utils.isEmpty( meta.getShortFileNameField() ) ) {
      data.addShortFilename = true;
    }
    if ( !Utils.isEmpty( meta.getPathField() ) ) {
      data.addPath = true;
    }
    if ( !Utils.isEmpty( meta.getExtensionField() ) ) {
      data.addExtension = true;
    }
    if ( !Utils.isEmpty( meta.getSizeField() ) ) {
      data.addSize = true;
    }
    if ( !Utils.isEmpty( meta.isHiddenField() ) ) {
      data.addIsHidden = true;
    }
    if ( !Utils.isEmpty( meta.getLastModificationDateField() ) ) {
      data.addLastModificationDate = true;
    }
    if ( !Utils.isEmpty( meta.getUriField() ) ) {
      data.addUri = true;
    }
    if ( !Utils.isEmpty( meta.getRootUriField() ) ) {
      data.addRootUri = true;
    }
    return true;
  }
  return false;
}
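The missing-files guard above packs several conditions into one expression. As a reading aid only, here is a hedged restatement as a standalone helper; the class and method names are invented, but the logic mirrors the condition in init(): missing files are fatal only when nothing was handed over from a previous result, the step is not accepting filenames from a prior step, and errors are not being ignored.

import java.util.Map;

import org.pentaho.di.core.Result;
import org.pentaho.di.core.ResultFile;

public class MissingFilesGuard {
  /** Restates the guard from init() above (helper names invented for illustration). */
  public static boolean shouldFail( Result previousResult, int nrMissingFiles,
      boolean acceptingFilenames, boolean errorIgnored ) {
    Map<String, ResultFile> resultFiles =
        previousResult == null ? null : previousResult.getResultFiles();
    boolean noInheritedFiles = resultFiles == null || resultFiles.isEmpty();
    return noInheritedFiles && nrMissingFiles > 0 && !acceptingFilenames && !errorIgnored;
  }
}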
Use of org.pentaho.di.core.ResultFile in project pentaho-kettle by pentaho.
The class TextFileOutput, method initFileStreamWriter.
public void initFileStreamWriter( String filename ) throws KettleException {
  data.writer = null;
  try {
    BufferedOutputStream bufferedOutputStream;
    OutputStream fileOutputStream;
    CompressionOutputStream compressionOutputStream;
    TextFileOutputData.FileStream fileStreams = null;
    try {
      if ( meta.getSplitEvery() > 0 ) {
        if ( filename.equals( data.getFileStreamsCollection().getLastFileName() ) ) {
          fileStreams = data.getFileStreamsCollection().getLastStream();
        }
      } else {
        fileStreams = data.getFileStreamsCollection().getStream( filename );
      }

      // No stream yet for this filename means we are opening it for the first time.
      boolean writingToFileForFirstTime = fileStreams == null;
      boolean createParentDirIfNotExists = meta.isCreateParentFolder();
      boolean appendToExistingFile = meta.isFileAppended();

      if ( fileStreams == null ) {
        // Opening file for the first time
        CompressionProvider compressionProvider = getCompressionProvider();
        boolean isZipFile = compressionProvider instanceof ZIPCompressionProvider;
        if ( appendToExistingFile && isZipFile && isFileExists( filename ) ) {
          throw new KettleException( "Can not append to an existing zip file : " + filename );
        }

        int maxOpenFiles = getMaxOpenFiles();
        if ( ( maxOpenFiles > 0 ) && ( data.getFileStreamsCollection().getNumOpenFiles() >= maxOpenFiles ) ) {
          // If the file we're going to close is a zip file, remove it from the collection of files
          // that have been opened. We do this because it is not possible to reopen a zip file for
          // append. By removing it from the collection, if the same file is referenced later, it
          // will look like we're opening the file for the first time, and if we're set up to append
          // to existing files it will cause an exception to be thrown, which is the desired result.
          data.getFileStreamsCollection().closeOldestOpenFile( isZipFile );
        }

        if ( createParentDirIfNotExists && ( ( data.getFileStreamsCollection().size() == 0 ) || meta.isFileNameInField() ) ) {
          createParentFolder( filename );
        }
        if ( log.isDetailed() ) {
          logDetailed( "Opening output stream using provider: " + compressionProvider.getName() );
        }

        fileOutputStream = getOutputStream( filename, getTransMeta(), !isZipFile && appendToExistingFile );
        compressionOutputStream = compressionProvider.createOutputStream( fileOutputStream );

        // The compression output stream may also archive entries. For this we create the filename
        // (with appropriate extension) and add it as an entry to the output stream. For providers
        // that do not archive entries, they should use the default no-op implementation.
        compressionOutputStream.addEntry( filename, environmentSubstitute( meta.getExtension() ) );

        if ( log.isDetailed() ) {
          if ( !Utils.isEmpty( meta.getEncoding() ) ) {
            logDetailed( "Opening output stream in encoding: " + meta.getEncoding() );
          } else {
            logDetailed( "Opening output stream in default encoding" );
          }
        }

        bufferedOutputStream = new BufferedOutputStream( compressionOutputStream, 5000 );
        fileStreams = data.new FileStream( fileOutputStream, compressionOutputStream, bufferedOutputStream );
        data.getFileStreamsCollection().add( filename, fileStreams );

        if ( log.isDetailed() ) {
          logDetailed( "Opened new file with name [" + KettleVFS.getFriendlyURI( filename ) + "]" );
        }
      } else if ( fileStreams.getBufferedOutputStream() == null ) {
        // File was previously opened and now needs to be reopened.
        int maxOpenFiles = getMaxOpenFiles();
        if ( ( maxOpenFiles > 0 ) && ( data.getFileStreamsCollection().getNumOpenFiles() >= maxOpenFiles ) ) {
          data.getFileStreamsCollection().closeOldestOpenFile( false );
        }

        fileOutputStream = getOutputStream( filename, getTransMeta(), true );
        CompressionProvider compressionProvider = getCompressionProvider();
        compressionOutputStream = compressionProvider.createOutputStream( fileOutputStream );
        compressionOutputStream.addEntry( filename, environmentSubstitute( meta.getExtension() ) );
        bufferedOutputStream = new BufferedOutputStream( compressionOutputStream, 5000 );

        fileStreams.setFileOutputStream( fileOutputStream );
        fileStreams.setCompressedOutputStream( compressionOutputStream );
        fileStreams.setBufferedOutputStream( bufferedOutputStream );
      }

      if ( writingToFileForFirstTime ) {
        if ( meta.isAddToResultFiles() ) {
          // Add this to the result file names...
          ResultFile resultFile = new ResultFile( ResultFile.FILE_TYPE_GENERAL,
              getFileObject( filename, getTransMeta() ), getTransMeta().getName(), getStepname() );
          resultFile.setComment( BaseMessages.getString( PKG, "TextFileOutput.AddResultFile" ) );
          addResultFile( resultFile );
        }
      }
    } catch ( Exception e ) {
      if ( !( e instanceof KettleException ) ) {
        throw new KettleException( "Error opening new file : " + e.toString() );
      } else {
        throw (KettleException) e;
      }
    }
    fileStreams.setDirty( true );
    data.fos = fileStreams.getFileOutputStream();
    data.out = fileStreams.getCompressedOutputStream();
    data.writer = fileStreams.getBufferedOutputStream();
  } catch ( KettleException ke ) {
    throw ke;
  } catch ( Exception e ) {
    throw new KettleException( "Error opening new file : " + e.toString() );
  }
}
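The ResultFile constructor wants an Apache VFS FileObject rather than a plain path; the step's getFileObject() helper wraps that lookup. As a sketch, the same thing can be done directly with KettleVFS.getFileObject(). The class and method names below are invented, and the imports assume the Commons VFS 2 packages used by recent Kettle versions.

import org.apache.commons.vfs2.FileObject;

import org.pentaho.di.core.ResultFile;
import org.pentaho.di.core.exception.KettleFileException;
import org.pentaho.di.core.vfs.KettleVFS;

public class ResultFiles {
  /** Builds a general-purpose ResultFile for a plain filename. */
  public static ResultFile general( String filename, String transName, String stepName )
      throws KettleFileException {
    // KettleVFS resolves local paths as well as VFS URLs to a FileObject.
    FileObject fileObject = KettleVFS.getFileObject( filename );
    return new ResultFile( ResultFile.FILE_TYPE_GENERAL, fileObject, transName, stepName );
  }
}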
Use of org.pentaho.di.core.ResultFile in project pentaho-kettle by pentaho.
The class TransExecutor, method collectExecutionResultFiles.
@VisibleForTesting
void collectExecutionResultFiles( Result result ) throws KettleException {
  RowSet resultFilesRowSet = getData().getResultFilesRowSet();
  if ( meta.getResultFilesTargetStepMeta() != null && result.getResultFilesList() != null && resultFilesRowSet != null ) {
    for ( ResultFile resultFile : result.getResultFilesList() ) {
      Object[] targetRow = RowDataUtil.allocateRowData( getData().getResultFilesOutputRowMeta().size() );
      int idx = 0;
      targetRow[ idx++ ] = resultFile.getFile().getName().toString();
      // TODO: time, origin, ...
      putRowTo( getData().getResultFilesOutputRowMeta(), targetRow, resultFilesRowSet );
    }
  }
}
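The TODO above marks fields that are never filled in. A sketch of what a completed row could look like, assuming the result-files row meta were extended with matching fields; the row layout and class name here are hypothetical, while getTimestamp(), getOrigin() and getOriginParent() are existing getters on ResultFile.

import org.pentaho.di.core.ResultFile;

public class ResultFileRows {
  /** Maps one ResultFile to a row of the hypothetical extended layout. */
  public static Object[] toRow( ResultFile resultFile ) {
    Object[] row = new Object[ 4 ];
    int idx = 0;
    row[ idx++ ] = resultFile.getFile().getName().toString();
    row[ idx++ ] = resultFile.getTimestamp();     // when the file was registered
    row[ idx++ ] = resultFile.getOrigin();        // step that registered it
    row[ idx++ ] = resultFile.getOriginParent();  // parent transformation or job
    return row;
  }
}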