Use of org.pentaho.di.core.compress.CompressionOutputStream in project pentaho-kettle by pentaho.
The class TextFileOutput, method initFileStreamWriter.
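Before the full listing, here is a minimal sketch (not taken from the project) of the open/write/close pattern the method builds: a CompressionProvider wraps a raw output stream in a CompressionOutputStream, an archive entry is registered for providers that archive (for other providers addEntry is effectively a no-op), and the result is buffered. The plain FileOutputStream and the provider parameter are simplifications for illustration; the real step opens files through KettleVFS and resolves its provider via getCompressionProvider().

import java.io.BufferedOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;

import org.pentaho.di.core.compress.CompressionOutputStream;
import org.pentaho.di.core.compress.CompressionProvider;

public class CompressedWriteSketch {
  // Sketch only: the provider is assumed to be resolved by the caller.
  public static void write(CompressionProvider provider, String filename, byte[] payload) throws IOException {
    OutputStream fileOutputStream = new FileOutputStream(filename);
    CompressionOutputStream compressionOutputStream = provider.createOutputStream(fileOutputStream);
    // Register the entry name for archiving providers such as ZIP; no-op for the others.
    compressionOutputStream.addEntry(filename, "txt");
    BufferedOutputStream writer = new BufferedOutputStream(compressionOutputStream, 5000);
    try {
      writer.write(payload);
    } finally {
      writer.close(); // flushes the buffer and closes the wrapped streams
    }
  }
}

The complete method follows.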
public void initFileStreamWriter(String filename) throws KettleException {
  data.writer = null;
  try {
    BufferedOutputStream bufferedOutputStream;
    OutputStream fileOutputStream;
    CompressionOutputStream compressionOutputStream;
    TextFileOutputData.FileStream fileStreams = null;
    try {
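      // When output is split into multiple files, only the most recently opened split file can be reused.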
      if (meta.getSplitEvery() > 0) {
        if (filename.equals(data.getFileStreamsCollection().getLastFileName())) {
          fileStreams = data.getFileStreamsCollection().getLastStream();
        }
      } else {
        fileStreams = data.getFileStreamsCollection().getStream(filename);
      }
      boolean writingToFileForFirstTime = fileStreams == null;
      boolean createParentDirIfNotExists = meta.isCreateParentFolder();
      boolean appendToExistingFile = meta.isFileAppended();
      if (fileStreams == null) {
        // Opening file for first time
        CompressionProvider compressionProvider = getCompressionProvider();
        boolean isZipFile = compressionProvider instanceof ZIPCompressionProvider;
        if (appendToExistingFile && isZipFile && isFileExists(filename)) {
          throw new KettleException("Can not append to an existing zip file : " + filename);
        }
        int maxOpenFiles = getMaxOpenFiles();
        if ((maxOpenFiles > 0) && (data.getFileStreamsCollection().getNumOpenFiles() >= maxOpenFiles)) {
          // If the file we're about to close is a zip file, remove it from the collection of open
          // files, because a zip file cannot be reopened for append. By removing it from the
          // collection, a later reference to the same file looks like a first-time open, and if
          // we're set up to append to existing files this causes an exception to be thrown, which
          // is the desired result.
          data.getFileStreamsCollection().closeOldestOpenFile(isZipFile);
        }
        if (createParentDirIfNotExists && ((data.getFileStreamsCollection().size() == 0) || meta.isFileNameInField())) {
          createParentFolder(filename);
        }
        if (log.isDetailed()) {
          logDetailed("Opening output stream using provider: " + compressionProvider.getName());
        }
        fileOutputStream = getOutputStream(filename, getTransMeta(), !isZipFile && appendToExistingFile);
        compressionOutputStream = compressionProvider.createOutputStream(fileOutputStream);
        // The compression output stream may also archive entries. For this we create the filename
        // (with the appropriate extension) and add it as an entry to the output stream. Providers
        // that do not archive entries use the default no-op implementation.
        compressionOutputStream.addEntry(filename, environmentSubstitute(meta.getExtension()));
        if (log.isDetailed()) {
          if (!Utils.isEmpty(meta.getEncoding())) {
            logDetailed("Opening output stream in encoding: " + meta.getEncoding());
          } else {
            logDetailed("Opening output stream in default encoding");
          }
        }
        bufferedOutputStream = new BufferedOutputStream(compressionOutputStream, 5000);
        fileStreams = data.new FileStream(fileOutputStream, compressionOutputStream, bufferedOutputStream);
        data.getFileStreamsCollection().add(filename, fileStreams);
        if (log.isDetailed()) {
          logDetailed("Opened new file with name [" + KettleVFS.getFriendlyURI(filename) + "]");
        }
      } else if (fileStreams.getBufferedOutputStream() == null) {
        // File was previously opened and now needs to be reopened.
        int maxOpenFiles = getMaxOpenFiles();
        if ((maxOpenFiles > 0) && (data.getFileStreamsCollection().getNumOpenFiles() >= maxOpenFiles)) {
          data.getFileStreamsCollection().closeOldestOpenFile(false);
        }
        fileOutputStream = getOutputStream(filename, getTransMeta(), true);
        CompressionProvider compressionProvider = getCompressionProvider();
        compressionOutputStream = compressionProvider.createOutputStream(fileOutputStream);
        compressionOutputStream.addEntry(filename, environmentSubstitute(meta.getExtension()));
        bufferedOutputStream = new BufferedOutputStream(compressionOutputStream, 5000);
        fileStreams.setFileOutputStream(fileOutputStream);
        fileStreams.setCompressedOutputStream(compressionOutputStream);
        fileStreams.setBufferedOutputStream(bufferedOutputStream);
      }
      if (writingToFileForFirstTime) {
        if (meta.isAddToResultFiles()) {
          // Add this to the result file names...
          ResultFile resultFile = new ResultFile(ResultFile.FILE_TYPE_GENERAL, getFileObject(filename, getTransMeta()), getTransMeta().getName(), getStepname());
          if (resultFile != null) {
            resultFile.setComment(BaseMessages.getString(PKG, "TextFileOutput.AddResultFile"));
            addResultFile(resultFile);
          }
        }
      }
    } catch (Exception e) {
      if (!(e instanceof KettleException)) {
        throw new KettleException("Error opening new file : " + e.toString());
      } else {
        throw (KettleException) e;
      }
    }
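    // Mark the stream dirty and publish the opened streams on the step data for subsequent writes.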
    fileStreams.setDirty(true);
    data.fos = fileStreams.getFileOutputStream();
    data.out = fileStreams.getCompressedOutputStream();
    data.writer = fileStreams.getBufferedOutputStream();
  } catch (KettleException ke) {
    throw ke;
  } catch (Exception e) {
    throw new KettleException("Error opening new file : " + e.toString());
  }
}
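The helpers getCompressionProvider(), getOutputStream(), getMaxOpenFiles(), isFileExists() and createParentFolder() are defined elsewhere in TextFileOutput and are not shown above. For orientation, here is a hedged sketch of what a provider lookup can look like, assuming a name-based lookup on CompressionProviderFactory and a supportsOutput() check in org.pentaho.di.core.compress; the exact method and provider names are assumptions, not confirmed by the listing above.

import org.pentaho.di.core.compress.CompressionProvider;
import org.pentaho.di.core.compress.CompressionProviderFactory;
import org.pentaho.di.core.exception.KettleException;

public class ProviderLookupSketch {
  // Hypothetical helper, not the project's getCompressionProvider() implementation.
  public static CompressionProvider lookup(String compressionType) throws KettleException {
    if (compressionType == null || compressionType.isEmpty()) {
      compressionType = "None"; // assumed name of the no-compression provider
    }
    CompressionProvider provider =
        CompressionProviderFactory.getInstance().getCompressionProviderByName(compressionType);
    if (provider == null) {
      throw new KettleException("No compression provider found with name = " + compressionType);
    }
    if (!provider.supportsOutput()) {
      throw new KettleException("Compression provider " + compressionType + " does not support output streams");
    }
    return provider;
  }
}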