use of org.pentaho.di.core.ResultFile in project pentaho-kettle by pentaho.
the class JobEntryTrans method execute.
/**
* Execute this job entry and return the result. In this case it means: just set the result boolean in the
* Result class.
*
* @param result The result of the previous execution
* @param nr the job entry number
* @return The Result of the execution.
*/
@Override
public Result execute(Result result, int nr) throws KettleException {
result.setEntryNr(nr);
LogChannelFileWriter logChannelFileWriter = null;
LogLevel transLogLevel = parentJob.getLogLevel();
// Set the embedded Named Cluster MetaStore Provider Key so that it can be passed to VFS
if (parentJobMeta.getNamedClusterEmbedManager() != null) {
parentJobMeta.getNamedClusterEmbedManager().passEmbeddedMetastoreKey(this, parentJobMeta.getEmbeddedMetastoreProviderKey());
}
String realLogFilename = "";
if (setLogfile) {
transLogLevel = logFileLevel;
realLogFilename = environmentSubstitute(getLogFilename());
// if we do not have one, we must fail
if (Utils.isEmpty(realLogFilename)) {
logError(BaseMessages.getString(PKG, "JobTrans.Exception.LogFilenameMissing"));
result.setNrErrors(1);
result.setResult(false);
return result;
}
// create parent folder?
if (!FileUtil.createParentFolder(PKG, realLogFilename, createParentFolder, this.getLogChannel(), this)) {
result.setNrErrors(1);
result.setResult(false);
return result;
}
try {
logChannelFileWriter = new LogChannelFileWriter(this.getLogChannelId(), KettleVFS.getFileObject(realLogFilename, this), setAppendLogfile);
logChannelFileWriter.startLogging();
} catch (KettleException e) {
logError(BaseMessages.getString(PKG, "JobTrans.Error.UnableOpenAppender", realLogFilename, e.toString()));
logError(Const.getStackTracker(e));
result.setNrErrors(1);
result.setResult(false);
return result;
}
}
//
switch(specificationMethod) {
case FILENAME:
if (isDetailed()) {
logDetailed(BaseMessages.getString(PKG, "JobTrans.Log.OpeningTrans", environmentSubstitute(getFilename())));
}
break;
case REPOSITORY_BY_NAME:
if (isDetailed()) {
logDetailed(BaseMessages.getString(PKG, "JobTrans.Log.OpeningTransInDirec", environmentSubstitute(getFilename()), environmentSubstitute(directory)));
}
break;
case REPOSITORY_BY_REFERENCE:
if (isDetailed()) {
logDetailed(BaseMessages.getString(PKG, "JobTrans.Log.OpeningTransByReference", transObjectId));
}
break;
default:
break;
}
// Load the transformation only once for the complete loop!
// Throws an exception if it was not possible to load the transformation. For example, the XML file doesn't exist or
// the repository is down.
// Log the stack trace and return an error condition from this job entry.
//
TransMeta transMeta = null;
try {
transMeta = getTransMeta(rep, metaStore, this);
} catch (KettleException e) {
logError(BaseMessages.getString(PKG, "JobTrans.Exception.UnableToRunJob", parentJobMeta.getName(), getName(), StringUtils.trim(e.getMessage())), e);
result.setNrErrors(1);
result.setResult(false);
return result;
}
int iteration = 0;
String[] args1 = arguments;
if (args1 == null || args1.length == 0) {
// No arguments set, look at the parent job.
args1 = parentJob.getArguments();
}
// initializeVariablesFrom(parentJob);
//
// For the moment only do variable translation at the start of a job, not
// for every input row (if that would be switched on). This is for safety,
// the real argument setting is later on.
//
String[] args = null;
if (args1 != null) {
args = new String[args1.length];
for (int idx = 0; idx < args1.length; idx++) {
args[idx] = environmentSubstitute(args1[idx]);
}
}
RowMetaAndData resultRow = null;
boolean first = true;
List<RowMetaAndData> rows = new ArrayList<RowMetaAndData>(result.getRows());
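// Run the transformation once when "execute for every input row" is off; otherwise loop over the
// incoming result rows until they run out, an error occurs, or the parent job is stopped. Note the
// operator precedence: the !parentJob.isStopped() check only guards the per-row branch.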
while ((first && !execPerRow) || (execPerRow && rows != null && iteration < rows.size() && result.getNrErrors() == 0) && !parentJob.isStopped()) {
//
if (execPerRow) {
result.getRows().clear();
}
if (rows != null && execPerRow) {
resultRow = rows.get(iteration);
} else {
resultRow = null;
}
NamedParams namedParam = new NamedParamsDefault();
if (parameters != null) {
for (int idx = 0; idx < parameters.length; idx++) {
if (!Utils.isEmpty(parameters[idx])) {
// We have a parameter
//
namedParam.addParameterDefinition(parameters[idx], "", "Job entry runtime");
if (Utils.isEmpty(Const.trim(parameterFieldNames[idx]))) {
// There is no field name specified.
//
String value = Const.NVL(environmentSubstitute(parameterValues[idx]), "");
namedParam.setParameterValue(parameters[idx], value);
} else {
// something filled in, in the field column...
//
String value = "";
if (resultRow != null) {
value = resultRow.getString(parameterFieldNames[idx], "");
}
namedParam.setParameterValue(parameters[idx], value);
}
}
}
}
first = false;
Result previousResult = result;
try {
if (isDetailed()) {
logDetailed(BaseMessages.getString(PKG, "JobTrans.StartingTrans", getFilename(), getName(), getDescription()));
}
if (clearResultRows) {
previousResult.setRows(new ArrayList<RowMetaAndData>());
}
if (clearResultFiles) {
previousResult.getResultFiles().clear();
}
/*
* Set one or more "result" rows on the transformation...
*/
if (execPerRow) {
if (argFromPrevious) {
// Copy the input row to the (command line) arguments
args = null;
if (resultRow != null) {
args = new String[resultRow.size()];
for (int i = 0; i < resultRow.size(); i++) {
args[i] = resultRow.getString(i, null);
}
}
} else {
// Just pass a single row
List<RowMetaAndData> newList = new ArrayList<RowMetaAndData>();
newList.add(resultRow);
// This previous result rows list can be either empty or not.
// Depending on the checkbox "clear result rows"
// In this case, it would execute the transformation with one extra row each time
// Can't figure out a real use-case for it, but hey, who am I to decide that, right?
// :-)
//
previousResult.getRows().addAll(newList);
}
if (paramsFromPrevious) {
if (parameters != null) {
for (int idx = 0; idx < parameters.length; idx++) {
if (!Utils.isEmpty(parameters[idx])) {
// We have a parameter
if (Utils.isEmpty(Const.trim(parameterFieldNames[idx]))) {
namedParam.setParameterValue(parameters[idx], Const.NVL(environmentSubstitute(parameterValues[idx]), ""));
} else {
String fieldValue = "";
if (resultRow != null) {
fieldValue = resultRow.getString(parameterFieldNames[idx], "");
}
// Get the value from the input stream
namedParam.setParameterValue(parameters[idx], Const.NVL(fieldValue, ""));
}
}
}
}
}
} else {
if (argFromPrevious) {
// Only put the first Row on the arguments
args = null;
if (resultRow != null) {
args = new String[resultRow.size()];
for (int i = 0; i < resultRow.size(); i++) {
args[i] = resultRow.getString(i, null);
}
}
}
if (paramsFromPrevious) {
// Copy the input values to the parameters
if (parameters != null) {
for (int idx = 0; idx < parameters.length; idx++) {
if (!Utils.isEmpty(parameters[idx])) {
// We have a parameter
if (Utils.isEmpty(Const.trim(parameterFieldNames[idx]))) {
namedParam.setParameterValue(parameters[idx], Const.NVL(environmentSubstitute(parameterValues[idx]), ""));
} else {
String fieldValue = "";
if (resultRow != null) {
fieldValue = resultRow.getString(parameterFieldNames[idx], "");
}
// Get the value from the input stream
namedParam.setParameterValue(parameters[idx], Const.NVL(fieldValue, ""));
}
}
}
}
}
}
// Handle the parameters...
//
transMeta.clearParameters();
String[] parameterNames = transMeta.listParameters();
StepWithMappingMeta.activateParams(transMeta, transMeta, this, parameterNames, parameters, parameterValues);
boolean doFallback = true;
SlaveServer remoteSlaveServer = null;
TransExecutionConfiguration executionConfiguration = new TransExecutionConfiguration();
if (!Utils.isEmpty(runConfiguration)) {
log.logBasic(BaseMessages.getString(PKG, "JobTrans.RunConfig.Message"), runConfiguration);
runConfiguration = environmentSubstitute(runConfiguration);
executionConfiguration.setRunConfiguration(runConfiguration);
try {
ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.SpoonTransBeforeStart.id, new Object[] { executionConfiguration, parentJob.getJobMeta(), transMeta, rep });
if (!executionConfiguration.isExecutingLocally() && !executionConfiguration.isExecutingRemotely() && !executionConfiguration.isExecutingClustered()) {
result.setResult(true);
return result;
}
clustering = executionConfiguration.isExecutingClustered();
remoteSlaveServer = executionConfiguration.getRemoteServer();
doFallback = false;
} catch (KettleException e) {
log.logError(e.getMessage(), getName());
result.setNrErrors(1);
result.setResult(false);
return result;
}
}
if (doFallback) {
//
if (!Utils.isEmpty(remoteSlaveServerName)) {
String realRemoteSlaveServerName = environmentSubstitute(remoteSlaveServerName);
remoteSlaveServer = parentJob.getJobMeta().findSlaveServer(realRemoteSlaveServerName);
if (remoteSlaveServer == null) {
throw new KettleException(BaseMessages.getString(PKG, "JobTrans.Exception.UnableToFindRemoteSlaveServer", realRemoteSlaveServerName));
}
}
}
//
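// Dispatch on execution target: clustered, then remote slave server, then the local JVM.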
if (clustering) {
executionConfiguration.setClusterPosting(true);
executionConfiguration.setClusterPreparing(true);
executionConfiguration.setClusterStarting(true);
executionConfiguration.setClusterShowingTransformation(false);
executionConfiguration.setSafeModeEnabled(false);
executionConfiguration.setRepository(rep);
executionConfiguration.setLogLevel(transLogLevel);
executionConfiguration.setPreviousResult(previousResult);
// Also pass the variables from the transformation into the execution configuration
// That way it can go over the HTTP connection to the slave server.
//
executionConfiguration.setVariables(transMeta);
// Also set the arguments...
//
executionConfiguration.setArgumentStrings(args);
if (parentJob.getJobMeta().isBatchIdPassed()) {
executionConfiguration.setPassedBatchId(parentJob.getPassedBatchId());
}
TransSplitter transSplitter = null;
long errors = 0;
try {
transSplitter = Trans.executeClustered(transMeta, executionConfiguration);
// Monitor the running transformations, wait until they are done.
// Also kill them all if anything goes bad
// Also clean up afterwards...
//
errors += Trans.monitorClusteredTransformation(log, transSplitter, parentJob);
} catch (Exception e) {
logError("Error during clustered execution. Cleaning up clustered execution.", e);
// In case something goes wrong, make sure to clean up afterwards!
//
errors++;
if (transSplitter != null) {
Trans.cleanupCluster(log, transSplitter);
} else {
// Try to clean anyway...
//
SlaveServer master = null;
for (StepMeta stepMeta : transMeta.getSteps()) {
if (stepMeta.isClustered()) {
for (SlaveServer slaveServer : stepMeta.getClusterSchema().getSlaveServers()) {
if (slaveServer.isMaster()) {
master = slaveServer;
break;
}
}
}
}
if (master != null) {
master.deAllocateServerSockets(transMeta.getName(), null);
}
}
}
result.clear();
if (transSplitter != null) {
Result clusterResult = Trans.getClusteredTransformationResult(log, transSplitter, parentJob, executionConfiguration.isLogRemoteExecutionLocally());
result.add(clusterResult);
}
result.setNrErrors(result.getNrErrors() + errors);
} else if (remoteSlaveServer != null) {
// Execute this transformation remotely
//
// Make sure we can parameterize the slave server connection
//
remoteSlaveServer.shareVariablesWith(this);
// Remote execution...
//
executionConfiguration.setPreviousResult(previousResult.clone());
executionConfiguration.setArgumentStrings(args);
executionConfiguration.setVariables(this);
executionConfiguration.setRemoteServer(remoteSlaveServer);
executionConfiguration.setLogLevel(transLogLevel);
executionConfiguration.setRepository(rep);
executionConfiguration.setLogFileName(realLogFilename);
executionConfiguration.setSetAppendLogfile(setAppendLogfile);
executionConfiguration.setSetLogfile(setLogfile);
Map<String, String> params = executionConfiguration.getParams();
for (String param : transMeta.listParameters()) {
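// Resolution order per parameter: explicit value, else the declared default, else a variable of the same name.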
String value = Const.NVL(transMeta.getParameterValue(param), Const.NVL(transMeta.getParameterDefault(param), transMeta.getVariable(param)));
params.put(param, value);
}
if (parentJob.getJobMeta().isBatchIdPassed()) {
executionConfiguration.setPassedBatchId(parentJob.getPassedBatchId());
}
// Send the XML over to the slave server
// Also start the transformation over there...
//
String carteObjectId = Trans.sendToSlaveServer(transMeta, executionConfiguration, rep, metaStore);
// Now start the monitoring...
//
SlaveServerTransStatus transStatus = null;
while (!parentJob.isStopped() && waitingToFinish) {
try {
transStatus = remoteSlaveServer.getTransStatus(transMeta.getName(), carteObjectId, 0);
if (!transStatus.isRunning()) {
// The transformation is finished, get the result...
//
// get the status with the result (we don't do it above because of the change made for PDI-15781)
transStatus = remoteSlaveServer.getTransStatus(transMeta.getName(), carteObjectId, 0, true);
Result remoteResult = transStatus.getResult();
result.clear();
result.add(remoteResult);
//
if (remoteResult.isStopped()) {
//
result.setNrErrors(result.getNrErrors() + 1);
}
// Make sure to clean up : write a log record etc, close any left-over sockets etc.
//
remoteSlaveServer.cleanupTransformation(transMeta.getName(), carteObjectId);
break;
}
} catch (Exception e1) {
logError(BaseMessages.getString(PKG, "JobTrans.Error.UnableContactSlaveServer", "" + remoteSlaveServer, transMeta.getName()), e1);
result.setNrErrors(result.getNrErrors() + 1L);
// Stop looking too, chances are too low the server will come back on-line
break;
}
// sleep for 2 seconds
try {
Thread.sleep(2000);
} catch (InterruptedException e) {
// Ignore
}
}
if (parentJob.isStopped()) {
//
if (transStatus == null || transStatus.isRunning()) {
// Try a remote abort; use carteObjectId since transStatus can still be null here...
//
remoteSlaveServer.stopTransformation(transMeta.getName(), carteObjectId);
// And a cleanup...
//
remoteSlaveServer.cleanupTransformation(transMeta.getName(), carteObjectId);
// Set an error state!
//
result.setNrErrors(result.getNrErrors() + 1L);
}
}
} else {
// Execute this transformation on the local machine
//
// Create the transformation from meta-data
//
// trans = new Trans( transMeta, this );
final TransMeta meta = transMeta;
trans = new TransSupplier(transMeta, log, () -> new Trans(meta)).get();
trans.setParent(this);
// Pass the socket repository as early as possible...
//
trans.setSocketRepository(parentJob.getSocketRepository());
if (parentJob.getJobMeta().isBatchIdPassed()) {
trans.setPassedBatchId(parentJob.getPassedBatchId());
}
// set the parent job on the transformation, variables are taken from here...
//
trans.setParentJob(parentJob);
trans.setParentVariableSpace(parentJob);
trans.setLogLevel(transLogLevel);
trans.setPreviousResult(previousResult);
trans.setArguments(arguments);
// Mappings need the repository to load from
//
trans.setRepository(rep);
// inject the metaStore
trans.setMetaStore(metaStore);
// First get the root job
//
Job rootJob = parentJob;
while (rootJob.getParentJob() != null) {
rootJob = rootJob.getParentJob();
}
// Get the start and end-date from the root job...
//
trans.setJobStartDate(rootJob.getStartDate());
trans.setJobEndDate(rootJob.getEndDate());
//
for (DelegationListener delegationListener : parentJob.getDelegationListeners()) {
// TODO: copy some settings in the job execution configuration, not strictly needed
// but the execution configuration information is useful in case of a job re-start
//
delegationListener.transformationDelegationStarted(trans, new TransExecutionConfiguration());
}
try {
// Start execution...
//
trans.execute(args);
// TODO is it possible to implement Observer pattern to avoid Thread.sleep here?
while (!trans.isFinished() && trans.getErrors() == 0) {
if (parentJob.isStopped()) {
trans.stopAll();
break;
} else {
try {
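// Thread.sleep(0, 500) waits 0 ms plus 500 ns, so this is effectively a near-busy polling loop.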
Thread.sleep(0, 500);
} catch (InterruptedException e) {
// Ignore errors
}
}
}
trans.waitUntilFinished();
if (parentJob.isStopped() || trans.getErrors() != 0) {
trans.stopAll();
result.setNrErrors(1);
}
Result newResult = trans.getResult();
// clear only the numbers, NOT the files or rows.
result.clear();
result.add(newResult);
// Set the result rows too, if any ...
if (!Utils.isEmpty(newResult.getRows())) {
result.setRows(newResult.getRows());
}
if (setLogfile) {
ResultFile resultFile = new ResultFile(ResultFile.FILE_TYPE_LOG, KettleVFS.getFileObject(realLogFilename, this), parentJob.getJobname(), toString());
result.getResultFiles().put(resultFile.getFile().toString(), resultFile);
}
} catch (KettleException e) {
logError(BaseMessages.getString(PKG, "JobTrans.Error.UnablePrepareExec"), e);
result.setNrErrors(1);
}
}
} catch (Exception e) {
logError(BaseMessages.getString(PKG, "JobTrans.ErrorUnableOpenTrans", e.getMessage()));
logError(Const.getStackTracker(e));
result.setNrErrors(1);
}
iteration++;
}
if (setLogfile) {
if (logChannelFileWriter != null) {
logChannelFileWriter.stopLogging();
ResultFile resultFile = new ResultFile(ResultFile.FILE_TYPE_LOG, logChannelFileWriter.getLogFile(), parentJob.getJobname(), getName());
result.getResultFiles().put(resultFile.getFile().toString(), resultFile);
//
if (logChannelFileWriter.getException() != null) {
logError("Unable to open log file [" + getLogFilename() + "] : ");
logError(Const.getStackTracker(logChannelFileWriter.getException()));
result.setNrErrors(1);
result.setResult(false);
return result;
}
}
}
if (result.getNrErrors() == 0) {
result.setResult(true);
} else {
result.setResult(false);
}
return result;
}
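Every call site in this listing repeats one small idiom: wrap a VFS FileObject in a ResultFile with a type constant, then register it on the Result keyed by the file's string form. A minimal sketch of that idiom pulled out on its own; the class and helper names are ours, not Kettle's, and it assumes KettleEnvironment.init() has already run:

import org.apache.commons.vfs2.FileObject;
import org.pentaho.di.core.Result;
import org.pentaho.di.core.ResultFile;
import org.pentaho.di.core.exception.KettleFileException;
import org.pentaho.di.core.vfs.KettleVFS;

public class ResultFileIdiom {
    // Hypothetical helper, not part of Kettle: isolates the registration pattern used above.
    public static void register(Result result, String path, String jobName, String origin) throws KettleFileException {
        FileObject fileObject = KettleVFS.getFileObject(path);
        ResultFile resultFile = new ResultFile(ResultFile.FILE_TYPE_LOG, fileObject, jobName, origin);
        // Result files live in a map keyed by the file's string form, so re-adding
        // the same file overwrites the previous entry rather than duplicating it.
        result.getResultFiles().put(resultFile.getFile().toString(), resultFile);
    }
}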
use of org.pentaho.di.core.ResultFile in project pentaho-kettle by pentaho.
the class JobEntryUnZip method addFilenameToResultFilenames.
private void addFilenameToResultFilenames(Result result, Job parentJob, String newfile) throws Exception {
if (addfiletoresult) {
// Add file to result files name
ResultFile resultFile = new ResultFile(ResultFile.FILE_TYPE_GENERAL, KettleVFS.getFileObject(newfile, this), parentJob.getJobname(), toString());
result.getResultFiles().put(resultFile.getFile().toString(), resultFile);
}
}
use of org.pentaho.di.core.ResultFile in project pentaho-kettle by pentaho.
the class JobEntryZipFile method processRowFile.
public boolean processRowFile(Job parentJob, Result result, String realZipfilename, String realWildcard, String realWildcardExclude, String realSourceDirectoryOrFile, String realMovetodirectory, boolean createparentfolder) {
boolean Fileexists = false;
File tempFile = null;
File fileZip;
boolean resultat = false;
boolean renameOk = false;
boolean orginExist = false;
// Check if target file/folder exists!
FileObject originFile = null;
ZipInputStream zin = null;
byte[] buffer;
OutputStream dest = null;
BufferedOutputStreamWithCloseDetection buff = null;
ZipOutputStream out = null;
ZipEntry entry;
String localSourceFilename = realSourceDirectoryOrFile;
try {
originFile = KettleVFS.getFileObject(realSourceDirectoryOrFile, this);
localSourceFilename = KettleVFS.getFilename(originFile);
orginExist = originFile.exists();
} catch (Exception e) {
// Ignore errors
} finally {
if (originFile != null) {
try {
originFile.close();
} catch (IOException ex) {
logError("Error closing file '" + originFile.toString() + "'", ex);
}
}
}
String localrealZipfilename = realZipfilename;
if (realZipfilename != null && orginExist) {
FileObject fileObject = null;
try {
fileObject = KettleVFS.getFileObject(localrealZipfilename, this);
localrealZipfilename = KettleVFS.getFilename(fileObject);
// Check if Zip File exists
if (fileObject.exists()) {
Fileexists = true;
if (log.isDebug()) {
logDebug(BaseMessages.getString(PKG, "JobZipFiles.Zip_FileExists1.Label") + localrealZipfilename + BaseMessages.getString(PKG, "JobZipFiles.Zip_FileExists2.Label"));
}
}
// Let's see if we need to create parent folder of destination zip filename
if (createparentfolder) {
createParentFolder(localrealZipfilename);
}
// Let's start the process now
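// ifZipFileExists encodes the "if zip file exists" option: 0 = create a new file with a
// timestamp suffix, 1 = append to the existing archive, 2 = do nothing, 3 = fail the entry.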
if (ifZipFileExists == 3 && Fileexists) {
// the zip file exists and user want to Fail
resultat = false;
} else if (ifZipFileExists == 2 && Fileexists) {
// the zip file exists and user want to do nothing
if (addFileToResult) {
// Add file to result files name
ResultFile resultFile = new ResultFile(ResultFile.FILE_TYPE_GENERAL, fileObject, parentJob.getJobname(), toString());
result.getResultFiles().put(resultFile.getFile().toString(), resultFile);
}
resultat = true;
} else if (afterZip == 2 && realMovetodirectory == null) {
// After zip, the user chose to move files but gave no destination folder
resultat = false;
logError(BaseMessages.getString(PKG, "JobZipFiles.AfterZip_No_DestinationFolder_Defined.Label"));
} else {
// Let's see if we deal with a file or a folder
FileObject[] fileList;
FileObject sourceFileOrFolder = KettleVFS.getFileObject(localSourceFilename, this);
boolean isSourceDirectory = sourceFileOrFolder.getType().equals(FileType.FOLDER);
final Pattern pattern;
final Pattern patternExclude;
if (isSourceDirectory) {
//
if (!Utils.isEmpty(realWildcard)) {
pattern = Pattern.compile(realWildcard);
} else {
pattern = null;
}
if (!Utils.isEmpty(realWildcardExclude)) {
patternExclude = Pattern.compile(realWildcardExclude);
} else {
patternExclude = null;
}
//
if (includingSubFolders) {
fileList = sourceFileOrFolder.findFiles(new ZipJobEntryPatternFileSelector(pattern, patternExclude));
} else {
fileList = sourceFileOrFolder.getChildren();
}
} else {
pattern = null;
patternExclude = null;
// Target is a file
fileList = new FileObject[] { sourceFileOrFolder };
}
if (fileList.length == 0) {
resultat = false;
logError(BaseMessages.getString(PKG, "JobZipFiles.Log.FolderIsEmpty", localSourceFilename));
} else if (!checkContainsFile(localSourceFilename, fileList, isSourceDirectory)) {
resultat = false;
logError(BaseMessages.getString(PKG, "JobZipFiles.Log.NoFilesInFolder", localSourceFilename));
} else {
if (ifZipFileExists == 0 && Fileexists) {
// do we have already a .zip at the end?
if (localrealZipfilename.toLowerCase().endsWith(".zip")) {
// strip this off
localrealZipfilename = localrealZipfilename.substring(0, localrealZipfilename.length() - 4);
}
localrealZipfilename += "_" + StringUtil.getFormattedDateTimeNow(true) + ".zip";
if (log.isDebug()) {
logDebug(BaseMessages.getString(PKG, "JobZipFiles.Zip_FileNameChange1.Label") + localrealZipfilename + BaseMessages.getString(PKG, "JobZipFiles.Zip_FileNameChange1.Label"));
}
} else if (ifZipFileExists == 1 && Fileexists) {
// the zip file exists and user want to append
// get a temp file
fileZip = getFile(localrealZipfilename);
tempFile = File.createTempFile(fileZip.getName(), null);
// delete it, otherwise we cannot rename existing zip to it.
tempFile.delete();
renameOk = fileZip.renameTo(tempFile);
if (!renameOk) {
logError(BaseMessages.getString(PKG, "JobZipFiles.Cant_Rename_Temp1.Label") + fileZip.getAbsolutePath() + BaseMessages.getString(PKG, "JobZipFiles.Cant_Rename_Temp2.Label") + tempFile.getAbsolutePath() + BaseMessages.getString(PKG, "JobZipFiles.Cant_Rename_Temp3.Label"));
}
if (log.isDebug()) {
logDebug(BaseMessages.getString(PKG, "JobZipFiles.Zip_FileAppend1.Label") + localrealZipfilename + BaseMessages.getString(PKG, "JobZipFiles.Zip_FileAppend2.Label"));
}
}
if (log.isDetailed()) {
logDetailed(BaseMessages.getString(PKG, "JobZipFiles.Files_Found1.Label") + fileList.length + BaseMessages.getString(PKG, "JobZipFiles.Files_Found2.Label") + localSourceFilename + BaseMessages.getString(PKG, "JobZipFiles.Files_Found3.Label"));
}
// Prepare Zip File
buffer = new byte[18024];
dest = KettleVFS.getOutputStream(localrealZipfilename, this, false);
buff = new BufferedOutputStreamWithCloseDetection(dest);
out = new ZipOutputStream(buff);
HashSet<String> fileSet = new HashSet<String>();
if (renameOk) {
// User want to append files to existing Zip file
// The idea is to rename the existing zip file to a temporary file
// and then adds all entries in the existing zip along with the new files,
// excluding the zip entries that have the same name as one of the new files.
zin = new ZipInputStream(new FileInputStream(tempFile));
entry = zin.getNextEntry();
while (entry != null) {
String name = entry.getName();
if (!fileSet.contains(name)) {
// Add ZIP entry to output stream.
out.putNextEntry(new ZipEntry(name));
// Transfer bytes from the ZIP file to the output file
int len;
while ((len = zin.read(buffer)) > 0) {
out.write(buffer, 0, len);
}
fileSet.add(name);
}
entry = zin.getNextEntry();
}
// Close the streams
zin.close();
}
// Set the method
out.setMethod(ZipOutputStream.DEFLATED);
// Set the compression level
if (compressionRate == 0) {
out.setLevel(Deflater.NO_COMPRESSION);
} else if (compressionRate == 1) {
out.setLevel(Deflater.DEFAULT_COMPRESSION);
}
if (compressionRate == 2) {
out.setLevel(Deflater.BEST_COMPRESSION);
}
if (compressionRate == 3) {
out.setLevel(Deflater.BEST_SPEED);
}
// Track the zipped files (after that we will move or delete them...)
FileObject[] zippedFiles = new FileObject[fileList.length];
int fileNum = 0;
// Get the files in the list...
for (int i = 0; i < fileList.length && !parentJob.isStopped(); i++) {
boolean getIt = true;
boolean getItexclude = false;
// ... only if the target is a folder!
if (isSourceDirectory) {
// If we include sub-folders, we match on the whole name, not just the basename
//
String filename;
if (includingSubFolders) {
filename = fileList[i].getName().getPath();
} else {
filename = fileList[i].getName().getBaseName();
}
if (pattern != null) {
// Matches the base name of the file (backward compatible!)
//
Matcher matcher = pattern.matcher(filename);
getIt = matcher.matches();
}
if (patternExclude != null) {
Matcher matcherexclude = patternExclude.matcher(filename);
getItexclude = matcherexclude.matches();
}
}
// Get processing File
String targetFilename = KettleVFS.getFilename(fileList[i]);
if (sourceFileOrFolder.getType().equals(FileType.FILE)) {
targetFilename = localSourceFilename;
}
FileObject file = KettleVFS.getFileObject(targetFilename, this);
boolean isTargetDirectory = file.exists() && file.getType().equals(FileType.FOLDER);
if (getIt && !getItexclude && !isTargetDirectory && !fileSet.contains(targetFilename)) {
// We can add the file to the Zip Archive
if (log.isDebug()) {
logDebug(BaseMessages.getString(PKG, "JobZipFiles.Add_FilesToZip1.Label") + fileList[i] + BaseMessages.getString(PKG, "JobZipFiles.Add_FilesToZip2.Label") + localSourceFilename + BaseMessages.getString(PKG, "JobZipFiles.Add_FilesToZip3.Label"));
}
// Associate a file input stream for the current file
InputStream in = KettleVFS.getInputStream(file);
// Add ZIP entry to output stream.
//
String relativeName;
String fullName = fileList[i].getName().getPath();
String basePath = sourceFileOrFolder.getName().getPath();
if (isSourceDirectory) {
if (fullName.startsWith(basePath)) {
relativeName = fullName.substring(basePath.length() + 1);
} else {
relativeName = fullName;
}
} else if (isFromPrevious) {
int depth = determineDepth(environmentSubstitute(storedSourcePathDepth));
relativeName = determineZipfilenameForDepth(fullName, depth);
} else {
relativeName = fileList[i].getName().getBaseName();
}
out.putNextEntry(new ZipEntry(relativeName));
int len;
while ((len = in.read(buffer)) > 0) {
out.write(buffer, 0, len);
}
out.flush();
out.closeEntry();
// Close the current file input stream
in.close();
// Get Zipped File
zippedFiles[fileNum] = fileList[i];
fileNum = fileNum + 1;
}
}
// Close the ZipOutPutStream
out.close();
buff.close();
dest.close();
if (log.isBasic()) {
logBasic(BaseMessages.getString(PKG, "JobZipFiles.Log.TotalZippedFiles", "" + zippedFiles.length));
}
// Delete Temp File
if (tempFile != null) {
tempFile.delete();
}
// -----Get the list of Zipped Files and Move or Delete Them
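// afterZip encodes the post-zip action: 1 = delete the source files, 2 = move them to the destination folder.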
if (afterZip == 1 || afterZip == 2) {
// iterate through the array of Zipped files
for (int i = 0; i < zippedFiles.length; i++) {
if (zippedFiles[i] != null) {
// Delete, Move File
FileObject fileObjectd = zippedFiles[i];
if (!isSourceDirectory) {
fileObjectd = KettleVFS.getFileObject(localSourceFilename, this);
}
// Here we can move, delete files
if (afterZip == 1) {
// Delete File
boolean deleted = fileObjectd.delete();
if (!deleted) {
resultat = false;
logError(BaseMessages.getString(PKG, "JobZipFiles.Cant_Delete_File1.Label") + localSourceFilename + Const.FILE_SEPARATOR + zippedFiles[i] + BaseMessages.getString(PKG, "JobZipFiles.Cant_Delete_File2.Label"));
}
// File deleted
if (log.isDebug()) {
logDebug(BaseMessages.getString(PKG, "JobZipFiles.File_Deleted1.Label") + localSourceFilename + Const.FILE_SEPARATOR + zippedFiles[i] + BaseMessages.getString(PKG, "JobZipFiles.File_Deleted2.Label"));
}
} else if (afterZip == 2) {
// Move File
FileObject fileObjectm = null;
try {
fileObjectm = KettleVFS.getFileObject(realMovetodirectory + Const.FILE_SEPARATOR + fileObjectd.getName().getBaseName(), this);
fileObjectd.moveTo(fileObjectm);
} catch (IOException e) {
logError(BaseMessages.getString(PKG, "JobZipFiles.Cant_Move_File1.Label") + zippedFiles[i] + BaseMessages.getString(PKG, "JobZipFiles.Cant_Move_File2.Label") + e.getMessage());
resultat = false;
} finally {
try {
if (fileObjectm != null) {
fileObjectm.close();
}
} catch (Exception e) {
if (fileObjectm != null) {
logError("Error closing file '" + fileObjectm.toString() + "'", e);
}
}
}
// File moved
if (log.isDebug()) {
logDebug(BaseMessages.getString(PKG, "JobZipFiles.File_Moved1.Label") + zippedFiles[i] + BaseMessages.getString(PKG, "JobZipFiles.File_Moved2.Label"));
}
}
}
}
}
if (addFileToResult) {
// Add file to result files name
ResultFile resultFile = new ResultFile(ResultFile.FILE_TYPE_GENERAL, fileObject, parentJob.getJobname(), toString());
result.getResultFiles().put(resultFile.getFile().toString(), resultFile);
}
resultat = true;
}
}
} catch (Exception e) {
logError(BaseMessages.getString(PKG, "JobZipFiles.Cant_CreateZipFile1.Label") + localrealZipfilename + BaseMessages.getString(PKG, "JobZipFiles.Cant_CreateZipFile2.Label"), e);
resultat = false;
} finally {
if (fileObject != null) {
try {
fileObject.close();
fileObject = null;
} catch (IOException ex) {
logError("Error closing file '" + fileObject.toString() + "'", ex);
}
}
try {
if (out != null) {
out.close();
}
if (buff != null) {
buff.close();
}
if (dest != null) {
dest.close();
}
if (zin != null) {
zin.close();
}
} catch (IOException ex) {
logError("Error closing zip file entry for file '" + originFile.toString() + "'", ex);
}
}
} else {
resultat = false;
if (localrealZipfilename == null) {
logError(BaseMessages.getString(PKG, "JobZipFiles.No_ZipFile_Defined.Label"));
}
if (!orginExist) {
logError(BaseMessages.getString(PKG, "JobZipFiles.No_FolderCible_Defined.Label", localSourceFilename));
}
}
// return the status flag
return resultat;
}
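The append branch above (the renameOk path) relies on a classic java.util.zip trick: since ZipOutputStream cannot append in place, the existing archive is renamed to a temp file, its entries are streamed into a fresh archive, and new content is added while skipping name collisions. A stripped-down sketch of just that trick; class and method names here are ours, not Kettle's:

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
import java.util.zip.ZipOutputStream;

public class ZipAppendSketch {
    public static void appendEntry(File zip, String entryName, byte[] data) throws IOException {
        // ZipOutputStream cannot append in place, so move the archive aside first.
        File temp = File.createTempFile(zip.getName(), null);
        temp.delete(); // the target of renameTo must not exist
        if (!zip.renameTo(temp)) {
            throw new IOException("Could not rename " + zip + " to " + temp);
        }
        Set<String> written = new HashSet<>();
        try (ZipInputStream in = new ZipInputStream(new FileInputStream(temp));
             ZipOutputStream out = new ZipOutputStream(new FileOutputStream(zip))) {
            // Write the new entry first; it wins over an old entry of the same name.
            out.putNextEntry(new ZipEntry(entryName));
            out.write(data);
            out.closeEntry();
            written.add(entryName);
            // Stream every old entry across, skipping names already written.
            byte[] buffer = new byte[8192];
            for (ZipEntry e = in.getNextEntry(); e != null; e = in.getNextEntry()) {
                if (!written.add(e.getName())) {
                    continue;
                }
                out.putNextEntry(new ZipEntry(e.getName()));
                int len;
                while ((len = in.read(buffer)) > 0) {
                    out.write(buffer, 0, len);
                }
                out.closeEntry();
            }
        }
        temp.delete();
    }
}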
use of org.pentaho.di.core.ResultFile in project pentaho-kettle by pentaho.
the class JobEntryCopyFiles method processFileFolder.
boolean processFileFolder(String sourcefilefoldername, String destinationfilefoldername, String wildcard, Job parentJob, Result result) {
boolean entrystatus = false;
FileObject sourcefilefolder = null;
FileObject destinationfilefolder = null;
// Clear list files to remove after copy process
// This list is also added to result files name
list_files_remove.clear();
list_add_result.clear();
// Get real source, destination file and wildcard
String realSourceFilefoldername = environmentSubstitute(sourcefilefoldername);
String realDestinationFilefoldername = environmentSubstitute(destinationfilefoldername);
String realWildcard = environmentSubstitute(wildcard);
try {
sourcefilefolder = KettleVFS.getFileObject(realSourceFilefoldername, this);
destinationfilefolder = KettleVFS.getFileObject(realDestinationFilefoldername, this);
if (sourcefilefolder.exists()) {
// If the destination folder is missing, PDI will create it
if (CreateDestinationFolder(destinationfilefolder)) {
// Basic Tests
if (sourcefilefolder.getType().equals(FileType.FOLDER) && destination_is_a_file) {
// Source is a folder, destination is a file
// WARNING !!! CAN NOT COPY FOLDER TO FILE !!!
logError(BaseMessages.getString(PKG, "JobCopyFiles.Log.CanNotCopyFolderToFile", KettleVFS.getFriendlyURI(realSourceFilefoldername), KettleVFS.getFriendlyURI(realDestinationFilefoldername)));
NbrFail++;
} else {
if (destinationfilefolder.getType().equals(FileType.FOLDER) && sourcefilefolder.getType().equals(FileType.FILE)) {
// Source is a file, destination is a folder
// Copy the file to the destination folder
destinationfilefolder.copyFrom(sourcefilefolder.getParent(), new TextOneFileSelector(sourcefilefolder.getParent().toString(), sourcefilefolder.getName().getBaseName(), destinationfilefolder.toString()));
if (isDetailed()) {
logDetailed(BaseMessages.getString(PKG, "JobCopyFiles.Log.FileCopied", KettleVFS.getFriendlyURI(sourcefilefolder), KettleVFS.getFriendlyURI(destinationfilefolder)));
}
} else if (sourcefilefolder.getType().equals(FileType.FILE) && destination_is_a_file) {
// Source is a file, destination is a file
destinationfilefolder.copyFrom(sourcefilefolder, new TextOneToOneFileSelector(destinationfilefolder));
} else {
// Both source and destination are folders
if (isDetailed()) {
logDetailed(" ");
logDetailed(BaseMessages.getString(PKG, "JobCopyFiles.Log.FetchFolder", KettleVFS.getFriendlyURI(sourcefilefolder)));
}
TextFileSelector textFileSelector = new TextFileSelector(sourcefilefolder, destinationfilefolder, realWildcard, parentJob);
try {
destinationfilefolder.copyFrom(sourcefilefolder, textFileSelector);
} finally {
textFileSelector.shutdown();
}
}
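// Note: the selectors above populate list_files_remove and list_add_result as a side
// effect while copyFrom visits each file; the two blocks below consume those lists.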
// Remove Files if needed
if (remove_source_files && !list_files_remove.isEmpty()) {
String sourceFilefoldername = sourcefilefolder.toString();
int trimPathLength = sourceFilefoldername.length() + 1;
FileObject removeFile;
for (Iterator<String> iter = list_files_remove.iterator(); iter.hasNext() && !parentJob.isStopped(); ) {
String fileremoventry = iter.next();
// reset to null on each iteration
removeFile = null;
// Try to get the file relative to the existing connection
if (fileremoventry.startsWith(sourceFilefoldername)) {
if (trimPathLength < fileremoventry.length()) {
removeFile = sourcefilefolder.getChild(fileremoventry.substring(trimPathLength));
}
}
// Unable to retrieve file through existing connection; Get the file through a new VFS connection
if (removeFile == null) {
removeFile = KettleVFS.getFileObject(fileremoventry, this);
}
// Remove ONLY Files
if (removeFile.getType() == FileType.FILE) {
boolean deletefile = removeFile.delete();
logBasic(" ------ ");
if (!deletefile) {
logError(" " + BaseMessages.getString(PKG, "JobCopyFiles.Error.Exception.CanRemoveFileFolder", KettleVFS.getFriendlyURI(fileremoventry)));
} else {
if (isDetailed()) {
logDetailed(" " + BaseMessages.getString(PKG, "JobCopyFiles.Log.FileFolderRemoved", KettleVFS.getFriendlyURI(fileremoventry)));
}
}
}
}
}
// Add files to result files name
if (add_result_filesname && !list_add_result.isEmpty()) {
String destinationFilefoldername = destinationfilefolder.toString();
int trimPathLength = destinationFilefoldername.length() + 1;
FileObject addFile;
for (Iterator<String> iter = list_add_result.iterator(); iter.hasNext(); ) {
String fileaddentry = iter.next();
// reset to null on each iteration
addFile = null;
// Try to get the file relative to the existing connection
if (fileaddentry.startsWith(destinationFilefoldername)) {
if (trimPathLength < fileaddentry.length()) {
addFile = destinationfilefolder.getChild(fileaddentry.substring(trimPathLength));
}
}
// Unable to retrieve file through existing connection; Get the file through a new VFS connection
if (addFile == null) {
addFile = KettleVFS.getFileObject(fileaddentry, this);
}
// Add ONLY Files
if (addFile.getType() == FileType.FILE) {
ResultFile resultFile = new ResultFile(ResultFile.FILE_TYPE_GENERAL, addFile, parentJob.getJobname(), toString());
result.getResultFiles().put(resultFile.getFile().toString(), resultFile);
if (isDetailed()) {
logDetailed(" ------ ");
logDetailed(" " + BaseMessages.getString(PKG, "JobCopyFiles.Log.FileAddedToResultFilesName", KettleVFS.getFriendlyURI(fileaddentry)));
}
}
}
}
}
entrystatus = true;
} else {
// Destination Folder or Parent folder is missing
logError(BaseMessages.getString(PKG, "JobCopyFiles.Error.DestinationFolderNotFound", KettleVFS.getFriendlyURI(realDestinationFilefoldername)));
}
} else {
logError(BaseMessages.getString(PKG, "JobCopyFiles.Error.SourceFileNotExists", KettleVFS.getFriendlyURI(realSourceFilefoldername)));
}
} catch (FileSystemException fse) {
logError(BaseMessages.getString(PKG, "JobCopyFiles.Error.Exception.CopyProcessFileSystemException", fse.getMessage()));
Throwable throwable = fse.getCause();
while (throwable != null) {
logError(BaseMessages.getString(PKG, "JobCopyFiles.Log.CausedBy", throwable.getMessage()));
throwable = throwable.getCause();
}
} catch (Exception e) {
logError(BaseMessages.getString(PKG, "JobCopyFiles.Error.Exception.CopyProcess", KettleVFS.getFriendlyURI(realSourceFilefoldername), KettleVFS.getFriendlyURI(realDestinationFilefoldername), e.getMessage()), e);
} finally {
if (sourcefilefolder != null) {
try {
sourcefilefolder.close();
sourcefilefolder = null;
} catch (IOException ex) {
/* Ignore */
}
}
if (destinationfilefolder != null) {
try {
destinationfilefolder.close();
destinationfilefolder = null;
} catch (IOException ex) {
/* Ignore */
}
}
}
return entrystatus;
}
use of org.pentaho.di.core.ResultFile in project pentaho-kettle by pentaho.
the class JobEntryCopyMoveResultFilenames method execute.
public Result execute(Result previousResult, int nr) {
Result result = previousResult;
result.setNrErrors(1);
result.setResult(false);
boolean deleteFile = getAction().equals("delete");
// Set the embedded Named Cluster MetaStore Provider Key so that it can be passed to VFS
if (parentJobMeta.getNamedClusterEmbedManager() != null) {
parentJobMeta.getNamedClusterEmbedManager().passEmbeddedMetastoreKey(this, parentJobMeta.getEmbeddedMetastoreProviderKey());
}
String realdestinationFolder = null;
if (!deleteFile) {
realdestinationFolder = environmentSubstitute(getDestinationFolder());
if (!CreateDestinationFolder(realdestinationFolder)) {
return result;
}
}
if (!Utils.isEmpty(wildcard)) {
wildcardPattern = Pattern.compile(environmentSubstitute(wildcard));
}
if (!Utils.isEmpty(wildcardexclude)) {
wildcardExcludePattern = Pattern.compile(environmentSubstitute(wildcardexclude));
}
if (previousResult != null) {
NrErrors = 0;
NrSuccess = 0;
limitFiles = Const.toInt(environmentSubstitute(getNrErrorsLessThan()), 10);
successConditionBroken = false;
successConditionBrokenExit = false;
FileObject file = null;
try {
int size = result.getResultFiles().size();
if (log.isBasic()) {
logBasic(BaseMessages.getString(PKG, "JobEntryCopyMoveResultFilenames.log.FilesFound", "" + size));
}
List<ResultFile> resultFiles = result.getResultFilesList();
if (resultFiles != null && resultFiles.size() > 0) {
for (Iterator<ResultFile> it = resultFiles.iterator(); it.hasNext() && !parentJob.isStopped(); ) {
if (successConditionBroken) {
logError(BaseMessages.getString(PKG, "JobEntryCopyMoveResultFilenames.Error.SuccessConditionbroken", "" + NrErrors));
throw new Exception(BaseMessages.getString(PKG, "JobEntryCopyMoveResultFilenames.Error.SuccessConditionbroken", "" + NrErrors));
}
ResultFile resultFile = it.next();
file = resultFile.getFile();
if (file != null && file.exists()) {
if (!specifywildcard || (CheckFileWildcard(file.getName().getBaseName(), wildcardPattern, true) && !CheckFileWildcard(file.getName().getBaseName(), wildcardExcludePattern, false) && specifywildcard)) {
// Copy or Move file
if (!processFile(file, realdestinationFolder, result, parentJob, deleteFile)) {
// Update Errors
updateErrors();
}
}
} else {
logError(BaseMessages.getString(PKG, "JobEntryCopyMoveResultFilenames.log.ErrorCanNotFindFile", file.toString()));
// Update Errors
updateErrors();
}
}
// end for
}
} catch (Exception e) {
logError(BaseMessages.getString(PKG, "JobEntryCopyMoveResultFilenames.Error", e.toString()));
} finally {
if (file != null) {
try {
file.close();
file = null;
} catch (Exception ex) {
/* Ignore */
}
}
}
}
// Success Condition
result.setNrErrors(NrErrors);
result.setNrLinesWritten(NrSuccess);
if (getSuccessStatus()) {
result.setResult(true);
}
return result;
}
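The last entry shows the consumption side of the same API: result files are read back out of the Result and filtered. A stripped-down sketch of that scan, on the pattern of the loop above; the wildcard handling is simplified and the class and method names are illustrative, not Kettle's:

import java.util.List;
import java.util.regex.Pattern;
import org.apache.commons.vfs2.FileObject;
import org.pentaho.di.core.Result;
import org.pentaho.di.core.ResultFile;

public class ResultFileScan {
    // Hypothetical helper: counts result files whose base name matches a regex,
    // echoing the wildcard filtering the entry above performs.
    public static int countMatching(Result result, String regex) throws Exception {
        Pattern pattern = Pattern.compile(regex);
        int matches = 0;
        List<ResultFile> resultFiles = result.getResultFilesList();
        if (resultFiles == null) {
            return 0;
        }
        for (ResultFile resultFile : resultFiles) {
            FileObject file = resultFile.getFile();
            // Match on the base name, as the copy/move entry does.
            if (file != null && file.exists() && pattern.matcher(file.getName().getBaseName()).matches()) {
                matches++;
            }
        }
        return matches;
    }
}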