Use of org.apache.commons.vfs2.VFS in project jackrabbit by apache.
The class VFSUtils, method getChildrenOfTypes.
private static List<FileObject> getChildrenOfTypes(FileObject folderObject, Set<FileType> fileTypes) throws DataStoreException {
    try {
        String folderBaseName = folderObject.getName().getBaseName();
        FileObject[] children = folderObject.getChildren();
        List<FileObject> files = new ArrayList<FileObject>(children.length);
        String childBaseName;
        for (int i = 0; i < children.length; i++) {
            childBaseName = children[i].getName().getBaseName();
            FileType fileType = null;
            try {
                fileType = children[i].getType();
            } catch (FileSystemException notDetermineTypeEx) {
                if (folderBaseName.equals(childBaseName)) {
                    // Ignore this case.
                    // Some WebDAV servers (or VFS itself) seem to include the folder itself as a child
                    // with an imaginary file type, and throw a FileSystemException saying
                    // "Could not determine the type of file" in that case.
                } else {
                    throw notDetermineTypeEx;
                }
            }
            if (fileType != null && fileTypes.contains(fileType)) {
                files.add(children[i]);
            }
        }
        return files;
    } catch (FileSystemException e) {
        throw new DataStoreException("Could not find children under " + folderObject.getName().getFriendlyURI(), e);
    }
}
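For context, a minimal standalone sketch of the commons-vfs2 calls this helper relies on (VFS.getManager(), resolveFile(), getChildren(), getType()). The class name and the file:///tmp URI are placeholders, not part of Jackrabbit:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

import org.apache.commons.vfs2.FileObject;
import org.apache.commons.vfs2.FileSystemException;
import org.apache.commons.vfs2.FileSystemManager;
import org.apache.commons.vfs2.FileType;
import org.apache.commons.vfs2.VFS;

public class ListChildrenSketch {
    public static void main(String[] args) throws FileSystemException {
        // Obtain the default manager and resolve a folder; the URI is a placeholder.
        FileSystemManager manager = VFS.getManager();
        FileObject folder = manager.resolveFile("file:///tmp");
        // Keep only plain files, mirroring the Set<FileType> filter above.
        Set<FileType> wanted = new HashSet<>(Arrays.asList(FileType.FILE));
        for (FileObject child : folder.getChildren()) {
            if (wanted.contains(child.getType())) {
                System.out.println(child.getName().getBaseName());
            }
        }
        folder.close();
    }
}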
Use of org.apache.commons.vfs2.VFS in project pentaho-kettle by pentaho.
The class BaseFileInputStep, method openNextFile.
/**
 * Open the next VFS file for processing.
 *
 * This method will support different parallelization methods later.
 */
protected boolean openNextFile() {
    try {
        if (data.currentFileIndex >= data.files.nrOfFiles()) {
            // all files already processed
            return false;
        }
        // Grab the next file to process.
        data.file = data.files.getFile(data.currentFileIndex);
        data.filename = KettleVFS.getFilename(data.file);
        fillFileAdditionalFields(data, data.file);
        if (meta.inputFiles.passingThruFields) {
            data.currentPassThruFieldsRow = data.passThruFields.get(data.file);
        }
        //
        if (meta.inputFiles.isaddresult) {
            ResultFile resultFile = new ResultFile(ResultFile.FILE_TYPE_GENERAL, data.file, getTransMeta().getName(), toString());
            resultFile.setComment("File was read by a Text File input step");
            addResultFile(resultFile);
        }
        if (log.isBasic()) {
            logBasic("Opening file: " + data.file.getName().getFriendlyURI());
        }
        data.dataErrorLineHandler.handleFile(data.file);
        data.reader = createReader(meta, data, data.file);
    } catch (Exception e) {
        if (!handleOpenFileException(e)) {
            return false;
        }
        data.reader = null;
    }
    // Move the file pointer ahead!
    data.currentFileIndex++;
    return true;
}
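The step above resolves files through Kettle's KettleVFS wrapper. With the plain commons-vfs2 API, the open-and-read part of such a step looks roughly like the following sketch; the class name and path are hypothetical:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;

import org.apache.commons.vfs2.FileObject;
import org.apache.commons.vfs2.VFS;

public class ReadFileSketch {
    public static void main(String[] args) throws Exception {
        // Resolve and read a file; the URI is a placeholder.
        FileObject file = VFS.getManager().resolveFile("file:///tmp/input.txt");
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(file.getContent().getInputStream(), StandardCharsets.UTF_8))) {
            String line;
            while ((line = reader.readLine()) != null) {
                System.out.println(line);
            }
        } finally {
            file.close();
        }
    }
}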
Use of org.apache.commons.vfs2.VFS in project pentaho-kettle by pentaho.
The class JobEntryUnZip, method execute.
public Result execute(Result previousResult, int nr) {
    Result result = previousResult;
    result.setResult(false);
    result.setNrErrors(1);
    List<RowMetaAndData> rows = result.getRows();
    RowMetaAndData resultRow = null;
    String realFilenameSource = environmentSubstitute(zipFilename);
    String realWildcardSource = environmentSubstitute(wildcardSource);
    String realWildcard = environmentSubstitute(wildcard);
    String realWildcardExclude = environmentSubstitute(wildcardexclude);
    String realTargetdirectory = environmentSubstitute(sourcedirectory);
    String realMovetodirectory = environmentSubstitute(movetodirectory);
    // Set the embedded NamedCluster MetaStore provider key so that it can be passed to VFS
    if (parentJobMeta.getNamedClusterEmbedManager() != null) {
        parentJobMeta.getNamedClusterEmbedManager().passEmbeddedMetastoreKey(this, parentJobMeta.getEmbeddedMetastoreProviderKey());
    }
    limitFiles = Const.toInt(environmentSubstitute(getLimit()), 10);
    NrErrors = 0;
    NrSuccess = 0;
    successConditionBroken = false;
    successConditionBrokenExit = false;
    if (isfromprevious) {
        if (log.isDetailed()) {
            logDetailed(BaseMessages.getString(PKG, "JobUnZip.Log.ArgFromPrevious.Found", (rows != null ? rows.size() : 0) + ""));
        }
        if (rows.size() == 0) {
            return result;
        }
    } else {
        if (Utils.isEmpty(zipFilename)) {
            // Zip file/folder is missing
            logError(BaseMessages.getString(PKG, "JobUnZip.No_ZipFile_Defined.Label"));
            return result;
        }
    }
    FileObject fileObject = null;
    FileObject targetdir = null;
    FileObject movetodir = null;
    try {
        if (Utils.isEmpty(realTargetdirectory)) {
            logError(BaseMessages.getString(PKG, "JobUnZip.Error.TargetFolderMissing"));
            return result;
        }
        boolean exitjobentry = false;
        // Target folder
        targetdir = KettleVFS.getFileObject(realTargetdirectory, this);
        if (!targetdir.exists()) {
            if (createfolder) {
                targetdir.createFolder();
                if (log.isDetailed()) {
                    logDetailed(BaseMessages.getString(PKG, "JobUnZip.Log.TargetFolderCreated", realTargetdirectory));
                }
            } else {
                log.logError(BaseMessages.getString(PKG, "JobUnZip.TargetFolderNotFound.Label"));
                exitjobentry = true;
            }
        } else {
            if (!(targetdir.getType() == FileType.FOLDER)) {
                log.logError(BaseMessages.getString(PKG, "JobUnZip.TargetFolderNotFolder.Label", realTargetdirectory));
                exitjobentry = true;
            } else {
                if (log.isDetailed()) {
                    logDetailed(BaseMessages.getString(PKG, "JobUnZip.TargetFolderExists.Label", realTargetdirectory));
                }
            }
        }
        // movetodirectory must be provided
        if (afterunzip == 2) {
            if (Utils.isEmpty(movetodirectory)) {
                log.logError(BaseMessages.getString(PKG, "JobUnZip.MoveToDirectoryEmpty.Label"));
                exitjobentry = true;
            } else {
                movetodir = KettleVFS.getFileObject(realMovetodirectory, this);
                if (!(movetodir.exists()) || !(movetodir.getType() == FileType.FOLDER)) {
                    if (createMoveToDirectory) {
                        movetodir.createFolder();
                        if (log.isDetailed()) {
                            logDetailed(BaseMessages.getString(PKG, "JobUnZip.Log.MoveToFolderCreated", realMovetodirectory));
                        }
                    } else {
                        log.logError(BaseMessages.getString(PKG, "JobUnZip.MoveToDirectoryNotExists.Label"));
                        exitjobentry = true;
                    }
                }
            }
        }
        // We found errors... now exit
        if (exitjobentry) {
            return result;
        }
        if (isfromprevious) {
            if (rows != null) {
                // Copy the input rows to the (command line) arguments
                for (int iteration = 0; iteration < rows.size() && !parentJob.isStopped(); iteration++) {
                    if (successConditionBroken) {
                        if (!successConditionBrokenExit) {
                            logError(BaseMessages.getString(PKG, "JobUnZip.Error.SuccessConditionbroken", "" + NrErrors));
                            successConditionBrokenExit = true;
                        }
                        result.setNrErrors(NrErrors);
                        return result;
                    }
                    resultRow = rows.get(iteration);
                    // Get the source file/folder and the wildcard
                    realFilenameSource = resultRow.getString(0, null);
                    realWildcardSource = resultRow.getString(1, null);
                    fileObject = KettleVFS.getFileObject(realFilenameSource, this);
                    if (fileObject.exists()) {
                        processOneFile(result, parentJob, fileObject, realTargetdirectory, realWildcard, realWildcardExclude, movetodir, realMovetodirectory, realWildcardSource);
                    } else {
                        updateErrors();
                        logError(BaseMessages.getString(PKG, "JobUnZip.Error.CanNotFindFile", realFilenameSource));
                    }
                }
            }
        } else {
            fileObject = KettleVFS.getFileObject(realFilenameSource, this);
            if (!fileObject.exists()) {
                log.logError(BaseMessages.getString(PKG, "JobUnZip.ZipFile.NotExists.Label", realFilenameSource));
                return result;
            }
            if (log.isDetailed()) {
                logDetailed(BaseMessages.getString(PKG, "JobUnZip.Zip_FileExists.Label", realFilenameSource));
            }
            if (Utils.isEmpty(sourcedirectory)) {
                log.logError(BaseMessages.getString(PKG, "JobUnZip.SourceFolderNotFound.Label"));
                return result;
            }
            processOneFile(result, parentJob, fileObject, realTargetdirectory, realWildcard, realWildcardExclude, movetodir, realMovetodirectory, realWildcardSource);
        }
    } catch (Exception e) {
        log.logError(BaseMessages.getString(PKG, "JobUnZip.ErrorUnzip.Label", realFilenameSource, e.getMessage()));
        updateErrors();
    } finally {
        if (fileObject != null) {
            try {
                fileObject.close();
            } catch (IOException ex) {
                /* Ignore */
            }
        }
        if (targetdir != null) {
            try {
                targetdir.close();
            } catch (IOException ex) {
                /* Ignore */
            }
        }
        if (movetodir != null) {
            try {
                movetodir.close();
            } catch (IOException ex) {
                /* Ignore */
            }
        }
    }
    result.setNrErrors(NrErrors);
    result.setNrLinesWritten(NrSuccess);
    if (getSuccessStatus()) {
        result.setResult(true);
    }
    displayResults();
    return result;
}
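The target-folder handling above reduces to a small commons-vfs2 pattern: exists(), createFolder(), and a FileType check. A hedged sketch with a hypothetical path (per the VFS javadoc, createFolder() also creates any missing ancestor folders):

import org.apache.commons.vfs2.FileObject;
import org.apache.commons.vfs2.FileType;
import org.apache.commons.vfs2.VFS;

public class EnsureFolderSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical target path, standing in for realTargetdirectory above.
        FileObject target = VFS.getManager().resolveFile("file:///tmp/unzip-target");
        if (!target.exists()) {
            target.createFolder(); // also creates any missing parent folders
        } else if (target.getType() != FileType.FOLDER) {
            throw new IllegalStateException(target.getName().getFriendlyURI() + " exists but is not a folder");
        }
        target.close();
    }
}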
Use of org.apache.commons.vfs2.VFS in project pentaho-kettle by pentaho.
The class JobEntryWaitForFile, method execute.
public Result execute(Result previousResult, int nr) {
    Result result = previousResult;
    result.setResult(false);
    // start time (in seconds)
    long timeStart = System.currentTimeMillis() / 1000;
    if (filename != null) {
        FileObject fileObject = null;
        String realFilename = getRealFilename();
        // Set the embedded NamedCluster MetaStore provider key so that it can be passed to VFS
        if (parentJobMeta.getNamedClusterEmbedManager() != null) {
            parentJobMeta.getNamedClusterEmbedManager().passEmbeddedMetastoreKey(this, parentJobMeta.getEmbeddedMetastoreProviderKey());
        }
        try {
            fileObject = KettleVFS.getFileObject(realFilename, this);
            long iMaximumTimeout = Const.toInt(getRealMaximumTimeout(), Const.toInt(DEFAULT_MAXIMUM_TIMEOUT, 0));
            long iCycleTime = Const.toInt(getRealCheckCycleTime(), Const.toInt(DEFAULT_CHECK_CYCLE_TIME, 0));
            //
            if (iMaximumTimeout < 0) {
                iMaximumTimeout = Const.toInt(DEFAULT_MAXIMUM_TIMEOUT, 0);
                if (log.isBasic()) {
                    logBasic("Maximum timeout invalid, reset to " + iMaximumTimeout);
                }
            }
            if (iCycleTime < 1) {
                // If lower than 1, set to the default
                iCycleTime = Const.toInt(DEFAULT_CHECK_CYCLE_TIME, 1);
                if (log.isBasic()) {
                    logBasic("Check cycle time invalid, reset to " + iCycleTime);
                }
            }
            if (iMaximumTimeout == 0) {
                if (log.isBasic()) {
                    logBasic("Waiting indefinitely for file [" + realFilename + "]");
                }
            } else {
                if (log.isBasic()) {
                    logBasic("Waiting " + iMaximumTimeout + " seconds for file [" + realFilename + "]");
                }
            }
            boolean continueLoop = true;
            while (continueLoop && !parentJob.isStopped()) {
                fileObject = KettleVFS.getFileObject(realFilename, this);
                if (fileObject.exists()) {
                    // file exists, we're happy to exit
                    if (log.isBasic()) {
                        logBasic("Detected file [" + realFilename + "] within timeout");
                    }
                    result.setResult(true);
                    continueLoop = false;
                    // add filename to result filenames
                    if (addFilenameToResult && fileObject.getType() == FileType.FILE) {
                        ResultFile resultFile = new ResultFile(ResultFile.FILE_TYPE_GENERAL, fileObject, parentJob.getJobname(), toString());
                        resultFile.setComment(BaseMessages.getString(PKG, "JobWaitForFile.FilenameAdded"));
                        result.getResultFiles().put(resultFile.getFile().toString(), resultFile);
                    }
                } else {
                    long now = System.currentTimeMillis() / 1000;
                    if ((iMaximumTimeout > 0) && (now > (timeStart + iMaximumTimeout))) {
                        continueLoop = false;
                        // the file doesn't exist after the timeout; report success or failure per the flag
                        if (isSuccessOnTimeout()) {
                            if (log.isBasic()) {
                                logBasic("Didn't detect file [" + realFilename + "] before timeout, success");
                            }
                            result.setResult(true);
                        } else {
                            if (log.isBasic()) {
                                logBasic("Didn't detect file [" + realFilename + "] before timeout, failure");
                            }
                            result.setResult(false);
                        }
                    }
                    // sleep algorithm
                    long sleepTime = 0;
                    if (iMaximumTimeout == 0) {
                        sleepTime = iCycleTime;
                    } else {
                        if ((now + iCycleTime) < (timeStart + iMaximumTimeout)) {
                            sleepTime = iCycleTime;
                        } else {
                            sleepTime = iCycleTime - ((now + iCycleTime) - (timeStart + iMaximumTimeout));
                        }
                    }
                    try {
                        if (sleepTime > 0) {
                            if (log.isDetailed()) {
                                logDetailed("Sleeping " + sleepTime + " seconds before next check for file [" + realFilename + "]");
                            }
                            Thread.sleep(sleepTime * 1000);
                        }
                    } catch (InterruptedException e) {
                        // something strange happened
                        result.setResult(false);
                        continueLoop = false;
                    }
                }
            }
            if (!parentJob.isStopped() && fileObject.exists() && isFileSizeCheck()) {
                long oldSize = -1;
                long newSize = fileObject.getContent().getSize();
                if (log.isDetailed()) {
                    logDetailed("File [" + realFilename + "] is " + newSize + " bytes long");
                }
                if (log.isBasic()) {
                    logBasic("Waiting until file [" + realFilename + "] stops growing for " + iCycleTime + " seconds");
                }
                while (oldSize != newSize && !parentJob.isStopped()) {
                    try {
                        if (log.isDetailed()) {
                            logDetailed("Sleeping " + iCycleTime + " seconds, waiting for file [" + realFilename + "] to stop growing");
                        }
                        Thread.sleep(iCycleTime * 1000);
                    } catch (InterruptedException e) {
                        // something strange happened
                        result.setResult(false);
                        continueLoop = false;
                    }
                    oldSize = newSize;
                    newSize = fileObject.getContent().getSize();
                    if (log.isDetailed()) {
                        logDetailed("File [" + realFilename + "] is " + newSize + " bytes long");
                    }
                }
                if (log.isBasic()) {
                    logBasic("Stopped waiting for file [" + realFilename + "] to stop growing");
                }
            }
            if (parentJob.isStopped()) {
                result.setResult(false);
            }
        } catch (Exception e) {
            logBasic("Exception while waiting for file [" + realFilename + "] to stop growing", e);
        } finally {
            if (fileObject != null) {
                try {
                    fileObject.close();
                } catch (Exception e) {
                    // Ignore errors
                }
            }
        }
    } else {
        logError("No filename is defined.");
    }
    return result;
}
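Stripped of the job plumbing, the wait loop above is a poll on FileObject.exists(). A minimal sketch with a hypothetical path and timings, calling refresh() so cached attributes don't mask the file's arrival (the Kettle code achieves the same by re-resolving the FileObject on every iteration):

import org.apache.commons.vfs2.FileObject;
import org.apache.commons.vfs2.VFS;

public class WaitForFileSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical path and timings; the job entry reads these from its settings.
        FileObject file = VFS.getManager().resolveFile("file:///tmp/trigger.done");
        long deadline = System.currentTimeMillis() + 60_000L; // give up after 60 seconds
        while (!file.exists() && System.currentTimeMillis() < deadline) {
            Thread.sleep(1_000L); // poll once per second
            file.refresh();       // discard cached attributes before the next exists() check
        }
        if (file.exists()) {
            System.out.println("Found file, size " + file.getContent().getSize() + " bytes");
        } else {
            System.out.println("Timed out waiting for " + file.getName().getFriendlyURI());
        }
        file.close();
    }
}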
Use of org.apache.commons.vfs2.VFS in project pentaho-kettle by pentaho.
The class JobEntryJob, method execute.
@Override
public Result execute(Result result, int nr) throws KettleException {
    result.setEntryNr(nr);
    LogChannelFileWriter logChannelFileWriter = null;
    LogLevel jobLogLevel = parentJob.getLogLevel();
    // Set the embedded NamedCluster MetaStore provider key so that it can be passed to VFS
    if (parentJobMeta.getNamedClusterEmbedManager() != null) {
        parentJobMeta.getNamedClusterEmbedManager().passEmbeddedMetastoreKey(this, parentJobMeta.getEmbeddedMetastoreProviderKey());
    }
    if (setLogfile) {
        String realLogFilename = environmentSubstitute(getLogFilename());
        // if we do not have one, we must fail
        if (Utils.isEmpty(realLogFilename)) {
            logError(BaseMessages.getString(PKG, "JobJob.Exception.LogFilenameMissing"));
            result.setNrErrors(1);
            result.setResult(false);
            return result;
        }
        // create the parent folder?
        if (!createParentFolder(realLogFilename)) {
            result.setNrErrors(1);
            result.setResult(false);
            return result;
        }
        try {
            logChannelFileWriter = new LogChannelFileWriter(this.getLogChannelId(), KettleVFS.getFileObject(realLogFilename), setAppendLogfile);
            logChannelFileWriter.startLogging();
        } catch (KettleException e) {
            logError("Unable to open file appender for file [" + getLogFilename() + "] : " + e.toString());
            logError(Const.getStackTracker(e));
            result.setNrErrors(1);
            result.setResult(false);
            return result;
        }
        jobLogLevel = logFileLevel;
    }
    try {
        // First load the job, outside of the loop...
        if (parentJob.getJobMeta() != null) {
            // Reset the internal variables again.
            // Maybe we should split up the variables even more, like in UNIX shells.
            // The internal variables need to be reset to be able to use them properly
            // in two sequential sub-jobs.
            parentJob.getJobMeta().setInternalKettleVariables();
        }
        //
        switch (specificationMethod) {
            case REPOSITORY_BY_NAME:
                if (log.isDetailed()) {
                    logDetailed("Loading job from repository : [" + directory + " : " + environmentSubstitute(jobname) + "]");
                }
                break;
            case FILENAME:
                if (log.isDetailed()) {
                    logDetailed("Loading job from XML file : [" + environmentSubstitute(filename) + "]");
                }
                break;
            case REPOSITORY_BY_REFERENCE:
                if (log.isDetailed()) {
                    logDetailed("Loading job from repository by reference : [" + jobObjectId + "]");
                }
                break;
            default:
                break;
        }
        JobMeta jobMeta = getJobMeta(rep, this);
        //
        if (jobMeta == null) {
            throw new KettleException("Unable to load the job: please specify the name and repository directory OR a filename");
        }
        verifyRecursiveExecution(parentJob, jobMeta);
        int iteration = 0;
        String[] args1 = arguments;
        // no arguments? Check the parent job's arguments
        if (args1 == null || args1.length == 0) {
            args1 = parentJob.getArguments();
        }
        copyVariablesFrom(parentJob);
        setParentVariableSpace(parentJob);
        //
        // For the moment only do variable translation at the start of a job, not
        // for every input row (if that would be switched on)
        //
        String[] args = null;
        if (args1 != null) {
            args = new String[args1.length];
            for (int idx = 0; idx < args1.length; idx++) {
                args[idx] = environmentSubstitute(args1[idx]);
            }
        }
        RowMetaAndData resultRow = null;
        boolean first = true;
        List<RowMetaAndData> rows = new ArrayList<RowMetaAndData>(result.getRows());
        while ((first && !execPerRow) || (execPerRow && rows != null && iteration < rows.size() && result.getNrErrors() == 0)) {
            first = false;
            //
            if (execPerRow) {
                result.getRows().clear();
            }
            if (rows != null && execPerRow) {
                resultRow = rows.get(iteration);
            } else {
                resultRow = null;
            }
            NamedParams namedParam = new NamedParamsDefault();
            //
            if (paramsFromPrevious) {
                String[] parentParameters = parentJob.listParameters();
                for (int idx = 0; idx < parentParameters.length; idx++) {
                    String par = parentParameters[idx];
                    String def = parentJob.getParameterDefault(par);
                    String val = parentJob.getParameterValue(par);
                    String des = parentJob.getParameterDescription(par);
                    namedParam.addParameterDefinition(par, def, des);
                    namedParam.setParameterValue(par, val);
                }
            }
            //
            if (parameters != null) {
                for (int idx = 0; idx < parameters.length; idx++) {
                    if (!Utils.isEmpty(parameters[idx])) {
                        //
                        if (Const.indexOfString(parameters[idx], namedParam.listParameters()) < 0) {
                            // We have a parameter
                            try {
                                namedParam.addParameterDefinition(parameters[idx], "", "Job entry runtime");
                            } catch (DuplicateParamException e) {
                                // Should never happen
                                //
                                logError("Duplicate parameter definition for " + parameters[idx]);
                            }
                        }
                        if (Utils.isEmpty(Const.trim(parameterFieldNames[idx]))) {
                            namedParam.setParameterValue(parameters[idx], Const.NVL(environmentSubstitute(parameterValues[idx]), ""));
                        } else {
                            // something filled in, in the field column...
                            //
                            String value = "";
                            if (resultRow != null) {
                                value = resultRow.getString(parameterFieldNames[idx], "");
                            }
                            namedParam.setParameterValue(parameters[idx], value);
                        }
                    }
                }
            }
            Result oneResult = new Result();
            List<RowMetaAndData> sourceRows = null;
            if (execPerRow) {
                if (argFromPrevious) {
                    // Copy the input row to the (command line) arguments
                    args = null;
                    if (resultRow != null) {
                        args = new String[resultRow.size()];
                        for (int i = 0; i < resultRow.size(); i++) {
                            args[i] = resultRow.getString(i, null);
                        }
                    }
                } else {
                    // Just pass a single row
                    List<RowMetaAndData> newList = new ArrayList<RowMetaAndData>();
                    newList.add(resultRow);
                    sourceRows = newList;
                }
                if (paramsFromPrevious) {
                    if (parameters != null) {
                        for (int idx = 0; idx < parameters.length; idx++) {
                            if (!Utils.isEmpty(parameters[idx])) {
                                // We have a parameter
                                if (Utils.isEmpty(Const.trim(parameterFieldNames[idx]))) {
                                    namedParam.setParameterValue(parameters[idx], Const.NVL(environmentSubstitute(parameterValues[idx]), ""));
                                } else {
                                    String fieldValue = "";
                                    if (resultRow != null) {
                                        fieldValue = resultRow.getString(parameterFieldNames[idx], "");
                                    }
                                    // Get the value from the input stream
                                    namedParam.setParameterValue(parameters[idx], Const.NVL(fieldValue, ""));
                                }
                            }
                        }
                    }
                }
            } else {
                if (argFromPrevious) {
                    // Only put the first row on the arguments
                    args = null;
                    if (resultRow != null) {
                        args = new String[resultRow.size()];
                        for (int i = 0; i < resultRow.size(); i++) {
                            args[i] = resultRow.getString(i, null);
                        }
                    }
                } else {
                    // Keep it as it was...
                    sourceRows = result.getRows();
                }
                if (paramsFromPrevious) {
                    if (parameters != null) {
                        for (int idx = 0; idx < parameters.length; idx++) {
                            if (!Utils.isEmpty(parameters[idx])) {
                                // We have a parameter
                                if (Utils.isEmpty(Const.trim(parameterFieldNames[idx]))) {
                                    namedParam.setParameterValue(parameters[idx], Const.NVL(environmentSubstitute(parameterValues[idx]), ""));
                                } else {
                                    String fieldValue = "";
                                    if (resultRow != null) {
                                        fieldValue = resultRow.getString(parameterFieldNames[idx], "");
                                    }
                                    // Get the value from the input stream
                                    namedParam.setParameterValue(parameters[idx], Const.NVL(fieldValue, ""));
                                }
                            }
                        }
                    }
                }
            }
            boolean doFallback = true;
            SlaveServer remoteSlaveServer = null;
            JobExecutionConfiguration executionConfiguration = new JobExecutionConfiguration();
            if (!Utils.isEmpty(runConfiguration)) {
                log.logBasic(BaseMessages.getString(PKG, "JobJob.RunConfig.Message"), runConfiguration);
                runConfiguration = environmentSubstitute(runConfiguration);
                executionConfiguration.setRunConfiguration(runConfiguration);
                try {
                    ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.SpoonTransBeforeStart.id, new Object[] { executionConfiguration, parentJob.getJobMeta(), jobMeta, rep });
                    if (!executionConfiguration.isExecutingLocally() && !executionConfiguration.isExecutingRemotely()) {
                        result.setResult(true);
                        return result;
                    }
                    remoteSlaveServer = executionConfiguration.getRemoteServer();
                    doFallback = false;
                } catch (KettleException e) {
                    log.logError(e.getMessage(), getName());
                    result.setNrErrors(1);
                    result.setResult(false);
                    return result;
                }
            }
            if (doFallback) {
                //
                if (!Utils.isEmpty(remoteSlaveServerName)) {
                    String realRemoteSlaveServerName = environmentSubstitute(remoteSlaveServerName);
                    remoteSlaveServer = parentJob.getJobMeta().findSlaveServer(realRemoteSlaveServerName);
                    if (remoteSlaveServer == null) {
                        throw new KettleException(BaseMessages.getString(PKG, "JobTrans.Exception.UnableToFindRemoteSlaveServer", realRemoteSlaveServerName));
                    }
                }
            }
            if (remoteSlaveServer == null) {
                // Local execution...
                //
                // Create a new job
                //
                job = new Job(rep, jobMeta, this);
                job.setParentJob(parentJob);
                job.setLogLevel(jobLogLevel);
                job.shareVariablesWith(this);
                job.setInternalKettleVariables(this);
                job.copyParametersFrom(jobMeta);
                job.setInteractive(parentJob.isInteractive());
                if (job.isInteractive()) {
                    job.getJobEntryListeners().addAll(parentJob.getJobEntryListeners());
                }
                // Pass the socket repository all around.
                //
                job.setSocketRepository(parentJob.getSocketRepository());
                // Set the parameters calculated above on this instance.
                //
                job.clearParameters();
                String[] parameterNames = job.listParameters();
                for (int idx = 0; idx < parameterNames.length; idx++) {
                    // Grab the parameter value set in the job entry
                    //
                    String thisValue = namedParam.getParameterValue(parameterNames[idx]);
                    if (!Utils.isEmpty(thisValue)) {
                        // Set the value as specified by the user in the job entry
                        //
                        job.setParameterValue(parameterNames[idx], thisValue);
                    } else {
                        //
                        if (isPassingAllParameters()) {
                            String parentValue = parentJob.getParameterValue(parameterNames[idx]);
                            if (!Utils.isEmpty(parentValue)) {
                                job.setParameterValue(parameterNames[idx], parentValue);
                            }
                        }
                    }
                }
                job.activateParameters();
                // Set the source rows we calculated above...
                //
                job.setSourceRows(sourceRows);
                // Don't forget the logging...
                job.beginProcessing();
                // Link the job with the sub-job
                parentJob.getJobTracker().addJobTracker(job.getJobTracker());
                // Link both ways!
                job.getJobTracker().setParentJobTracker(parentJob.getJobTracker());
                if (parentJob.getJobMeta().isBatchIdPassed()) {
                    job.setPassedBatchId(parentJob.getBatchId());
                }
                job.setArguments(args);
                //
                for (DelegationListener delegationListener : parentJob.getDelegationListeners()) {
                    // TODO: copy some settings in the job execution configuration, not strictly needed
                    // but the execution configuration information is useful in case of a job re-start
                    //
                    delegationListener.jobDelegationStarted(job, new JobExecutionConfiguration());
                }
                JobEntryJobRunner runner = new JobEntryJobRunner(job, result, nr, log);
                Thread jobRunnerThread = new Thread(runner);
                // PDI-6518
                // Added a UUID to the thread name; otherwise threads share names when job entries are
                // executed in parallel in a parent job. If that happens, contained transformations
                // start closing each other's connections.
                jobRunnerThread.setName(Const.NVL(job.getJobMeta().getName(), job.getJobMeta().getFilename()) + " UUID: " + UUID.randomUUID().toString());
                jobRunnerThread.start();
                //
                while (!runner.isFinished() && !parentJob.isStopped()) {
                    try {
                        Thread.sleep(0, 1);
                    } catch (InterruptedException e) {
                        // Ignore
                    }
                }
                // if the parent job was stopped, stop the sub-job too...
                if (parentJob.isStopped()) {
                    job.stopAll();
                    // Wait until finished!
                    runner.waitUntilFinished();
                }
                oneResult = runner.getResult();
            } else {
                // Make sure we can parameterize the slave server connection
                //
                remoteSlaveServer.shareVariablesWith(this);
                // Remote execution...
                //
                JobExecutionConfiguration jobExecutionConfiguration = new JobExecutionConfiguration();
                // lightClone() because the rows are overwritten in the next line.
                jobExecutionConfiguration.setPreviousResult(result.lightClone());
                jobExecutionConfiguration.getPreviousResult().setRows(sourceRows);
                jobExecutionConfiguration.setArgumentStrings(args);
                jobExecutionConfiguration.setVariables(this);
                jobExecutionConfiguration.setRemoteServer(remoteSlaveServer);
                jobExecutionConfiguration.setRepository(rep);
                jobExecutionConfiguration.setLogLevel(jobLogLevel);
                jobExecutionConfiguration.setPassingExport(passingExport);
                jobExecutionConfiguration.setExpandingRemoteJob(expandingRemoteJob);
                for (String param : namedParam.listParameters()) {
                    String defValue = namedParam.getParameterDefault(param);
                    String value = namedParam.getParameterValue(param);
                    jobExecutionConfiguration.getParams().put(param, Const.NVL(value, defValue));
                }
                if (parentJob.getJobMeta().isBatchIdPassed()) {
                    jobExecutionConfiguration.setPassedBatchId(parentJob.getBatchId());
                }
                // Send the XML over to the slave server
                // Also start the job over there...
                //
                String carteObjectId = null;
                try {
                    carteObjectId = Job.sendToSlaveServer(jobMeta, jobExecutionConfiguration, rep, metaStore);
                } catch (KettleException e) {
                    // Perhaps the job exists on the remote server, carte is down, etc.
                    // This is an abort situation, stop the parent job...
                    // We want this in case we are running in parallel: the other job
                    // entries can stop running now.
                    //
                    parentJob.stopAll();
                    //
                    throw e;
                }
                // Now start the monitoring...
                //
                SlaveServerJobStatus jobStatus = null;
                while (!parentJob.isStopped() && waitingToFinish) {
                    try {
                        jobStatus = remoteSlaveServer.getJobStatus(jobMeta.getName(), carteObjectId, 0);
                        if (jobStatus.getResult() != null) {
                            // The job is finished, get the result...
                            //
                            oneResult = jobStatus.getResult();
                            break;
                        }
                    } catch (Exception e1) {
                        logError("Unable to contact slave server [" + remoteSlaveServer + "] to verify the status of job [" + jobMeta.getName() + "]", e1);
                        oneResult.setNrErrors(1L);
                        // Stop looking too; chances are too low the server will come back on-line
                        break;
                    }
                    // sleep for 1 second
                    try {
                        Thread.sleep(1000);
                    } catch (InterruptedException e) {
                        // Ignore
                    }
                }
                // Write the log from carte to file
                if (setLogfile && jobStatus != null) {
                    String logFromCarte = jobStatus.getLoggingString();
                    if (!Utils.isEmpty(logFromCarte)) {
                        FileObject logfile = logChannelFileWriter.getLogFile();
                        OutputStream logFileOutputStream = null;
                        try {
                            logFileOutputStream = KettleVFS.getOutputStream(logfile, setAppendLogfile);
                            logFileOutputStream.write(logFromCarte.getBytes());
                            logFileOutputStream.flush();
                        } catch (Exception e) {
                            logError("There was an error logging to file '" + logfile + "'", e);
                        } finally {
                            try {
                                if (logFileOutputStream != null) {
                                    logFileOutputStream.close();
                                    logFileOutputStream = null;
                                }
                            } catch (Exception e) {
                                logError("There was an error closing log file '" + logfile + "'", e);
                            }
                        }
                    }
                }
                if (!waitingToFinish) {
                    // Since the job was posted successfully, the result is true...
                    //
                    oneResult = new Result();
                    oneResult.setResult(true);
                }
                if (parentJob.isStopped()) {
                    try {
                        //
                        if (jobStatus == null || jobStatus.isRunning()) {
                            // Try a remote abort ...
                            //
                            remoteSlaveServer.stopJob(jobMeta.getName(), carteObjectId);
                        }
                    } catch (Exception e1) {
                        logError("Unable to contact slave server [" + remoteSlaveServer + "] to stop job [" + jobMeta.getName() + "]", e1);
                        oneResult.setNrErrors(1L);
                        // Stop looking too; chances are too low the server will come back on-line
                        break;
                    }
                }
            }
            // Clear only the numbers, NOT the files or rows.
            result.clear();
            result.add(oneResult);
            // Set the result rows too, if any...
            if (!Utils.isEmpty(oneResult.getRows())) {
                result.setRows(new ArrayList<RowMetaAndData>(oneResult.getRows()));
            }
            //
            if (oneResult.getResult() == false) {
                result.setNrErrors(result.getNrErrors() + 1);
            }
            iteration++;
        }
    } catch (KettleException ke) {
        logError("Error running job entry 'job' : ", ke);
        result.setResult(false);
        result.setNrErrors(1L);
    }
    if (setLogfile) {
        if (logChannelFileWriter != null) {
            logChannelFileWriter.stopLogging();
            ResultFile resultFile = new ResultFile(ResultFile.FILE_TYPE_LOG, logChannelFileWriter.getLogFile(), parentJob.getJobname(), getName());
            result.getResultFiles().put(resultFile.getFile().toString(), resultFile);
            //
            if (logChannelFileWriter.getException() != null) {
                logError("Unable to open log file [" + getLogFilename() + "] : ");
                logError(Const.getStackTracker(logChannelFileWriter.getException()));
                result.setNrErrors(1);
                result.setResult(false);
                return result;
            }
        }
    }
    if (result.getNrErrors() > 0) {
        result.setResult(false);
    } else {
        result.setResult(true);
    }
    return result;
}
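The carte-log write-back above uses KettleVFS.getOutputStream(logfile, append); the plain commons-vfs2 equivalent is FileContent.getOutputStream(boolean append), sketched here with a hypothetical log path:

import java.io.OutputStream;
import java.nio.charset.StandardCharsets;

import org.apache.commons.vfs2.FileObject;
import org.apache.commons.vfs2.VFS;

public class AppendLogSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical log path, standing in for logChannelFileWriter.getLogFile() above.
        FileObject logFile = VFS.getManager().resolveFile("file:///tmp/job.log");
        try (OutputStream out = logFile.getContent().getOutputStream(true)) { // true = append
            out.write("log line fetched from the remote server\n".getBytes(StandardCharsets.UTF_8));
            out.flush();
        } finally {
            logFile.close();
        }
    }
}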