Search in sources:

Example 6 with RowMetaAndData

Use of org.pentaho.di.core.RowMetaAndData in project pentaho-kettle by pentaho.

The class JobEntryDeleteFiles, method populateDataForJobExecution.

/**
 * For job execution, paths to files and file masks should be provided.
 * These values can be obtained in two ways:
 * 1. As arguments of the current job entry
 * 2. As a table that comes as the result of executing a previous job/transformation.
 *
 * As the logic for processing this data is the same in both cases, we first
 * populate the data (in this method) and then process it.
 *
 * We are using a Guava multimap here because it allows key duplication, and there could be
 * a situation where two paths to one folder with different wildcards are provided.
 */
private Multimap<String, String> populateDataForJobExecution(List<RowMetaAndData> rowsFromPreviousMeta) throws KettleValueException {
    Multimap<String, String> pathToMaskMap = ArrayListMultimap.create();
    if (argFromPrevious && rowsFromPreviousMeta != null) {
        for (RowMetaAndData resultRow : rowsFromPreviousMeta) {
            if (resultRow.size() < 2) {
                logError(BaseMessages.getString(PKG, "JobDeleteFiles.Error.InvalidNumberOfRowsFromPrevMeta", resultRow.size()));
                return pathToMaskMap;
            }
            String pathToFile = resultRow.getString(0, null);
            String fileMask = resultRow.getString(1, null);
            if (log.isDetailed()) {
                logDetailed(BaseMessages.getString(PKG, "JobEntryDeleteFiles.ProcessingRow", pathToFile, fileMask));
            }
            pathToMaskMap.put(pathToFile, fileMask);
        }
    } else if (arguments != null) {
        for (int i = 0; i < arguments.length; i++) {
            if (log.isDetailed()) {
                logDetailed(BaseMessages.getString(PKG, "JobEntryDeleteFiles.ProcessingArg", arguments[i], filemasks[i]));
            }
            pathToMaskMap.put(arguments[i], filemasks[i]);
        }
    }
    return pathToMaskMap;
}
Also used: RowMetaAndData (org.pentaho.di.core.RowMetaAndData)
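
The duplicate-key behavior the Javadoc relies on is easy to demonstrate in isolation. A minimal sketch, assuming only Guava on the classpath; the folder path and masks are made up for illustration:

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

public class MultimapDemo {
    public static void main(String[] args) {
        // The same folder registered twice with different wildcards; a
        // plain Map.put() would silently overwrite the first mask.
        Multimap<String, String> pathToMaskMap = ArrayListMultimap.create();
        pathToMaskMap.put("/tmp/data", ".*\\.csv");
        pathToMaskMap.put("/tmp/data", ".*\\.txt");
        // Prints [.*\.csv, .*\.txt]: both masks survive under one key.
        System.out.println(pathToMaskMap.get("/tmp/data"));
    }
}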

Example 7 with RowMetaAndData

Use of org.pentaho.di.core.RowMetaAndData in project pentaho-kettle by pentaho.

The class JobEntryDeleteFolders, method execute.

public Result execute(Result result, int nr) throws KettleException {
    List<RowMetaAndData> rows = result.getRows();
    result.setNrErrors(1);
    result.setResult(false);
    NrErrors = 0;
    NrSuccess = 0;
    successConditionBroken = false;
    successConditionBrokenExit = false;
    limitFolders = Const.toInt(environmentSubstitute(getLimitFolders()), 10);
    // Set the embedded NamedCluster metastore provider key so that it can be passed to VFS
    if (parentJobMeta.getNamedClusterEmbedManager() != null) {
        parentJobMeta.getNamedClusterEmbedManager().passEmbeddedMetastoreKey(this, parentJobMeta.getEmbeddedMetastoreProviderKey());
    }
    if (argFromPrevious) {
        if (log.isDetailed()) {
            logDetailed(BaseMessages.getString(PKG, "JobEntryDeleteFolders.FoundPreviousRows", String.valueOf((rows != null ? rows.size() : 0))));
        }
    }
    if (argFromPrevious && rows != null) {
        for (int iteration = 0; iteration < rows.size() && !parentJob.isStopped(); iteration++) {
            if (successConditionBroken) {
                logError(BaseMessages.getString(PKG, "JobEntryDeleteFolders.Error.SuccessConditionbroken", "" + NrErrors));
                result.setNrErrors(NrErrors);
                result.setNrLinesDeleted(NrSuccess);
                return result;
            }
            RowMetaAndData resultRow = rows.get(iteration);
            String args_previous = resultRow.getString(0, null);
            if (!Utils.isEmpty(args_previous)) {
                if (deleteFolder(args_previous)) {
                    updateSuccess();
                } else {
                    updateErrors();
                }
            } else {
                // empty filename !
                logError(BaseMessages.getString(PKG, "JobEntryDeleteFolders.Error.EmptyLine"));
            }
        }
    } else if (arguments != null) {
        for (int i = 0; i < arguments.length && !parentJob.isStopped(); i++) {
            if (successConditionBroken) {
                logError(BaseMessages.getString(PKG, "JobEntryDeleteFolders.Error.SuccessConditionbroken", "" + NrErrors));
                result.setNrErrors(NrErrors);
                result.setNrLinesDeleted(NrSuccess);
                return result;
            }
            String realfilename = environmentSubstitute(arguments[i]);
            if (!Utils.isEmpty(realfilename)) {
                if (deleteFolder(realfilename)) {
                    updateSuccess();
                } else {
                    updateErrors();
                }
            } else {
                // empty filename !
                logError(BaseMessages.getString(PKG, "JobEntryDeleteFolders.Error.EmptyLine"));
            }
        }
    }
    if (log.isDetailed()) {
        logDetailed("=======================================");
        logDetailed(BaseMessages.getString(PKG, "JobEntryDeleteFolders.Log.Info.NrError", "" + NrErrors));
        logDetailed(BaseMessages.getString(PKG, "JobEntryDeleteFolders.Log.Info.NrDeletedFolders", "" + NrSuccess));
        logDetailed("=======================================");
    }
    result.setNrErrors(NrErrors);
    result.setNrLinesDeleted(NrSuccess);
    if (getSuccessStatus()) {
        result.setResult(true);
    }
    return result;
}
Also used: RowMetaAndData (org.pentaho.di.core.RowMetaAndData)
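
For context, this is roughly how an upstream entry would produce the rows that the argFromPrevious branch consumes: one String field per row holding a folder path. A hedged sketch; the field name and paths are illustrative assumptions, while the Kettle classes and calls shown are the real API:

import java.util.ArrayList;
import java.util.List;
import org.pentaho.di.core.Result;
import org.pentaho.di.core.RowMetaAndData;
import org.pentaho.di.core.row.RowMeta;
import org.pentaho.di.core.row.RowMetaInterface;
import org.pentaho.di.core.row.value.ValueMetaString;

// Build one row of metadata + data per folder to delete
RowMetaInterface rowMeta = new RowMeta();
rowMeta.addValueMeta(new ValueMetaString("folder_path"));   // read back via getString(0, null)
List<RowMetaAndData> rows = new ArrayList<>();
rows.add(new RowMetaAndData(rowMeta, "/tmp/old-exports"));
rows.add(new RowMetaAndData(rowMeta, "/tmp/old-logs"));

Result previous = new Result();
previous.setRows(rows);   // what execute() later reads with result.getRows()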

Example 8 with RowMetaAndData

Use of org.pentaho.di.core.RowMetaAndData in project pentaho-kettle by pentaho.

The class Trans, method calculateBatchIdAndDateRange.

/**
 * Calculate the batch id and date range for the transformation.
 *
 * @throws KettleTransException
 *           if there are any errors during calculation
 */
public void calculateBatchIdAndDateRange() throws KettleTransException {
    TransLogTable transLogTable = transMeta.getTransLogTable();
    currentDate = new Date();
    logDate = new Date();
    startDate = Const.MIN_DATE;
    endDate = currentDate;
    DatabaseMeta logConnection = transLogTable.getDatabaseMeta();
    String logTable = environmentSubstitute(transLogTable.getActualTableName());
    String logSchema = environmentSubstitute(transLogTable.getActualSchemaName());
    try {
        if (logConnection != null) {
            String logSchemaAndTable = logConnection.getQuotedSchemaTableCombination(logSchema, logTable);
            if (Utils.isEmpty(logTable)) {
                // There is no log table to log to.
                throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.NoLogTableDefined"));
            }
            if (Utils.isEmpty(transMeta.getName()) && logConnection != null && logTable != null) {
                throw new KettleException(BaseMessages.getString(PKG, "Trans.Exception.NoTransnameAvailableForLogging"));
            }
            transLogTableDatabaseConnection = new Database(this, logConnection);
            transLogTableDatabaseConnection.shareVariablesWith(this);
            if (log.isDetailed()) {
                log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.OpeningLogConnection", "" + logConnection));
            }
            transLogTableDatabaseConnection.connect();
            transLogTableDatabaseConnection.setCommit(logCommitSize);
            // 
            if (transLogTable.isBatchIdUsed()) {
                Long id_batch = logConnection.getNextBatchId(transLogTableDatabaseConnection, logSchema, logTable, transLogTable.getKeyField().getFieldName());
                setBatchId(id_batch.longValue());
            }
            // 
            // Get the date range from the logging table: from the last end_date to now. (currentDate)
            // 
            Object[] lastr = transLogTableDatabaseConnection.getLastLogDate(logSchemaAndTable, transMeta.getName(), false, LogStatus.END);
            if (lastr != null && lastr.length > 0) {
                startDate = (Date) lastr[0];
                if (log.isDetailed()) {
                    log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.StartDateFound") + startDate);
                }
            }
            // 
            if (transMeta.getMaxDateConnection() != null && transMeta.getMaxDateTable() != null && transMeta.getMaxDateTable().length() > 0 && transMeta.getMaxDateField() != null && transMeta.getMaxDateField().length() > 0) {
                if (log.isDetailed()) {
                    log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.LookingForMaxdateConnection", "" + transMeta.getMaxDateConnection()));
                }
                DatabaseMeta maxcon = transMeta.getMaxDateConnection();
                if (maxcon != null) {
                    Database maxdb = new Database(this, maxcon);
                    maxdb.shareVariablesWith(this);
                    try {
                        if (log.isDetailed()) {
                            log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.OpeningMaximumDateConnection"));
                        }
                        maxdb.connect();
                        maxdb.setCommit(logCommitSize);
                        // 
                        // Determine the endDate by looking at a field in a table...
                        // 
                        String sql = "SELECT MAX(" + transMeta.getMaxDateField() + ") FROM " + transMeta.getMaxDateTable();
                        RowMetaAndData r1 = maxdb.getOneRow(sql);
                        if (r1 != null) {
                            // OK, we have a value, what's the offset?
                            Date maxvalue = r1.getRowMeta().getDate(r1.getData(), 0);
                            if (maxvalue != null) {
                                if (log.isDetailed()) {
                                    log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.LastDateFoundOnTheMaxdateConnection") + r1);
                                }
                                endDate.setTime((long) (maxvalue.getTime() + (transMeta.getMaxDateOffset() * 1000)));
                            }
                        } else {
                            if (log.isDetailed()) {
                                log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.NoLastDateFoundOnTheMaxdateConnection"));
                            }
                        }
                    } catch (KettleException e) {
                        throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.ErrorConnectingToDatabase", "" + transMeta.getMaxDateConnection()), e);
                    } finally {
                        maxdb.disconnect();
                    }
                } else {
                    throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.MaximumDateConnectionCouldNotBeFound", "" + transMeta.getMaxDateConnection()));
                }
            }
            // Get the maximum in depdate...
            if (transMeta.nrDependencies() > 0) {
                if (log.isDetailed()) {
                    log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.CheckingForMaxDependencyDate"));
                }
                // 
                // Maybe one of the tables where this transformation is dependent on has changed?
                // If so we need to change the start-date!
                // 
                depDate = Const.MIN_DATE;
                Date maxdepdate = Const.MIN_DATE;
                if (lastr != null && lastr.length > 0) {
                    // #1: last depdate
                    Date dep = (Date) lastr[1];
                    if (dep != null) {
                        maxdepdate = dep;
                        depDate = dep;
                    }
                }
                for (int i = 0; i < transMeta.nrDependencies(); i++) {
                    TransDependency td = transMeta.getDependency(i);
                    DatabaseMeta depcon = td.getDatabase();
                    if (depcon != null) {
                        Database depdb = new Database(this, depcon);
                        try {
                            depdb.connect();
                            depdb.setCommit(logCommitSize);
                            String sql = "SELECT MAX(" + td.getFieldname() + ") FROM " + td.getTablename();
                            RowMetaAndData r1 = depdb.getOneRow(sql);
                            if (r1 != null) {
                                // OK, we have a row, get the result!
                                Date maxvalue = (Date) r1.getData()[0];
                                if (maxvalue != null) {
                                    if (log.isDetailed()) {
                                        log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.FoundDateFromTable", td.getTablename(), "." + td.getFieldname(), " = " + maxvalue.toString()));
                                    }
                                    if (maxvalue.getTime() > maxdepdate.getTime()) {
                                        maxdepdate = maxvalue;
                                    }
                                } else {
                                    throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.UnableToGetDependencyInfoFromDB", td.getDatabase().getName() + ".", td.getTablename() + ".", td.getFieldname()));
                                }
                            } else {
                                throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.UnableToGetDependencyInfoFromDB", td.getDatabase().getName() + ".", td.getTablename() + ".", td.getFieldname()));
                            }
                        } catch (KettleException e) {
                            throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.ErrorInDatabase", "" + td.getDatabase()), e);
                        } finally {
                            depdb.disconnect();
                        }
                    } else {
                        throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.ConnectionCouldNotBeFound", "" + td.getDatabase()));
                    }
                    if (log.isDetailed()) {
                        log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.Maxdepdate") + (XMLHandler.date2string(maxdepdate)));
                    }
                }
                // 
                if (maxdepdate.getTime() > depDate.getTime()) {
                    depDate = maxdepdate;
                    startDate = Const.MIN_DATE;
                }
            } else {
                depDate = currentDate;
            }
        }
        // OK, now we have a date-range. See if we need to set a maximum!
        // Do we have a maximum date difference specified, and is the start date past the minimum?
        if (transMeta.getMaxDateDifference() > 0.0 && startDate.getTime() > Const.MIN_DATE.getTime()) {
            // See if the end-date is larger then Start_date + DIFF?
            Date maxdesired = new Date(startDate.getTime() + ((long) transMeta.getMaxDateDifference() * 1000));
            // 
            if (endDate.compareTo(maxdesired) > 0) {
                endDate = maxdesired;
            }
        }
    } catch (KettleException e) {
        throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.ErrorCalculatingDateRange", logTable), e);
    }
// Be careful, We DO NOT close the trans log table database connection!!!
// It's closed later in beginProcessing() to prevent excessive connect/disconnect repetitions.
}
Also used: KettleException (org.pentaho.di.core.exception.KettleException), ValueMetaString (org.pentaho.di.core.row.value.ValueMetaString), DatabaseMeta (org.pentaho.di.core.database.DatabaseMeta), Date (java.util.Date), KettleExtensionPoint (org.pentaho.di.core.extension.KettleExtensionPoint), RowMetaAndData (org.pentaho.di.core.RowMetaAndData), Database (org.pentaho.di.core.database.Database), TransLogTable (org.pentaho.di.core.logging.TransLogTable), KettleTransException (org.pentaho.di.core.exception.KettleTransException), FileObject (org.apache.commons.vfs2.FileObject)
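
The max-date lookup above reduces to a small reusable pattern: run a single-row query with Database.getOneRow() and read the value back through the row metadata. A hedged sketch of that pattern as a standalone fragment; the table and column names are invented, and the parent object is whatever LoggingObjectInterface the caller has at hand:

import java.util.Date;
import org.pentaho.di.core.RowMetaAndData;
import org.pentaho.di.core.database.Database;
import org.pentaho.di.core.database.DatabaseMeta;
import org.pentaho.di.core.exception.KettleException;
import org.pentaho.di.core.logging.LoggingObjectInterface;

// Returns null when the query yields no row or a NULL maximum.
private Date readMaxDate(LoggingObjectInterface parent, DatabaseMeta meta) throws KettleException {
    Database db = new Database(parent, meta);
    try {
        db.connect();
        RowMetaAndData row = db.getOneRow("SELECT MAX(updated_at) FROM sales");  // illustrative SQL
        // getDate() converts through the column's value meta, so DATE and
        // TIMESTAMP columns come back as java.util.Date consistently.
        return row == null ? null : row.getRowMeta().getDate(row.getData(), 0);
    } finally {
        db.disconnect();
    }
}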

Example 9 with RowMetaAndData

Use of org.pentaho.di.core.RowMetaAndData in project pentaho-kettle by pentaho.

The class Trans, method writeStepPerformanceLogRecords.

/**
 * Write step performance log records.
 *
 * @param startSequenceNr
 *          the start sequence number
 * @param status
 *          the logging status. If this is End, perform cleanup
 * @return the new sequence number
 * @throws KettleException
 *           if any errors occur during logging
 */
private int writeStepPerformanceLogRecords(int startSequenceNr, LogStatus status) throws KettleException {
    int lastSeqNr = 0;
    Database ldb = null;
    PerformanceLogTable performanceLogTable = transMeta.getPerformanceLogTable();
    if (!performanceLogTable.isDefined() || !transMeta.isCapturingStepPerformanceSnapShots() || stepPerformanceSnapShots == null || stepPerformanceSnapShots.isEmpty()) {
        // nothing to do here!
        return 0;
    }
    try {
        ldb = new Database(this, performanceLogTable.getDatabaseMeta());
        ldb.shareVariablesWith(this);
        ldb.connect();
        ldb.setCommit(logCommitSize);
        // Write to the step performance log table...
        // 
        RowMetaInterface rowMeta = performanceLogTable.getLogRecord(LogStatus.START, null, null).getRowMeta();
        ldb.prepareInsert(rowMeta, performanceLogTable.getActualSchemaName(), performanceLogTable.getActualTableName());
        synchronized (stepPerformanceSnapShots) {
            Iterator<List<StepPerformanceSnapShot>> iterator = stepPerformanceSnapShots.values().iterator();
            while (iterator.hasNext()) {
                List<StepPerformanceSnapShot> snapshots = iterator.next();
                synchronized (snapshots) {
                    Iterator<StepPerformanceSnapShot> snapshotsIterator = snapshots.iterator();
                    while (snapshotsIterator.hasNext()) {
                        StepPerformanceSnapShot snapshot = snapshotsIterator.next();
                        if (snapshot.getSeqNr() >= startSequenceNr && snapshot.getSeqNr() <= lastStepPerformanceSnapshotSeqNrAdded) {
                            RowMetaAndData row = performanceLogTable.getLogRecord(LogStatus.START, snapshot, null);
                            ldb.setValuesInsert(row.getRowMeta(), row.getData());
                            ldb.insertRow(true);
                        }
                        lastSeqNr = snapshot.getSeqNr();
                    }
                }
            }
        }
        ldb.insertFinished(true);
        // 
        if (status.equals(LogStatus.END)) {
            ldb.cleanupLogRecords(performanceLogTable);
        }
    } catch (Exception e) {
        throw new KettleException(BaseMessages.getString(PKG, "Trans.Exception.ErrorWritingStepPerformanceLogRecordToTable"), e);
    } finally {
        if (ldb != null) {
            ldb.disconnect();
        }
    }
    return lastSeqNr + 1;
}
Also used: KettleException (org.pentaho.di.core.exception.KettleException), PerformanceLogTable (org.pentaho.di.core.logging.PerformanceLogTable), RowMetaAndData (org.pentaho.di.core.RowMetaAndData), Database (org.pentaho.di.core.database.Database), RowMetaInterface (org.pentaho.di.core.row.RowMetaInterface), ArrayList (java.util.ArrayList), List (java.util.List), KettleExtensionPoint (org.pentaho.di.core.extension.KettleExtensionPoint), StepPerformanceSnapShot (org.pentaho.di.trans.performance.StepPerformanceSnapShot), UnknownParamException (org.pentaho.di.core.parameters.UnknownParamException), KettleValueException (org.pentaho.di.core.exception.KettleValueException), KettleTransException (org.pentaho.di.core.exception.KettleTransException), DuplicateParamException (org.pentaho.di.core.parameters.DuplicateParamException), KettleFileException (org.pentaho.di.core.exception.KettleFileException), UnsupportedEncodingException (java.io.UnsupportedEncodingException), KettleDatabaseException (org.pentaho.di.core.exception.KettleDatabaseException)
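
Stripped of the snapshot bookkeeping, the write above is the standard Kettle batched-insert idiom: prepare the statement once, bind each row's values, insert in batch mode, then flush. A minimal sketch assuming an open Database ldb, a RowMetaInterface rowMeta matching the target table, and a hypothetical list rowsToWrite of RowMetaAndData:

// Compile the INSERT statement once for the target schema/table
ldb.prepareInsert(rowMeta, schemaName, tableName);
for (RowMetaAndData row : rowsToWrite) {
    ldb.setValuesInsert(row.getRowMeta(), row.getData());  // bind this row's values
    ldb.insertRow(true);                                   // true = add to the JDBC batch
}
ldb.insertFinished(true);   // execute the remaining batch and close the statement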

Example 10 with RowMetaAndData

Use of org.pentaho.di.core.RowMetaAndData in project pentaho-kettle by pentaho.

The class JobEntryPGPEncryptFiles, method execute.

public Result execute(Result previousResult, int nr) {
    Result result = previousResult;
    List<RowMetaAndData> rows = result.getRows();
    RowMetaAndData resultRow = null;
    result.setNrErrors(1);
    result.setResult(false);
    try {
        NrErrors = 0;
        NrSuccess = 0;
        successConditionBroken = false;
        successConditionBrokenExit = false;
        limitFiles = Const.toInt(environmentSubstitute(getNrErrorsLessThan()), 10);
        if (include_subfolders) {
            if (isDetailed()) {
                logDetailed(BaseMessages.getString(PKG, "JobPGPEncryptFiles.Log.IncludeSubFoldersOn"));
            }
        }
        String MoveToFolder = environmentSubstitute(destinationFolder);
        // Get source and destination files, also wildcard
        String[] vsourcefilefolder = source_filefolder;
        String[] vuserid = userid;
        String[] vdestinationfilefolder = destination_filefolder;
        String[] vwildcard = wildcard;
        if (iffileexists.equals("move_file")) {
            if (Utils.isEmpty(MoveToFolder)) {
                logError(toString(), BaseMessages.getString(PKG, "JobPGPEncryptFiles.Log.Error.MoveToFolderMissing"));
                return result;
            }
            FileObject folder = null;
            try {
                folder = KettleVFS.getFileObject(MoveToFolder);
                if (!folder.exists()) {
                    if (isDetailed()) {
                        logDetailed(BaseMessages.getString(PKG, "JobPGPEncryptFiles.Log.Error.FolderMissing", MoveToFolder));
                    }
                    if (create_move_to_folder) {
                        folder.createFolder();
                    } else {
                        logError(BaseMessages.getString(PKG, "JobPGPEncryptFiles.Log.Error.FolderMissing", MoveToFolder));
                        return result;
                    }
                }
                if (!folder.getType().equals(FileType.FOLDER)) {
                    logError(BaseMessages.getString(PKG, "JobPGPEncryptFiles.Log.Error.NotFolder", MoveToFolder));
                    return result;
                }
            } catch (Exception e) {
                logError(BaseMessages.getString(PKG, "JobPGPEncryptFiles.Log.Error.GettingMoveToFolder", MoveToFolder, e.getMessage()));
                return result;
            } finally {
                if (folder != null) {
                    try {
                        folder.close();
                    } catch (IOException ex) {
                    /* Ignore */
                    }
                }
            }
        }
        gpg = new GPG(environmentSubstitute(gpglocation), log);
        if (arg_from_previous) {
            if (isDetailed()) {
                logDetailed(BaseMessages.getString(PKG, "JobPGPEncryptFiles.Log.ArgFromPrevious.Found", (rows != null ? rows.size() : 0) + ""));
            }
        }
        if (arg_from_previous && rows != null) {
            for (int iteration = 0; iteration < rows.size(); iteration++) {
                // Success condition broken?
                if (successConditionBroken) {
                    if (!successConditionBrokenExit) {
                        logError(BaseMessages.getString(PKG, "JobPGPEncryptFiles.Error.SuccessConditionbroken", "" + NrErrors));
                        successConditionBrokenExit = true;
                    }
                    result.setNrErrors(NrErrors);
                    displayResults();
                    return result;
                }
                resultRow = rows.get(iteration);
                // Get source and destination file names, also wildcard
                int vactionType_previous = getActionTypeByCode(resultRow.getString(0, null));
                String vsourcefilefolder_previous = resultRow.getString(1, null);
                String vwildcard_previous = environmentSubstitute(resultRow.getString(2, null));
                String vuserid_previous = resultRow.getString(3, null);
                String vdestinationfilefolder_previous = resultRow.getString(4, null);
                if (!Utils.isEmpty(vsourcefilefolder_previous) && !Utils.isEmpty(vdestinationfilefolder_previous)) {
                    if (isDetailed()) {
                        logDetailed(BaseMessages.getString(PKG, "JobPGPEncryptFiles.Log.ProcessingRow", vsourcefilefolder_previous, vdestinationfilefolder_previous, vwildcard_previous));
                    }
                    if (!ProcessFileFolder(vactionType_previous, vsourcefilefolder_previous, vuserid_previous, vdestinationfilefolder_previous, vwildcard_previous, parentJob, result, MoveToFolder)) {
                        // The process failed: update the error count
                        updateErrors();
                    }
                } else {
                    if (isDetailed()) {
                        logDetailed(BaseMessages.getString(PKG, "JobPGPEncryptFiles.Log.IgnoringRow", vsourcefilefolder[iteration], vdestinationfilefolder[iteration], vwildcard[iteration]));
                    }
                }
            }
        } else if (vsourcefilefolder != null && vdestinationfilefolder != null) {
            for (int i = 0; i < vsourcefilefolder.length && !parentJob.isStopped(); i++) {
                // Success condition broken?
                if (successConditionBroken) {
                    if (!successConditionBrokenExit) {
                        logError(BaseMessages.getString(PKG, "JobPGPEncryptFiles.Error.SuccessConditionbroken", "" + NrErrors));
                        successConditionBrokenExit = true;
                    }
                    result.setNrErrors(NrErrors);
                    displayResults();
                    return result;
                }
                if (!Utils.isEmpty(vsourcefilefolder[i]) && !Utils.isEmpty(vdestinationfilefolder[i])) {
                    // ok we can process this file/folder
                    if (isDetailed()) {
                        logDetailed(BaseMessages.getString(PKG, "JobPGPEncryptFiles.Log.ProcessingRow", vsourcefilefolder[i], vdestinationfilefolder[i], vwildcard[i]));
                    }
                    if (!ProcessFileFolder(action_type[i], vsourcefilefolder[i], vuserid[i], vdestinationfilefolder[i], vwildcard[i], parentJob, result, MoveToFolder)) {
                        // Update Errors
                        updateErrors();
                    }
                } else {
                    if (isDetailed()) {
                        logDetailed(BaseMessages.getString(PKG, "JobPGPEncryptFiles.Log.IgnoringRow", vsourcefilefolder[i], vdestinationfilefolder[i], vwildcard[i]));
                    }
                }
            }
        }
    } catch (Exception e) {
        updateErrors();
        logError(BaseMessages.getString("JobPGPEncryptFiles.Error", e.getMessage()));
    } finally {
        // Release the argument arrays; assigning null unconditionally is equivalent
        source_filefolder = null;
        destination_filefolder = null;
    }
    // Success Condition
    result.setNrErrors(NrErrors);
    result.setNrLinesWritten(NrSuccess);
    if (getSuccessStatus()) {
        result.setResult(true);
    }
    displayResults();
    return result;
}
Also used: RowMetaAndData (org.pentaho.di.core.RowMetaAndData), FileObject (org.apache.commons.vfs2.FileObject), IOException (java.io.IOException), KettleException (org.pentaho.di.core.exception.KettleException), KettleDatabaseException (org.pentaho.di.core.exception.KettleDatabaseException), KettleXMLException (org.pentaho.di.core.exception.KettleXMLException), Result (org.pentaho.di.core.Result)
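
The positional reads in the arg_from_previous loop assume five fields per incoming row. A hedged sketch of the same reads with a size guard the original omits; the field layout is taken from the code above, and the error message is invented:

                RowMetaAndData row = rows.get(iteration);
                if (row.size() < 5) {   // 0=action, 1=source, 2=wildcard, 3=userid, 4=destination
                    logError("Expected 5 fields in the incoming row but got " + row.size());
                    updateErrors();
                    // skip this row rather than risk reading past the last field
                } else {
                    String source = row.getString(1, null);                            // null when the field is empty
                    String wildcard = environmentSubstitute(row.getString(2, null));   // resolve ${variables}
                    String destination = row.getString(4, null);
                }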

Aggregations (usage counts across pentaho-kettle)

RowMetaAndData (org.pentaho.di.core.RowMetaAndData): 563
ValueMetaString (org.pentaho.di.core.row.value.ValueMetaString): 225
ArrayList (java.util.ArrayList): 172
RowMetaInterface (org.pentaho.di.core.row.RowMetaInterface): 145
TransMeta (org.pentaho.di.trans.TransMeta): 116
Test (org.junit.Test): 108
ValueMetaInteger (org.pentaho.di.core.row.value.ValueMetaInteger): 94
KettleException (org.pentaho.di.core.exception.KettleException): 89
StepMeta (org.pentaho.di.trans.step.StepMeta): 80
LongObjectId (org.pentaho.di.repository.LongObjectId): 75
StepInterface (org.pentaho.di.trans.step.StepInterface): 75
RowStepCollector (org.pentaho.di.trans.RowStepCollector): 73
Trans (org.pentaho.di.trans.Trans): 73
PluginRegistry (org.pentaho.di.core.plugins.PluginRegistry): 71
TransHopMeta (org.pentaho.di.trans.TransHopMeta): 71
KettleValueException (org.pentaho.di.core.exception.KettleValueException): 58
RowProducer (org.pentaho.di.trans.RowProducer): 56
DummyTransMeta (org.pentaho.di.trans.steps.dummytrans.DummyTransMeta): 54
KettleDatabaseException (org.pentaho.di.core.exception.KettleDatabaseException): 53
RowMeta (org.pentaho.di.core.row.RowMeta): 51