
Example 26 with SlaveServer

use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.

the class TransMeta method getXML.

/**
 * Gets the XML representation of this transformation, including or excluding step, database, slave server, cluster,
 * or partition information as specified by the parameters
 *
 * @param includeSteps
 *          whether to include step data
 * @param includeDatabase
 *          whether to include database data
 * @param includeSlaves
 *          whether to include slave server data
 * @param includeClusters
 *          whether to include cluster data
 * @param includePartitions
 *          whether to include partition data
 * @param includeNamedParameters
 *          whether to include named parameters data
 * @param includeLog
 *          whether to include log data
 * @param includeDependencies
 *          whether to include dependencies data
 * @param includeNotePads
 *          whether to include notepads data
 * @param includeAttributeGroups
 *          whether to include attributes map data
 * @return the XML representation of this transformation
 * @throws KettleException
 *           if any errors occur during generation of the XML
 */
public String getXML(boolean includeSteps, boolean includeDatabase, boolean includeSlaves, boolean includeClusters, boolean includePartitions, boolean includeNamedParameters, boolean includeLog, boolean includeDependencies, boolean includeNotePads, boolean includeAttributeGroups) throws KettleException {
    // Clear the embedded named clusters.  We will be repopulating from steps that used named clusters
    getNamedClusterEmbedManager().clear();
    Props props = null;
    if (Props.isInitialized()) {
        props = Props.getInstance();
    }
    StringBuilder retval = new StringBuilder(800);
    retval.append(XMLHandler.openTag(XML_TAG)).append(Const.CR);
    retval.append("  ").append(XMLHandler.openTag(XML_TAG_INFO)).append(Const.CR);
    retval.append("    ").append(XMLHandler.addTagValue("name", name));
    retval.append("    ").append(XMLHandler.addTagValue("description", description));
    retval.append("    ").append(XMLHandler.addTagValue("extended_description", extendedDescription));
    retval.append("    ").append(XMLHandler.addTagValue("trans_version", trans_version));
    retval.append("    ").append(XMLHandler.addTagValue("trans_type", transformationType.getCode()));
    if (trans_status >= 0) {
        retval.append("    ").append(XMLHandler.addTagValue("trans_status", trans_status));
    }
    retval.append("    ").append(XMLHandler.addTagValue("directory", directory != null ? directory.getPath() : RepositoryDirectory.DIRECTORY_SEPARATOR));
    if (includeNamedParameters) {
        retval.append("    ").append(XMLHandler.openTag(XML_TAG_PARAMETERS)).append(Const.CR);
        String[] parameters = listParameters();
        for (int idx = 0; idx < parameters.length; idx++) {
            retval.append("      ").append(XMLHandler.openTag("parameter")).append(Const.CR);
            retval.append("        ").append(XMLHandler.addTagValue("name", parameters[idx]));
            retval.append("        ").append(XMLHandler.addTagValue("default_value", getParameterDefault(parameters[idx])));
            retval.append("        ").append(XMLHandler.addTagValue("description", getParameterDescription(parameters[idx])));
            retval.append("      ").append(XMLHandler.closeTag("parameter")).append(Const.CR);
        }
        retval.append("    ").append(XMLHandler.closeTag(XML_TAG_PARAMETERS)).append(Const.CR);
    }
    if (includeLog) {
        retval.append("    ").append(XMLHandler.openTag("log")).append(Const.CR);
        // Add the metadata for the various logging tables
        // 
        retval.append(transLogTable.getXML());
        retval.append(performanceLogTable.getXML());
        retval.append(channelLogTable.getXML());
        retval.append(stepLogTable.getXML());
        retval.append(metricsLogTable.getXML());
        retval.append("    ").append(XMLHandler.closeTag("log")).append(Const.CR);
    }
    retval.append("    ").append(XMLHandler.openTag("maxdate")).append(Const.CR);
    retval.append("      ").append(XMLHandler.addTagValue("connection", maxDateConnection == null ? "" : maxDateConnection.getName()));
    retval.append("      ").append(XMLHandler.addTagValue("table", maxDateTable));
    retval.append("      ").append(XMLHandler.addTagValue("field", maxDateField));
    retval.append("      ").append(XMLHandler.addTagValue("offset", maxDateOffset));
    retval.append("      ").append(XMLHandler.addTagValue("maxdiff", maxDateDifference));
    retval.append("    ").append(XMLHandler.closeTag("maxdate")).append(Const.CR);
    retval.append("    ").append(XMLHandler.addTagValue("size_rowset", sizeRowset));
    retval.append("    ").append(XMLHandler.addTagValue("sleep_time_empty", sleepTimeEmpty));
    retval.append("    ").append(XMLHandler.addTagValue("sleep_time_full", sleepTimeFull));
    retval.append("    ").append(XMLHandler.addTagValue("unique_connections", usingUniqueConnections));
    retval.append("    ").append(XMLHandler.addTagValue("feedback_shown", feedbackShown));
    retval.append("    ").append(XMLHandler.addTagValue("feedback_size", feedbackSize));
    retval.append("    ").append(XMLHandler.addTagValue("using_thread_priorities", usingThreadPriorityManagment));
    retval.append("    ").append(XMLHandler.addTagValue("shared_objects_file", sharedObjectsFile));
    // Performance monitoring
    // 
    retval.append("    ").append(XMLHandler.addTagValue("capture_step_performance", capturingStepPerformanceSnapShots));
    retval.append("    ").append(XMLHandler.addTagValue("step_performance_capturing_delay", stepPerformanceCapturingDelay));
    retval.append("    ").append(XMLHandler.addTagValue("step_performance_capturing_size_limit", stepPerformanceCapturingSizeLimit));
    if (includeDependencies) {
        retval.append("    ").append(XMLHandler.openTag(XML_TAG_DEPENDENCIES)).append(Const.CR);
        for (int i = 0; i < nrDependencies(); i++) {
            TransDependency td = getDependency(i);
            retval.append(td.getXML());
        }
        retval.append("    ").append(XMLHandler.closeTag(XML_TAG_DEPENDENCIES)).append(Const.CR);
    }
    // 
    if (includePartitions) {
        retval.append("    ").append(XMLHandler.openTag(XML_TAG_PARTITIONSCHEMAS)).append(Const.CR);
        for (int i = 0; i < partitionSchemas.size(); i++) {
            PartitionSchema partitionSchema = partitionSchemas.get(i);
            retval.append(partitionSchema.getXML());
        }
        retval.append("    ").append(XMLHandler.closeTag(XML_TAG_PARTITIONSCHEMAS)).append(Const.CR);
    }
    // 
    if (includeSlaves) {
        retval.append("    ").append(XMLHandler.openTag(XML_TAG_SLAVESERVERS)).append(Const.CR);
        for (int i = 0; i < slaveServers.size(); i++) {
            SlaveServer slaveServer = slaveServers.get(i);
            retval.append(slaveServer.getXML());
        }
        retval.append("    ").append(XMLHandler.closeTag(XML_TAG_SLAVESERVERS)).append(Const.CR);
    }
    // 
    if (includeClusters) {
        retval.append("    ").append(XMLHandler.openTag(XML_TAG_CLUSTERSCHEMAS)).append(Const.CR);
        for (int i = 0; i < clusterSchemas.size(); i++) {
            ClusterSchema clusterSchema = clusterSchemas.get(i);
            retval.append(clusterSchema.getXML());
        }
        retval.append("    ").append(XMLHandler.closeTag(XML_TAG_CLUSTERSCHEMAS)).append(Const.CR);
    }
    retval.append("    ").append(XMLHandler.addTagValue("created_user", createdUser));
    retval.append("    ").append(XMLHandler.addTagValue("created_date", XMLHandler.date2string(createdDate)));
    retval.append("    ").append(XMLHandler.addTagValue("modified_user", modifiedUser));
    retval.append("    ").append(XMLHandler.addTagValue("modified_date", XMLHandler.date2string(modifiedDate)));
    try {
        retval.append("    ").append(XMLHandler.addTagValue("key_for_session_key", keyForSessionKey));
    } catch (Exception ex) {
        log.logError("Unable to decode key", ex);
    }
    retval.append("    ").append(XMLHandler.addTagValue("is_key_private", isKeyPrivate));
    retval.append("  ").append(XMLHandler.closeTag(XML_TAG_INFO)).append(Const.CR);
    if (includeNotePads) {
        retval.append("  ").append(XMLHandler.openTag(XML_TAG_NOTEPADS)).append(Const.CR);
        if (notes != null) {
            for (int i = 0; i < nrNotes(); i++) {
                NotePadMeta ni = getNote(i);
                retval.append(ni.getXML());
            }
        }
        retval.append("  ").append(XMLHandler.closeTag(XML_TAG_NOTEPADS)).append(Const.CR);
    }
    // The database connections...
    if (includeDatabase) {
        for (int i = 0; i < nrDatabases(); i++) {
            DatabaseMeta dbMeta = getDatabase(i);
            if (props != null && props.areOnlyUsedConnectionsSavedToXML()) {
                if (isDatabaseConnectionUsed(dbMeta)) {
                    retval.append(dbMeta.getXML());
                }
            } else {
                retval.append(dbMeta.getXML());
            }
        }
    }
    if (includeSteps) {
        retval.append("  ").append(XMLHandler.openTag(XML_TAG_ORDER)).append(Const.CR);
        for (int i = 0; i < nrTransHops(); i++) {
            TransHopMeta transHopMeta = getTransHop(i);
            retval.append(transHopMeta.getXML());
        }
        retval.append("  ").append(XMLHandler.closeTag(XML_TAG_ORDER)).append(Const.CR);
        /* The steps... */
        for (int i = 0; i < nrSteps(); i++) {
            StepMeta stepMeta = getStep(i);
            if (stepMeta.getStepMetaInterface() instanceof HasRepositoryInterface) {
                ((HasRepositoryInterface) stepMeta.getStepMetaInterface()).setRepository(repository);
            }
            retval.append(stepMeta.getXML());
        }
        /* The error handling metadata on the steps */
        retval.append("  ").append(XMLHandler.openTag(XML_TAG_STEP_ERROR_HANDLING)).append(Const.CR);
        for (int i = 0; i < nrSteps(); i++) {
            StepMeta stepMeta = getStep(i);
            if (stepMeta.getStepErrorMeta() != null) {
                retval.append(stepMeta.getStepErrorMeta().getXML());
            }
        }
        retval.append("  ").append(XMLHandler.closeTag(XML_TAG_STEP_ERROR_HANDLING)).append(Const.CR);
    }
    // The slave-step-copy/partition distribution. Only used for slave transformations in a clustering environment.
    retval.append(slaveStepCopyPartitionDistribution.getXML());
    // Is this a slave transformation or not?
    retval.append("  ").append(XMLHandler.addTagValue("slave_transformation", slaveTransformation));
    // 
    if (includeAttributeGroups) {
        retval.append(AttributesUtil.getAttributesXml(attributesMap));
    }
    retval.append(XMLHandler.closeTag(XML_TAG)).append(Const.CR);
    return XMLFormatter.format(retval.toString());
}
Also used : PartitionSchema(org.pentaho.di.partition.PartitionSchema) Props(org.pentaho.di.core.Props) SlaveServer(org.pentaho.di.cluster.SlaveServer) DatabaseMeta(org.pentaho.di.core.database.DatabaseMeta) StepMeta(org.pentaho.di.trans.step.StepMeta) Point(org.pentaho.di.core.gui.Point) KettleExtensionPoint(org.pentaho.di.core.extension.KettleExtensionPoint) KettleXMLException(org.pentaho.di.core.exception.KettleXMLException) KettleRowException(org.pentaho.di.core.exception.KettleRowException) FileSystemException(org.apache.commons.vfs2.FileSystemException) KettleStepException(org.pentaho.di.core.exception.KettleStepException) IOException(java.io.IOException) KettleMissingPluginsException(org.pentaho.di.core.exception.KettleMissingPluginsException) KettleFileException(org.pentaho.di.core.exception.KettleFileException) KettleException(org.pentaho.di.core.exception.KettleException) KettleDatabaseException(org.pentaho.di.core.exception.KettleDatabaseException) HasRepositoryInterface(org.pentaho.di.repository.HasRepositoryInterface) NotePadMeta(org.pentaho.di.core.NotePadMeta) ClusterSchema(org.pentaho.di.cluster.ClusterSchema)
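
As a rough usage sketch (not taken from the project sources, and assuming an already populated TransMeta instance named transMeta), the boolean flags let a caller choose between a complete export and a trimmed one that omits slave server, cluster, and partition metadata:

// Hypothetical caller; transMeta is assumed to be an existing, fully loaded TransMeta.
// Full export: steps, databases, slaves, clusters, partitions, parameters, log, dependencies, notepads, attribute groups.
String fullXml = transMeta.getXML(true, true, true, true, true, true, true, true, true, true);
// Trimmed export: keep steps and databases, drop slave server, cluster, and partition information.
String leanXml = transMeta.getXML(true, true, false, false, false, true, true, true, true, true);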

Example 27 with SlaveServer

use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.

the class TransMeta method loadXML.

/**
 * Parses an XML DOM (starting at the specified Node) that describes the transformation.
 *
 * @param transnode
 *          The XML node to load from
 * @param fname
 *          The filename
 * @param rep
 *          The repository to load the default list of database connections from (null if no repository is available)
 * @param setInternalVariables
 *          true if you want to set the internal variables based on this transformation information
 * @param parentVariableSpace
 *          the parent variable space to use during TransMeta construction
 * @param prompter
 *          the changed/replace listener or null if there is none
 * @throws KettleXMLException
 *           if any errors occur during parsing of the specified file
 * @throws KettleMissingPluginsException
 *           in case missing plugins were found (details are in the exception in that case)
 */
public void loadXML(Node transnode, String fname, IMetaStore metaStore, Repository rep, boolean setInternalVariables, VariableSpace parentVariableSpace, OverwritePrompter prompter) throws KettleXMLException, KettleMissingPluginsException {
    KettleMissingPluginsException missingPluginsException = new KettleMissingPluginsException(BaseMessages.getString(PKG, "TransMeta.MissingPluginsFoundWhileLoadingTransformation.Exception"));
    // Remember this as the primary meta store.
    this.metaStore = metaStore;
    try {
        Props props = null;
        if (Props.isInitialized()) {
            props = Props.getInstance();
        }
        initializeVariablesFrom(parentVariableSpace);
        try {
            // Clear the transformation
            clear();
            // Set the filename here so it can be used in variables for ALL aspects of the transformation FIX: PDI-8890
            if (null == rep) {
                setFilename(fname);
            } else {
                // Set the repository here so it can be used in variables for ALL aspects of the job FIX: PDI-16441
                setRepository(rep);
            }
            // 
            try {
                sharedObjectsFile = XMLHandler.getTagValue(transnode, "info", "shared_objects_file");
                sharedObjects = rep != null ? rep.readTransSharedObjects(this) : readSharedObjects();
            } catch (Exception e) {
                log.logError(BaseMessages.getString(PKG, "TransMeta.ErrorReadingSharedObjects.Message", e.toString()));
                log.logError(Const.getStackTracker(e));
            }
            // Load the database connections, slave servers, cluster schemas & partition schemas into this object.
            // 
            importFromMetaStore();
            // Handle connections
            int n = XMLHandler.countNodes(transnode, DatabaseMeta.XML_TAG);
            Set<String> privateTransformationDatabases = new HashSet<>(n);
            if (log.isDebug()) {
                log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.WeHaveConnections", String.valueOf(n)));
            }
            for (int i = 0; i < n; i++) {
                if (log.isDebug()) {
                    log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.LookingAtConnection") + i);
                }
                Node nodecon = XMLHandler.getSubNodeByNr(transnode, DatabaseMeta.XML_TAG, i);
                DatabaseMeta dbcon = new DatabaseMeta(nodecon);
                dbcon.shareVariablesWith(this);
                if (!dbcon.isShared()) {
                    privateTransformationDatabases.add(dbcon.getName());
                }
                DatabaseMeta exist = findDatabase(dbcon.getName());
                if (exist == null) {
                    addDatabase(dbcon);
                } else {
                    if (!exist.isShared()) {
                        // otherwise, we just keep the shared connection.
                        if (shouldOverwrite(prompter, props, BaseMessages.getString(PKG, "TransMeta.Message.OverwriteConnectionYN", dbcon.getName()), BaseMessages.getString(PKG, "TransMeta.Message.OverwriteConnection.DontShowAnyMoreMessage"))) {
                            int idx = indexOfDatabase(exist);
                            removeDatabase(idx);
                            addDatabase(idx, dbcon);
                        }
                    }
                }
            }
            setPrivateDatabases(privateTransformationDatabases);
            // Read the notes...
            Node notepadsnode = XMLHandler.getSubNode(transnode, XML_TAG_NOTEPADS);
            int nrnotes = XMLHandler.countNodes(notepadsnode, NotePadMeta.XML_TAG);
            for (int i = 0; i < nrnotes; i++) {
                Node notepadnode = XMLHandler.getSubNodeByNr(notepadsnode, NotePadMeta.XML_TAG, i);
                NotePadMeta ni = new NotePadMeta(notepadnode);
                notes.add(ni);
            }
            // Handle Steps
            int s = XMLHandler.countNodes(transnode, StepMeta.XML_TAG);
            if (log.isDebug()) {
                log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.ReadingSteps") + s + " steps...");
            }
            for (int i = 0; i < s; i++) {
                Node stepnode = XMLHandler.getSubNodeByNr(transnode, StepMeta.XML_TAG, i);
                if (log.isDebug()) {
                    log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.LookingAtStep") + i);
                }
                StepMeta stepMeta = new StepMeta(stepnode, databases, metaStore);
                // for tracing, retain hierarchy
                stepMeta.setParentTransMeta(this);
                if (stepMeta.isMissing()) {
                    addMissingTrans((MissingTrans) stepMeta.getStepMetaInterface());
                }
                // Check if the step exists and if it's a shared step.
                // If so, then we will keep the shared version, not this one.
                // The stored XML is only for backup purposes.
                // 
                StepMeta check = findStep(stepMeta.getName());
                if (check != null) {
                    if (!check.isShared()) {
                        // Don't overwrite shared objects
                        addOrReplaceStep(stepMeta);
                    } else {
                        // Just keep the drawn flag and location
                        check.setDraw(stepMeta.isDrawn());
                        check.setLocation(stepMeta.getLocation());
                    }
                } else {
                    // simply add it.
                    addStep(stepMeta);
                }
            }
            // Read the error handling code of the steps...
            // 
            Node errorHandlingNode = XMLHandler.getSubNode(transnode, XML_TAG_STEP_ERROR_HANDLING);
            int nrErrorHandlers = XMLHandler.countNodes(errorHandlingNode, StepErrorMeta.XML_ERROR_TAG);
            for (int i = 0; i < nrErrorHandlers; i++) {
                Node stepErrorMetaNode = XMLHandler.getSubNodeByNr(errorHandlingNode, StepErrorMeta.XML_ERROR_TAG, i);
                StepErrorMeta stepErrorMeta = new StepErrorMeta(this, stepErrorMetaNode, steps);
                if (stepErrorMeta.getSourceStep() != null) {
                    // a bit of a trick, I know.
                    stepErrorMeta.getSourceStep().setStepErrorMeta(stepErrorMeta);
                }
            }
            // 
            for (int i = 0; i < nrSteps(); i++) {
                StepMeta stepMeta = getStep(i);
                StepMetaInterface sii = stepMeta.getStepMetaInterface();
                if (sii != null) {
                    sii.searchInfoAndTargetSteps(steps);
                }
            }
            // Handle Hops
            // 
            Node ordernode = XMLHandler.getSubNode(transnode, XML_TAG_ORDER);
            n = XMLHandler.countNodes(ordernode, TransHopMeta.XML_HOP_TAG);
            if (log.isDebug()) {
                log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.WeHaveHops") + n + " hops...");
            }
            for (int i = 0; i < n; i++) {
                if (log.isDebug()) {
                    log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.LookingAtHop") + i);
                }
                Node hopnode = XMLHandler.getSubNodeByNr(ordernode, TransHopMeta.XML_HOP_TAG, i);
                TransHopMeta hopinf = new TransHopMeta(hopnode, steps);
                hopinf.setErrorHop(isErrorNode(errorHandlingNode, hopnode));
                addTransHop(hopinf);
            }
            // 
            // get transformation info:
            // 
            Node infonode = XMLHandler.getSubNode(transnode, XML_TAG_INFO);
            // Name
            // 
            setName(XMLHandler.getTagValue(infonode, "name"));
            // description
            // 
            description = XMLHandler.getTagValue(infonode, "description");
            // extended description
            // 
            extendedDescription = XMLHandler.getTagValue(infonode, "extended_description");
            // trans version
            // 
            trans_version = XMLHandler.getTagValue(infonode, "trans_version");
            // trans status
            // 
            trans_status = Const.toInt(XMLHandler.getTagValue(infonode, "trans_status"), -1);
            String transTypeCode = XMLHandler.getTagValue(infonode, "trans_type");
            transformationType = TransformationType.getTransformationTypeByCode(transTypeCode);
            // 
            if (rep != null) {
                String directoryPath = XMLHandler.getTagValue(infonode, "directory");
                if (directoryPath != null) {
                    directory = rep.findDirectory(directoryPath);
                    if (directory == null) {
                        // not found
                        // The root as default
                        directory = new RepositoryDirectory();
                    }
                }
            }
            // Read logging table information
            // 
            Node logNode = XMLHandler.getSubNode(infonode, "log");
            if (logNode != null) {
                // Backward compatibility...
                // 
                Node transLogNode = XMLHandler.getSubNode(logNode, TransLogTable.XML_TAG);
                if (transLogNode == null) {
                    // Load the XML
                    // 
                    transLogTable.findField(TransLogTable.ID.LINES_READ).setSubject(findStep(XMLHandler.getTagValue(infonode, "log", "read")));
                    transLogTable.findField(TransLogTable.ID.LINES_WRITTEN).setSubject(findStep(XMLHandler.getTagValue(infonode, "log", "write")));
                    transLogTable.findField(TransLogTable.ID.LINES_INPUT).setSubject(findStep(XMLHandler.getTagValue(infonode, "log", "input")));
                    transLogTable.findField(TransLogTable.ID.LINES_OUTPUT).setSubject(findStep(XMLHandler.getTagValue(infonode, "log", "output")));
                    transLogTable.findField(TransLogTable.ID.LINES_UPDATED).setSubject(findStep(XMLHandler.getTagValue(infonode, "log", "update")));
                    transLogTable.findField(TransLogTable.ID.LINES_REJECTED).setSubject(findStep(XMLHandler.getTagValue(infonode, "log", "rejected")));
                    transLogTable.setConnectionName(XMLHandler.getTagValue(infonode, "log", "connection"));
                    transLogTable.setSchemaName(XMLHandler.getTagValue(infonode, "log", "schema"));
                    transLogTable.setTableName(XMLHandler.getTagValue(infonode, "log", "table"));
                    transLogTable.findField(TransLogTable.ID.ID_BATCH).setEnabled("Y".equalsIgnoreCase(XMLHandler.getTagValue(infonode, "log", "use_batchid")));
                    transLogTable.findField(TransLogTable.ID.LOG_FIELD).setEnabled("Y".equalsIgnoreCase(XMLHandler.getTagValue(infonode, "log", "USE_LOGFIELD")));
                    transLogTable.setLogSizeLimit(XMLHandler.getTagValue(infonode, "log", "size_limit_lines"));
                    transLogTable.setLogInterval(XMLHandler.getTagValue(infonode, "log", "interval"));
                    transLogTable.findField(TransLogTable.ID.CHANNEL_ID).setEnabled(false);
                    transLogTable.findField(TransLogTable.ID.LINES_REJECTED).setEnabled(false);
                    performanceLogTable.setConnectionName(transLogTable.getConnectionName());
                    performanceLogTable.setTableName(XMLHandler.getTagValue(infonode, "log", "step_performance_table"));
                } else {
                    transLogTable.loadXML(transLogNode, databases, steps);
                }
                Node perfLogNode = XMLHandler.getSubNode(logNode, PerformanceLogTable.XML_TAG);
                if (perfLogNode != null) {
                    performanceLogTable.loadXML(perfLogNode, databases, steps);
                }
                Node channelLogNode = XMLHandler.getSubNode(logNode, ChannelLogTable.XML_TAG);
                if (channelLogNode != null) {
                    channelLogTable.loadXML(channelLogNode, databases, steps);
                }
                Node stepLogNode = XMLHandler.getSubNode(logNode, StepLogTable.XML_TAG);
                if (stepLogNode != null) {
                    stepLogTable.loadXML(stepLogNode, databases, steps);
                }
                Node metricsLogNode = XMLHandler.getSubNode(logNode, MetricsLogTable.XML_TAG);
                if (metricsLogNode != null) {
                    metricsLogTable.loadXML(metricsLogNode, databases, steps);
                }
            }
            // Maxdate range options...
            String maxdatcon = XMLHandler.getTagValue(infonode, "maxdate", "connection");
            maxDateConnection = findDatabase(maxdatcon);
            maxDateTable = XMLHandler.getTagValue(infonode, "maxdate", "table");
            maxDateField = XMLHandler.getTagValue(infonode, "maxdate", "field");
            String offset = XMLHandler.getTagValue(infonode, "maxdate", "offset");
            maxDateOffset = Const.toDouble(offset, 0.0);
            String mdiff = XMLHandler.getTagValue(infonode, "maxdate", "maxdiff");
            maxDateDifference = Const.toDouble(mdiff, 0.0);
            // Check the dependencies as far as dates are concerned...
            // We calculate BEFORE we run the MAX of these dates
            // If the date is larger than enddate, startdate is set to MIN_DATE
            // 
            Node depsNode = XMLHandler.getSubNode(infonode, XML_TAG_DEPENDENCIES);
            int nrDeps = XMLHandler.countNodes(depsNode, TransDependency.XML_TAG);
            for (int i = 0; i < nrDeps; i++) {
                Node depNode = XMLHandler.getSubNodeByNr(depsNode, TransDependency.XML_TAG, i);
                TransDependency transDependency = new TransDependency(depNode, databases);
                if (transDependency.getDatabase() != null && transDependency.getFieldname() != null) {
                    addDependency(transDependency);
                }
            }
            // Read the named parameters.
            Node paramsNode = XMLHandler.getSubNode(infonode, XML_TAG_PARAMETERS);
            int nrParams = XMLHandler.countNodes(paramsNode, "parameter");
            for (int i = 0; i < nrParams; i++) {
                Node paramNode = XMLHandler.getSubNodeByNr(paramsNode, "parameter", i);
                String paramName = XMLHandler.getTagValue(paramNode, "name");
                String defaultValue = XMLHandler.getTagValue(paramNode, "default_value");
                String descr = XMLHandler.getTagValue(paramNode, "description");
                addParameterDefinition(paramName, defaultValue, descr);
            }
            // Read the partitioning schemas
            // 
            Node partSchemasNode = XMLHandler.getSubNode(infonode, XML_TAG_PARTITIONSCHEMAS);
            int nrPartSchemas = XMLHandler.countNodes(partSchemasNode, PartitionSchema.XML_TAG);
            for (int i = 0; i < nrPartSchemas; i++) {
                Node partSchemaNode = XMLHandler.getSubNodeByNr(partSchemasNode, PartitionSchema.XML_TAG, i);
                PartitionSchema partitionSchema = new PartitionSchema(partSchemaNode);
                // Check if the step exists and if it's a shared step.
                // If so, then we will keep the shared version, not this one.
                // The stored XML is only for backup purposes.
                // 
                PartitionSchema check = findPartitionSchema(partitionSchema.getName());
                if (check != null) {
                    if (!check.isShared()) {
                        // we don't overwrite shared objects.
                        if (shouldOverwrite(prompter, props, BaseMessages.getString(PKG, "TransMeta.Message.OverwritePartitionSchemaYN", partitionSchema.getName()), BaseMessages.getString(PKG, "TransMeta.Message.OverwriteConnection.DontShowAnyMoreMessage"))) {
                            addOrReplacePartitionSchema(partitionSchema);
                        }
                    }
                } else {
                    partitionSchemas.add(partitionSchema);
                }
            }
            // 
            for (int i = 0; i < nrSteps(); i++) {
                StepPartitioningMeta stepPartitioningMeta = getStep(i).getStepPartitioningMeta();
                if (stepPartitioningMeta != null) {
                    stepPartitioningMeta.setPartitionSchemaAfterLoading(partitionSchemas);
                }
                StepPartitioningMeta targetStepPartitioningMeta = getStep(i).getTargetStepPartitioningMeta();
                if (targetStepPartitioningMeta != null) {
                    targetStepPartitioningMeta.setPartitionSchemaAfterLoading(partitionSchemas);
                }
            }
            // Read the slave servers...
            // 
            Node slaveServersNode = XMLHandler.getSubNode(infonode, XML_TAG_SLAVESERVERS);
            int nrSlaveServers = XMLHandler.countNodes(slaveServersNode, SlaveServer.XML_TAG);
            for (int i = 0; i < nrSlaveServers; i++) {
                Node slaveServerNode = XMLHandler.getSubNodeByNr(slaveServersNode, SlaveServer.XML_TAG, i);
                SlaveServer slaveServer = new SlaveServer(slaveServerNode);
                if (slaveServer.getName() == null) {
                    log.logError(BaseMessages.getString(PKG, "TransMeta.Log.WarningWhileCreationSlaveServer", slaveServer.getName()));
                    continue;
                }
                slaveServer.shareVariablesWith(this);
                // Check if the object exists and if it's a shared object.
                // If so, then we will keep the shared version, not this one.
                // The stored XML is only for backup purposes.
                SlaveServer check = findSlaveServer(slaveServer.getName());
                if (check != null) {
                    if (!check.isShared()) {
                        // we don't overwrite shared objects.
                        if (shouldOverwrite(prompter, props, BaseMessages.getString(PKG, "TransMeta.Message.OverwriteSlaveServerYN", slaveServer.getName()), BaseMessages.getString(PKG, "TransMeta.Message.OverwriteConnection.DontShowAnyMoreMessage"))) {
                            addOrReplaceSlaveServer(slaveServer);
                        }
                    }
                } else {
                    slaveServers.add(slaveServer);
                }
            }
            // Read the cluster schemas
            // 
            Node clusterSchemasNode = XMLHandler.getSubNode(infonode, XML_TAG_CLUSTERSCHEMAS);
            int nrClusterSchemas = XMLHandler.countNodes(clusterSchemasNode, ClusterSchema.XML_TAG);
            for (int i = 0; i < nrClusterSchemas; i++) {
                Node clusterSchemaNode = XMLHandler.getSubNodeByNr(clusterSchemasNode, ClusterSchema.XML_TAG, i);
                ClusterSchema clusterSchema = new ClusterSchema(clusterSchemaNode, slaveServers);
                clusterSchema.shareVariablesWith(this);
                // Check if the object exists and if it's a shared object.
                // If so, then we will keep the shared version, not this one.
                // The stored XML is only for backup purposes.
                ClusterSchema check = findClusterSchema(clusterSchema.getName());
                if (check != null) {
                    if (!check.isShared()) {
                        // we don't overwrite shared objects.
                        if (shouldOverwrite(prompter, props, BaseMessages.getString(PKG, "TransMeta.Message.OverwriteClusterSchemaYN", clusterSchema.getName()), BaseMessages.getString(PKG, "TransMeta.Message.OverwriteConnection.DontShowAnyMoreMessage"))) {
                            addOrReplaceClusterSchema(clusterSchema);
                        }
                    }
                } else {
                    clusterSchemas.add(clusterSchema);
                }
            }
            // 
            for (int i = 0; i < nrSteps(); i++) {
                getStep(i).setClusterSchemaAfterLoading(clusterSchemas);
            }
            String srowset = XMLHandler.getTagValue(infonode, "size_rowset");
            sizeRowset = Const.toInt(srowset, Const.ROWS_IN_ROWSET);
            sleepTimeEmpty = Const.toInt(XMLHandler.getTagValue(infonode, "sleep_time_empty"), Const.TIMEOUT_GET_MILLIS);
            sleepTimeFull = Const.toInt(XMLHandler.getTagValue(infonode, "sleep_time_full"), Const.TIMEOUT_PUT_MILLIS);
            usingUniqueConnections = "Y".equalsIgnoreCase(XMLHandler.getTagValue(infonode, "unique_connections"));
            feedbackShown = !"N".equalsIgnoreCase(XMLHandler.getTagValue(infonode, "feedback_shown"));
            feedbackSize = Const.toInt(XMLHandler.getTagValue(infonode, "feedback_size"), Const.ROWS_UPDATE);
            usingThreadPriorityManagment = !"N".equalsIgnoreCase(XMLHandler.getTagValue(infonode, "using_thread_priorities"));
            // Performance monitoring for steps...
            // 
            capturingStepPerformanceSnapShots = "Y".equalsIgnoreCase(XMLHandler.getTagValue(infonode, "capture_step_performance"));
            stepPerformanceCapturingDelay = Const.toLong(XMLHandler.getTagValue(infonode, "step_performance_capturing_delay"), 1000);
            stepPerformanceCapturingSizeLimit = XMLHandler.getTagValue(infonode, "step_performance_capturing_size_limit");
            // Created user/date
            createdUser = XMLHandler.getTagValue(infonode, "created_user");
            String createDate = XMLHandler.getTagValue(infonode, "created_date");
            if (createDate != null) {
                createdDate = XMLHandler.stringToDate(createDate);
            }
            // Changed user/date
            modifiedUser = XMLHandler.getTagValue(infonode, "modified_user");
            String modDate = XMLHandler.getTagValue(infonode, "modified_date");
            if (modDate != null) {
                modifiedDate = XMLHandler.stringToDate(modDate);
            }
            Node partitionDistNode = XMLHandler.getSubNode(transnode, SlaveStepCopyPartitionDistribution.XML_TAG);
            if (partitionDistNode != null) {
                slaveStepCopyPartitionDistribution = new SlaveStepCopyPartitionDistribution(partitionDistNode);
            } else {
                // leave empty
                slaveStepCopyPartitionDistribution = new SlaveStepCopyPartitionDistribution();
            }
            // Is this a slave transformation?
            // 
            slaveTransformation = "Y".equalsIgnoreCase(XMLHandler.getTagValue(transnode, "slave_transformation"));
            if (log.isDebug()) {
                log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.NumberOfStepsReaded") + nrSteps());
                log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.NumberOfHopsReaded") + nrTransHops());
            }
            sortSteps();
            // Load the attribute groups map
            // 
            attributesMap = AttributesUtil.loadAttributes(XMLHandler.getSubNode(transnode, AttributesUtil.XML_TAG));
            keyForSessionKey = XMLHandler.stringToBinary(XMLHandler.getTagValue(infonode, "key_for_session_key"));
            isKeyPrivate = "Y".equals(XMLHandler.getTagValue(infonode, "is_key_private"));
        } catch (KettleXMLException xe) {
            throw new KettleXMLException(BaseMessages.getString(PKG, "TransMeta.Exception.ErrorReadingTransformation"), xe);
        } catch (KettleException e) {
            throw new KettleXMLException(e);
        } finally {
            initializeVariablesFrom(null);
            if (setInternalVariables) {
                setInternalKettleVariables();
            }
            ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.TransformationMetaLoaded.id, this);
        }
    } catch (Exception e) {
        // 
        if (!missingPluginsException.getMissingPluginDetailsList().isEmpty()) {
            throw missingPluginsException;
        } else {
            throw new KettleXMLException(BaseMessages.getString(PKG, "TransMeta.Exception.ErrorReadingTransformation"), e);
        }
    } finally {
        if (!missingPluginsException.getMissingPluginDetailsList().isEmpty()) {
            throw missingPluginsException;
        }
    }
}
Also used : KettleException(org.pentaho.di.core.exception.KettleException) RepositoryDirectory(org.pentaho.di.repository.RepositoryDirectory) KettleMissingPluginsException(org.pentaho.di.core.exception.KettleMissingPluginsException) PartitionSchema(org.pentaho.di.partition.PartitionSchema) Node(org.w3c.dom.Node) StepErrorMeta(org.pentaho.di.trans.step.StepErrorMeta) StepMetaInterface(org.pentaho.di.trans.step.StepMetaInterface) Props(org.pentaho.di.core.Props) StepPartitioningMeta(org.pentaho.di.trans.step.StepPartitioningMeta) SlaveServer(org.pentaho.di.cluster.SlaveServer) DatabaseMeta(org.pentaho.di.core.database.DatabaseMeta) StepMeta(org.pentaho.di.trans.step.StepMeta) KettleXMLException(org.pentaho.di.core.exception.KettleXMLException) KettleRowException(org.pentaho.di.core.exception.KettleRowException) FileSystemException(org.apache.commons.vfs2.FileSystemException) KettleStepException(org.pentaho.di.core.exception.KettleStepException) IOException(java.io.IOException) KettleMissingPluginsException(org.pentaho.di.core.exception.KettleMissingPluginsException) KettleFileException(org.pentaho.di.core.exception.KettleFileException) KettleException(org.pentaho.di.core.exception.KettleException) KettleDatabaseException(org.pentaho.di.core.exception.KettleDatabaseException) Point(org.pentaho.di.core.gui.Point) KettleExtensionPoint(org.pentaho.di.core.extension.KettleExtensionPoint) KettleXMLException(org.pentaho.di.core.exception.KettleXMLException) NotePadMeta(org.pentaho.di.core.NotePadMeta) ClusterSchema(org.pentaho.di.cluster.ClusterSchema) HashSet(java.util.HashSet)
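
A minimal sketch of how this overload might be driven without a repository; the file name, the fresh Variables instance, and the surrounding method (which would have to declare the KettleXMLException and KettleMissingPluginsException thrown here) are assumptions, not part of the project sources:

// Hypothetical caller; "sample.ktr" is a placeholder file name.
Document document = XMLHandler.loadXMLFile("sample.ktr");               // parse the .ktr file into a DOM
Node transNode = XMLHandler.getSubNode(document, TransMeta.XML_TAG);     // locate the <transformation> node
TransMeta transMeta = new TransMeta();
// No metastore, no repository, set internal variables, fresh variable space, no overwrite prompter.
transMeta.loadXML(transNode, "sample.ktr", null, null, true, new Variables(), null);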

Example 28 with SlaveServer

use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.

the class TransMeta method realClone.

/**
 * Perform a real clone of the transformation meta-data object, including cloning all lists and copying all values. If
 * the doClear parameter is true, the clone will be cleared of ALL values before the copy. If false, only the copied
 * fields will be cleared.
 *
 * @param doClear
 *          Whether to clear all of the clone's data before copying from the source object
 * @return a real clone of the calling object
 */
public Object realClone(boolean doClear) {
    try {
        TransMeta transMeta = (TransMeta) super.clone();
        if (doClear) {
            transMeta.clear();
        } else {
            // Clear out the things we're replacing below
            transMeta.databases = new ArrayList<>();
            transMeta.steps = new ArrayList<>();
            transMeta.hops = new ArrayList<>();
            transMeta.notes = new ArrayList<>();
            transMeta.dependencies = new ArrayList<>();
            transMeta.partitionSchemas = new ArrayList<>();
            transMeta.slaveServers = new ArrayList<>();
            transMeta.clusterSchemas = new ArrayList<>();
            transMeta.namedParams = new NamedParamsDefault();
            transMeta.stepChangeListeners = new ArrayList<>();
        }
        for (DatabaseMeta db : databases) {
            transMeta.addDatabase((DatabaseMeta) db.clone());
        }
        for (StepMeta step : steps) {
            transMeta.addStep((StepMeta) step.clone());
        }
        // PDI-15799: Step references are original yet. Set them to the clones.
        for (StepMeta step : transMeta.getSteps()) {
            final StepMetaInterface stepMetaInterface = step.getStepMetaInterface();
            if (stepMetaInterface != null) {
                final StepIOMetaInterface stepIOMeta = stepMetaInterface.getStepIOMeta();
                if (stepIOMeta != null) {
                    for (StreamInterface stream : stepIOMeta.getInfoStreams()) {
                        String streamStepName = stream.getStepname();
                        if (streamStepName != null) {
                            StepMeta streamStepMeta = transMeta.findStep(streamStepName);
                            stream.setStepMeta(streamStepMeta);
                        }
                    }
                }
            }
        }
        for (TransHopMeta hop : hops) {
            transMeta.addTransHop((TransHopMeta) hop.clone());
        }
        for (NotePadMeta note : notes) {
            transMeta.addNote((NotePadMeta) note.clone());
        }
        for (TransDependency dep : dependencies) {
            transMeta.addDependency((TransDependency) dep.clone());
        }
        for (SlaveServer slave : slaveServers) {
            transMeta.getSlaveServers().add((SlaveServer) slave.clone());
        }
        for (ClusterSchema schema : clusterSchemas) {
            transMeta.getClusterSchemas().add(schema.clone());
        }
        for (PartitionSchema schema : partitionSchemas) {
            transMeta.getPartitionSchemas().add((PartitionSchema) schema.clone());
        }
        for (String key : listParameters()) {
            transMeta.addParameterDefinition(key, getParameterDefault(key), getParameterDescription(key));
        }
        return transMeta;
    } catch (Exception e) {
        e.printStackTrace();
        return null;
    }
}
Also used : NamedParamsDefault(org.pentaho.di.core.parameters.NamedParamsDefault) PartitionSchema(org.pentaho.di.partition.PartitionSchema) StepMetaInterface(org.pentaho.di.trans.step.StepMetaInterface) StepIOMetaInterface(org.pentaho.di.trans.step.StepIOMetaInterface) SlaveServer(org.pentaho.di.cluster.SlaveServer) DatabaseMeta(org.pentaho.di.core.database.DatabaseMeta) StepMeta(org.pentaho.di.trans.step.StepMeta) KettleXMLException(org.pentaho.di.core.exception.KettleXMLException) KettleRowException(org.pentaho.di.core.exception.KettleRowException) FileSystemException(org.apache.commons.vfs2.FileSystemException) KettleStepException(org.pentaho.di.core.exception.KettleStepException) IOException(java.io.IOException) KettleMissingPluginsException(org.pentaho.di.core.exception.KettleMissingPluginsException) KettleFileException(org.pentaho.di.core.exception.KettleFileException) KettleException(org.pentaho.di.core.exception.KettleException) KettleDatabaseException(org.pentaho.di.core.exception.KettleDatabaseException) NotePadMeta(org.pentaho.di.core.NotePadMeta) ClusterSchema(org.pentaho.di.cluster.ClusterSchema) StreamInterface(org.pentaho.di.trans.step.errorhandling.StreamInterface)
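
A short usage sketch (hypothetical caller, assuming an existing TransMeta named transMeta): realClone(false) keeps the scalar fields of the original while the lists above are replaced with deep copies, so edits to the copy do not leak back into the source object:

// Hypothetical caller; transMeta is assumed to be an existing TransMeta.
TransMeta workingCopy = (TransMeta) transMeta.realClone(false);
workingCopy.setName(transMeta.getName() + " (copy)");
// The copy's steps, hops, notes, slave servers and schemas are clones, so the original stays untouched.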

Example 29 with SlaveServer

use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.

the class TransSplitter method generateSlavePartitionSchemas.

/**
 * We want to divide the available partitions over the slaves. Let's create a hashtable that contains the partition
 * schemas. Since we can only use a single cluster, we can divide them all over a single set of slave servers.
 *
 * @throws KettleException
 */
private void generateSlavePartitionSchemas() throws KettleException {
    slaveServerPartitionsMap = new Hashtable<SlaveServer, Map<PartitionSchema, List<String>>>();
    for (int i = 0; i < referenceSteps.length; i++) {
        StepMeta stepMeta = referenceSteps[i];
        StepPartitioningMeta stepPartitioningMeta = stepMeta.getStepPartitioningMeta();
        if (stepPartitioningMeta == null) {
            continue;
        }
        if (stepPartitioningMeta.getMethodType() == StepPartitioningMeta.PARTITIONING_METHOD_NONE) {
            continue;
        }
        ClusterSchema clusterSchema = stepMeta.getClusterSchema();
        if (clusterSchema == null) {
            continue;
        }
        // Make a copy of the partition schema because we might change the object.
        // Let's not alter the original transformation.
        // The match is done on name, and the name is preserved in this case, so it should be safe to do so.
        // Also, all cloned steps re-match with the cloned schema name afterwards...
        // 
        PartitionSchema partitionSchema = (PartitionSchema) stepPartitioningMeta.getPartitionSchema().clone();
        int nrSlaves = clusterSchema.findNrSlaves();
        if (nrSlaves == 0) {
            // no slaves: ignore this situation too
            continue;
        }
        // 
        if (partitionSchema.isDynamicallyDefined()) {
            partitionSchema.expandPartitionsDynamically(nrSlaves, originalTransformation);
        }
        int nrPartitions = partitionSchema.getPartitionIDs().size();
        if (nrPartitions < nrSlaves) {
            throw new KettleException("It doesn't make sense to have a partitioned, clustered step with less partitions (" + nrPartitions + ") than that there are slave servers (" + nrSlaves + ")");
        }
        int slaveServerNr = 0;
        List<SlaveServer> slaveServers = clusterSchema.getSlaveServers();
        for (int p = 0; p < nrPartitions; p++) {
            String partitionId = partitionSchema.getPartitionIDs().get(p);
            SlaveServer slaveServer = slaveServers.get(slaveServerNr);
            // 
            if (slaveServer.isMaster()) {
                slaveServerNr++;
                if (slaveServerNr >= slaveServers.size()) {
                    // re-start
                    slaveServerNr = 0;
                }
                slaveServer = slaveServers.get(slaveServerNr);
            }
            Map<PartitionSchema, List<String>> schemaPartitionsMap = slaveServerPartitionsMap.get(slaveServer);
            if (schemaPartitionsMap == null) {
                // Add the schema-partitions map to the slave server
                // 
                schemaPartitionsMap = new HashMap<PartitionSchema, List<String>>();
                slaveServerPartitionsMap.put(slaveServer, schemaPartitionsMap);
            }
            // See if we find a list of partitions
            // 
            List<String> partitions = schemaPartitionsMap.get(partitionSchema);
            if (partitions == null) {
                partitions = new ArrayList<String>();
                schemaPartitionsMap.put(partitionSchema, partitions);
            }
            // 
            if (partitions.indexOf(partitionId) < 0) {
                partitions.add(partitionId);
            }
            // Switch to next slave.
            slaveServerNr++;
            if (slaveServerNr >= clusterSchema.getSlaveServers().size()) {
                // re-start
                slaveServerNr = 0;
            }
        }
    }
// System.out.println("We have "+(slaveServerPartitionsMap.size())+" entries in the slave server partitions map");
}
Also used : KettleException(org.pentaho.di.core.exception.KettleException) PartitionSchema(org.pentaho.di.partition.PartitionSchema) SlaveServer(org.pentaho.di.cluster.SlaveServer) StepPartitioningMeta(org.pentaho.di.trans.step.StepPartitioningMeta) StepMeta(org.pentaho.di.trans.step.StepMeta) ArrayList(java.util.ArrayList) LinkedList(java.util.LinkedList) List(java.util.List) HashMap(java.util.HashMap) Map(java.util.Map) ClusterSchema(org.pentaho.di.cluster.ClusterSchema)
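
Because the method is private, it cannot be called directly from outside TransSplitter; the following standalone sketch (hypothetical names, simplified from the loop above, java.util imports assumed) illustrates the round-robin idea: partition IDs are dealt out to the cluster's slave servers in order, and the master is skipped so it never receives a partition. It assumes the cluster contains at least one non-master slave.

// Simplified, hypothetical sketch of the assignment logic; not part of TransSplitter itself.
List<String> partitionIds = Arrays.asList("P1", "P2", "P3", "P4", "P5");
List<SlaveServer> slaves = clusterSchema.getSlaveServers();
Map<SlaveServer, List<String>> assignment = new HashMap<>();
int slaveNr = 0;
for (String partitionId : partitionIds) {
    // Skip the master; only worker slaves receive partitions (assumes at least one non-master slave).
    while (slaves.get(slaveNr).isMaster()) {
        slaveNr = (slaveNr + 1) % slaves.size();
    }
    assignment.computeIfAbsent(slaves.get(slaveNr), s -> new ArrayList<>()).add(partitionId);
    // Switch to the next slave for the following partition.
    slaveNr = (slaveNr + 1) % slaves.size();
}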

Example 30 with SlaveServer

use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.

the class SlavesController method createSlave.

public void createSlave() {
    try {
        // Create a new SlaveServer for storing the result
        SlaveServer slaveServer = new SlaveServer();
        SlaveServerDialog ssd = new SlaveServerDialog(shell, slaveServer);
        if (ssd.open()) {
            ObjectId slaveId = repository.getSlaveID(slaveServer.getName());
            // Make sure the slave does not already exist
            if (slaveId != null) {
                MessageBox mb = new MessageBox(shell, SWT.ICON_ERROR | SWT.OK);
                mb.setMessage(BaseMessages.getString(PKG, "RepositoryExplorerDialog.Slave.AlreadyExists.Message", slaveServer.getName()));
                mb.setText(BaseMessages.getString(PKG, "RepositoryExplorerDialog.Slave.Create.Title"));
                mb.open();
            } else {
                if (slaveServer.getName() != null && !slaveServer.getName().equals("")) {
                    repository.insertLogEntry(BaseMessages.getString(PKG, "SlavesController.Message.CreatingSlave", slaveServer.getName()));
                    repository.save(slaveServer, Const.VERSION_COMMENT_INITIAL_VERSION, null);
                    if (getSharedObjectSyncUtil() != null) {
                        getSharedObjectSyncUtil().reloadJobRepositoryObjects(true);
                        getSharedObjectSyncUtil().reloadTransformationRepositoryObjects(true);
                    }
                } else {
                    MessageBox mb = new MessageBox(shell, SWT.ICON_ERROR | SWT.OK);
                    mb.setMessage(BaseMessages.getString(PKG, "RepositoryExplorerDialog.Slave.Edit.InvalidName.Message"));
                    mb.setText(BaseMessages.getString(PKG, "RepositoryExplorerDialog.Slave.Create.Title"));
                    mb.open();
                }
            }
        }
    } catch (KettleException e) {
        if (mainController == null || !mainController.handleLostRepository(e)) {
            new ErrorDialog(shell, BaseMessages.getString(PKG, "RepositoryExplorerDialog.Slave.Create.Title"), BaseMessages.getString(PKG, "RepositoryExplorerDialog.Slave.Create.UnexpectedError.Message"), e);
        }
    } finally {
        refreshSlaves();
    }
}
Also used : KettleException(org.pentaho.di.core.exception.KettleException) ObjectId(org.pentaho.di.repository.ObjectId) SlaveServerDialog(org.pentaho.di.ui.cluster.dialog.SlaveServerDialog) ErrorDialog(org.pentaho.di.ui.core.dialog.ErrorDialog) SlaveServer(org.pentaho.di.cluster.SlaveServer) MessageBox(org.eclipse.swt.widgets.MessageBox)
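
Stripped of the SWT dialogs, the core of the flow is a duplicate check followed by a save: look the name up with repository.getSlaveID and only persist the slave when no entry with that name exists yet. A minimal headless sketch, assuming an already-connected Repository named repository and using placeholder settings:

// Hypothetical, non-UI variant of the duplicate check above.
SlaveServer slaveServer = new SlaveServer();
slaveServer.setName("slave-1");        // placeholder name
slaveServer.setHostname("localhost");  // placeholder host
slaveServer.setPort("8081");           // placeholder Carte port
if (repository.getSlaveID(slaveServer.getName()) == null) {
    // No slave with this name yet: store it with the standard initial version comment.
    repository.save(slaveServer, Const.VERSION_COMMENT_INITIAL_VERSION, null);
}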

Aggregations

SlaveServer (org.pentaho.di.cluster.SlaveServer): 103 usages
KettleException (org.pentaho.di.core.exception.KettleException): 35 usages
DatabaseMeta (org.pentaho.di.core.database.DatabaseMeta): 28 usages
ClusterSchema (org.pentaho.di.cluster.ClusterSchema): 22 usages
KettleDatabaseException (org.pentaho.di.core.exception.KettleDatabaseException): 18 usages
PartitionSchema (org.pentaho.di.partition.PartitionSchema): 18 usages
Test (org.junit.Test): 17 usages
KettleExtensionPoint (org.pentaho.di.core.extension.KettleExtensionPoint): 17 usages
JobMeta (org.pentaho.di.job.JobMeta): 16 usages
ObjectId (org.pentaho.di.repository.ObjectId): 15 usages
StepMeta (org.pentaho.di.trans.step.StepMeta): 14 usages
ArrayList (java.util.ArrayList): 13 usages
TransMeta (org.pentaho.di.trans.TransMeta): 11 usages
KettleFileException (org.pentaho.di.core.exception.KettleFileException): 10 usages
NotePadMeta (org.pentaho.di.core.NotePadMeta): 9 usages
UnknownParamException (org.pentaho.di.core.parameters.UnknownParamException): 9 usages
Point (org.pentaho.di.core.gui.Point): 8 usages
KettleXMLException (org.pentaho.di.core.exception.KettleXMLException): 7 usages
DuplicateParamException (org.pentaho.di.core.parameters.DuplicateParamException): 7 usages
JobEntryCopy (org.pentaho.di.job.entry.JobEntryCopy): 7 usages