Example 41 with DatabaseMeta

Use of org.pentaho.di.core.database.DatabaseMeta in project pentaho-kettle by pentaho, in the class TransMeta, method getXML.

/**
 * Gets the XML representation of this transformation, including or excluding step, database, slave server, cluster,
 * partition, named parameter, log, dependency, notepad, or attribute-group information as specified by the parameters.
 *
 * @param includeSteps
 *          whether to include step data
 * @param includeDatabase
 *          whether to include database data
 * @param includeSlaves
 *          whether to include slave server data
 * @param includeClusters
 *          whether to include cluster data
 * @param includePartitions
 *          whether to include partition data
 * @param includeNamedParameters
 *          whether to include named parameters data
 * @param includeLog
 *          whether to include log data
 * @param includeDependencies
 *          whether to include dependencies data
 * @param includeNotePads
 *          whether to include notepads data
 * @param includeAttributeGroups
 *          whether to include attributes map data
 * @return the XML representation of this transformation
 * @throws KettleException
 *           if any errors occur during generation of the XML
 */
public String getXML(boolean includeSteps, boolean includeDatabase, boolean includeSlaves, boolean includeClusters, boolean includePartitions, boolean includeNamedParameters, boolean includeLog, boolean includeDependencies, boolean includeNotePads, boolean includeAttributeGroups) throws KettleException {
    // Clear the embedded named clusters. We will repopulate from steps that used named clusters.
    getNamedClusterEmbedManager().clear();
    Props props = null;
    if (Props.isInitialized()) {
        props = Props.getInstance();
    }
    StringBuilder retval = new StringBuilder(800);
    retval.append(XMLHandler.openTag(XML_TAG)).append(Const.CR);
    retval.append("  ").append(XMLHandler.openTag(XML_TAG_INFO)).append(Const.CR);
    retval.append("    ").append(XMLHandler.addTagValue("name", name));
    retval.append("    ").append(XMLHandler.addTagValue("description", description));
    retval.append("    ").append(XMLHandler.addTagValue("extended_description", extendedDescription));
    retval.append("    ").append(XMLHandler.addTagValue("trans_version", trans_version));
    retval.append("    ").append(XMLHandler.addTagValue("trans_type", transformationType.getCode()));
    if (trans_status >= 0) {
        retval.append("    ").append(XMLHandler.addTagValue("trans_status", trans_status));
    }
    retval.append("    ").append(XMLHandler.addTagValue("directory", directory != null ? directory.getPath() : RepositoryDirectory.DIRECTORY_SEPARATOR));
    if (includeNamedParameters) {
        retval.append("    ").append(XMLHandler.openTag(XML_TAG_PARAMETERS)).append(Const.CR);
        String[] parameters = listParameters();
        for (int idx = 0; idx < parameters.length; idx++) {
            retval.append("      ").append(XMLHandler.openTag("parameter")).append(Const.CR);
            retval.append("        ").append(XMLHandler.addTagValue("name", parameters[idx]));
            retval.append("        ").append(XMLHandler.addTagValue("default_value", getParameterDefault(parameters[idx])));
            retval.append("        ").append(XMLHandler.addTagValue("description", getParameterDescription(parameters[idx])));
            retval.append("      ").append(XMLHandler.closeTag("parameter")).append(Const.CR);
        }
        retval.append("    ").append(XMLHandler.closeTag(XML_TAG_PARAMETERS)).append(Const.CR);
    }
    if (includeLog) {
        retval.append("    ").append(XMLHandler.openTag("log")).append(Const.CR);
        // Add the metadata for the various logging tables
        // 
        retval.append(transLogTable.getXML());
        retval.append(performanceLogTable.getXML());
        retval.append(channelLogTable.getXML());
        retval.append(stepLogTable.getXML());
        retval.append(metricsLogTable.getXML());
        retval.append("    ").append(XMLHandler.closeTag("log")).append(Const.CR);
    }
    retval.append("    ").append(XMLHandler.openTag("maxdate")).append(Const.CR);
    retval.append("      ").append(XMLHandler.addTagValue("connection", maxDateConnection == null ? "" : maxDateConnection.getName()));
    retval.append("      ").append(XMLHandler.addTagValue("table", maxDateTable));
    retval.append("      ").append(XMLHandler.addTagValue("field", maxDateField));
    retval.append("      ").append(XMLHandler.addTagValue("offset", maxDateOffset));
    retval.append("      ").append(XMLHandler.addTagValue("maxdiff", maxDateDifference));
    retval.append("    ").append(XMLHandler.closeTag("maxdate")).append(Const.CR);
    retval.append("    ").append(XMLHandler.addTagValue("size_rowset", sizeRowset));
    retval.append("    ").append(XMLHandler.addTagValue("sleep_time_empty", sleepTimeEmpty));
    retval.append("    ").append(XMLHandler.addTagValue("sleep_time_full", sleepTimeFull));
    retval.append("    ").append(XMLHandler.addTagValue("unique_connections", usingUniqueConnections));
    retval.append("    ").append(XMLHandler.addTagValue("feedback_shown", feedbackShown));
    retval.append("    ").append(XMLHandler.addTagValue("feedback_size", feedbackSize));
    retval.append("    ").append(XMLHandler.addTagValue("using_thread_priorities", usingThreadPriorityManagment));
    retval.append("    ").append(XMLHandler.addTagValue("shared_objects_file", sharedObjectsFile));
    // Performance monitoring
    // 
    retval.append("    ").append(XMLHandler.addTagValue("capture_step_performance", capturingStepPerformanceSnapShots));
    retval.append("    ").append(XMLHandler.addTagValue("step_performance_capturing_delay", stepPerformanceCapturingDelay));
    retval.append("    ").append(XMLHandler.addTagValue("step_performance_capturing_size_limit", stepPerformanceCapturingSizeLimit));
    if (includeDependencies) {
        retval.append("    ").append(XMLHandler.openTag(XML_TAG_DEPENDENCIES)).append(Const.CR);
        for (int i = 0; i < nrDependencies(); i++) {
            TransDependency td = getDependency(i);
            retval.append(td.getXML());
        }
        retval.append("    ").append(XMLHandler.closeTag(XML_TAG_DEPENDENCIES)).append(Const.CR);
    }
    // The partition schemas...
    if (includePartitions) {
        retval.append("    ").append(XMLHandler.openTag(XML_TAG_PARTITIONSCHEMAS)).append(Const.CR);
        for (int i = 0; i < partitionSchemas.size(); i++) {
            PartitionSchema partitionSchema = partitionSchemas.get(i);
            retval.append(partitionSchema.getXML());
        }
        retval.append("    ").append(XMLHandler.closeTag(XML_TAG_PARTITIONSCHEMAS)).append(Const.CR);
    }
    // The slave servers...
    if (includeSlaves) {
        retval.append("    ").append(XMLHandler.openTag(XML_TAG_SLAVESERVERS)).append(Const.CR);
        for (int i = 0; i < slaveServers.size(); i++) {
            SlaveServer slaveServer = slaveServers.get(i);
            retval.append(slaveServer.getXML());
        }
        retval.append("    ").append(XMLHandler.closeTag(XML_TAG_SLAVESERVERS)).append(Const.CR);
    }
    // The cluster schemas...
    if (includeClusters) {
        retval.append("    ").append(XMLHandler.openTag(XML_TAG_CLUSTERSCHEMAS)).append(Const.CR);
        for (int i = 0; i < clusterSchemas.size(); i++) {
            ClusterSchema clusterSchema = clusterSchemas.get(i);
            retval.append(clusterSchema.getXML());
        }
        retval.append("    ").append(XMLHandler.closeTag(XML_TAG_CLUSTERSCHEMAS)).append(Const.CR);
    }
    retval.append("    ").append(XMLHandler.addTagValue("created_user", createdUser));
    retval.append("    ").append(XMLHandler.addTagValue("created_date", XMLHandler.date2string(createdDate)));
    retval.append("    ").append(XMLHandler.addTagValue("modified_user", modifiedUser));
    retval.append("    ").append(XMLHandler.addTagValue("modified_date", XMLHandler.date2string(modifiedDate)));
    try {
        retval.append("    ").append(XMLHandler.addTagValue("key_for_session_key", keyForSessionKey));
    } catch (Exception ex) {
        log.logError("Unable to decode key", ex);
    }
    retval.append("    ").append(XMLHandler.addTagValue("is_key_private", isKeyPrivate));
    retval.append("  ").append(XMLHandler.closeTag(XML_TAG_INFO)).append(Const.CR);
    if (includeNotePads) {
        retval.append("  ").append(XMLHandler.openTag(XML_TAG_NOTEPADS)).append(Const.CR);
        if (notes != null) {
            for (int i = 0; i < nrNotes(); i++) {
                NotePadMeta ni = getNote(i);
                retval.append(ni.getXML());
            }
        }
        retval.append("  ").append(XMLHandler.closeTag(XML_TAG_NOTEPADS)).append(Const.CR);
    }
    // The database connections...
    if (includeDatabase) {
        for (int i = 0; i < nrDatabases(); i++) {
            DatabaseMeta dbMeta = getDatabase(i);
            if (props != null && props.areOnlyUsedConnectionsSavedToXML()) {
                if (isDatabaseConnectionUsed(dbMeta)) {
                    retval.append(dbMeta.getXML());
                }
            } else {
                retval.append(dbMeta.getXML());
            }
        }
    }
    if (includeSteps) {
        retval.append("  ").append(XMLHandler.openTag(XML_TAG_ORDER)).append(Const.CR);
        for (int i = 0; i < nrTransHops(); i++) {
            TransHopMeta transHopMeta = getTransHop(i);
            retval.append(transHopMeta.getXML());
        }
        retval.append("  ").append(XMLHandler.closeTag(XML_TAG_ORDER)).append(Const.CR);
        /* The steps... */
        for (int i = 0; i < nrSteps(); i++) {
            StepMeta stepMeta = getStep(i);
            if (stepMeta.getStepMetaInterface() instanceof HasRepositoryInterface) {
                ((HasRepositoryInterface) stepMeta.getStepMetaInterface()).setRepository(repository);
            }
            retval.append(stepMeta.getXML());
        }
        /* The error handling metadata on the steps */
        retval.append("  ").append(XMLHandler.openTag(XML_TAG_STEP_ERROR_HANDLING)).append(Const.CR);
        for (int i = 0; i < nrSteps(); i++) {
            StepMeta stepMeta = getStep(i);
            if (stepMeta.getStepErrorMeta() != null) {
                retval.append(stepMeta.getStepErrorMeta().getXML());
            }
        }
        retval.append("  ").append(XMLHandler.closeTag(XML_TAG_STEP_ERROR_HANDLING)).append(Const.CR);
    }
    // The slave-step-copy/partition distribution. Only used for slave transformations in a clustering environment.
    retval.append(slaveStepCopyPartitionDistribution.getXML());
    // Is this a slave transformation or not?
    retval.append("  ").append(XMLHandler.addTagValue("slave_transformation", slaveTransformation));
    // The attribute groups...
    if (includeAttributeGroups) {
        retval.append(AttributesUtil.getAttributesXml(attributesMap));
    }
    retval.append(XMLHandler.closeTag(XML_TAG)).append(Const.CR);
    return XMLFormatter.format(retval.toString());
}
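
For context, here is how a caller might exercise getXML. This is a minimal sketch rather than project code: the .ktr paths are placeholders, and it assumes a locally initialized Kettle environment.

import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;

import org.pentaho.di.core.KettleEnvironment;
import org.pentaho.di.trans.TransMeta;

public class ExportTransXml {
    public static void main(String[] args) throws Exception {
        // Load core plugins before touching TransMeta.
        KettleEnvironment.init();
        // Placeholder input file; any valid .ktr will do.
        TransMeta transMeta = new TransMeta("/tmp/source.ktr");
        // Include everything except the attribute groups (last flag false).
        String xml = transMeta.getXML(true, true, true, true, true, true, true, true, true, false);
        Files.write(Paths.get("/tmp/copy.ktr"), xml.getBytes(StandardCharsets.UTF_8));
    }
}

TransMeta implements XMLInterface, so a flag-free getXML() overload that includes everything is also available; the flags above matter only when a caller wants to strip parts of the document.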

Example 42 with DatabaseMeta

Use of org.pentaho.di.core.database.DatabaseMeta in project pentaho-kettle by pentaho, in the class TransMeta, method loadXML.

/**
 * Parses an XML DOM (starting at the specified Node) that describes the transformation.
 *
 * @param transnode
 *          The XML node to load from
 * @param fname
 *          The filename
 * @param metaStore
 *          the metastore from which shared objects are imported (may be null)
 * @param rep
 *          The repository to load the default list of database connections from (null if no repository is available)
 * @param setInternalVariables
 *          true if you want to set the internal variables based on this transformation information
 * @param parentVariableSpace
 *          the parent variable space to use during TransMeta construction
 * @param prompter
 *          the changed/replace listener or null if there is none
 * @throws KettleXMLException
 *           if any errors occur during parsing of the specified file
 * @throws KettleMissingPluginsException
 *           in case missing plugins were found (details are in the exception in that case)
 */
public void loadXML(Node transnode, String fname, IMetaStore metaStore, Repository rep, boolean setInternalVariables, VariableSpace parentVariableSpace, OverwritePrompter prompter) throws KettleXMLException, KettleMissingPluginsException {
    KettleMissingPluginsException missingPluginsException = new KettleMissingPluginsException(BaseMessages.getString(PKG, "TransMeta.MissingPluginsFoundWhileLoadingTransformation.Exception"));
    // Remember this as the primary meta store.
    this.metaStore = metaStore;
    try {
        Props props = null;
        if (Props.isInitialized()) {
            props = Props.getInstance();
        }
        initializeVariablesFrom(parentVariableSpace);
        try {
            // Clear the transformation
            clear();
            // Set the filename here so it can be used in variables for ALL aspects of the transformation FIX: PDI-8890
            if (null == rep) {
                setFilename(fname);
            } else {
                // Set the repository here so it can be used in variables for ALL aspects of the job FIX: PDI-16441
                setRepository(rep);
            }
            // Read the shared objects: from the repository when available, otherwise from the shared objects file.
            try {
                sharedObjectsFile = XMLHandler.getTagValue(transnode, "info", "shared_objects_file");
                sharedObjects = rep != null ? rep.readTransSharedObjects(this) : readSharedObjects();
            } catch (Exception e) {
                log.logError(BaseMessages.getString(PKG, "TransMeta.ErrorReadingSharedObjects.Message", e.toString()));
                log.logError(Const.getStackTracker(e));
            }
            // Load the database connections, slave servers, cluster schemas & partition schemas into this object.
            // 
            importFromMetaStore();
            // Handle connections
            int n = XMLHandler.countNodes(transnode, DatabaseMeta.XML_TAG);
            Set<String> privateTransformationDatabases = new HashSet<>(n);
            if (log.isDebug()) {
                log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.WeHaveConnections", String.valueOf(n)));
            }
            for (int i = 0; i < n; i++) {
                if (log.isDebug()) {
                    log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.LookingAtConnection") + i);
                }
                Node nodecon = XMLHandler.getSubNodeByNr(transnode, DatabaseMeta.XML_TAG, i);
                DatabaseMeta dbcon = new DatabaseMeta(nodecon);
                dbcon.shareVariablesWith(this);
                if (!dbcon.isShared()) {
                    privateTransformationDatabases.add(dbcon.getName());
                }
                DatabaseMeta exist = findDatabase(dbcon.getName());
                if (exist == null) {
                    addDatabase(dbcon);
                } else {
                    if (!exist.isShared()) {
                        // otherwise, we just keep the shared connection.
                        if (shouldOverwrite(prompter, props, BaseMessages.getString(PKG, "TransMeta.Message.OverwriteConnectionYN", dbcon.getName()), BaseMessages.getString(PKG, "TransMeta.Message.OverwriteConnection.DontShowAnyMoreMessage"))) {
                            int idx = indexOfDatabase(exist);
                            removeDatabase(idx);
                            addDatabase(idx, dbcon);
                        }
                    }
                }
            }
            setPrivateDatabases(privateTransformationDatabases);
            // Read the notes...
            Node notepadsnode = XMLHandler.getSubNode(transnode, XML_TAG_NOTEPADS);
            int nrnotes = XMLHandler.countNodes(notepadsnode, NotePadMeta.XML_TAG);
            for (int i = 0; i < nrnotes; i++) {
                Node notepadnode = XMLHandler.getSubNodeByNr(notepadsnode, NotePadMeta.XML_TAG, i);
                NotePadMeta ni = new NotePadMeta(notepadnode);
                notes.add(ni);
            }
            // Handle Steps
            int s = XMLHandler.countNodes(transnode, StepMeta.XML_TAG);
            if (log.isDebug()) {
                log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.ReadingSteps") + s + " steps...");
            }
            for (int i = 0; i < s; i++) {
                Node stepnode = XMLHandler.getSubNodeByNr(transnode, StepMeta.XML_TAG, i);
                if (log.isDebug()) {
                    log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.LookingAtStep") + i);
                }
                StepMeta stepMeta = new StepMeta(stepnode, databases, metaStore);
                // for tracing, retain hierarchy
                stepMeta.setParentTransMeta(this);
                if (stepMeta.isMissing()) {
                    addMissingTrans((MissingTrans) stepMeta.getStepMetaInterface());
                }
                // Check if the step exists and if it's a shared step.
                // If so, then we will keep the shared version, not this one.
                // The stored XML is only for backup purposes.
                // 
                StepMeta check = findStep(stepMeta.getName());
                if (check != null) {
                    if (!check.isShared()) {
                        // Don't overwrite shared objects
                        addOrReplaceStep(stepMeta);
                    } else {
                        // Just keep the drawn flag and location
                        check.setDraw(stepMeta.isDrawn());
                        check.setLocation(stepMeta.getLocation());
                    }
                } else {
                    // simply add it.
                    addStep(stepMeta);
                }
            }
            // Read the error handling code of the steps...
            // 
            Node errorHandlingNode = XMLHandler.getSubNode(transnode, XML_TAG_STEP_ERROR_HANDLING);
            int nrErrorHandlers = XMLHandler.countNodes(errorHandlingNode, StepErrorMeta.XML_ERROR_TAG);
            for (int i = 0; i < nrErrorHandlers; i++) {
                Node stepErrorMetaNode = XMLHandler.getSubNodeByNr(errorHandlingNode, StepErrorMeta.XML_ERROR_TAG, i);
                StepErrorMeta stepErrorMeta = new StepErrorMeta(this, stepErrorMetaNode, steps);
                if (stepErrorMeta.getSourceStep() != null) {
                    // a bit of a trick, I know.
                    stepErrorMeta.getSourceStep().setStepErrorMeta(stepErrorMeta);
                }
            }
            // Let every step resolve its references to info and target steps.
            for (int i = 0; i < nrSteps(); i++) {
                StepMeta stepMeta = getStep(i);
                StepMetaInterface sii = stepMeta.getStepMetaInterface();
                if (sii != null) {
                    sii.searchInfoAndTargetSteps(steps);
                }
            }
            // Handle Hops
            // 
            Node ordernode = XMLHandler.getSubNode(transnode, XML_TAG_ORDER);
            n = XMLHandler.countNodes(ordernode, TransHopMeta.XML_HOP_TAG);
            if (log.isDebug()) {
                log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.WeHaveHops") + n + " hops...");
            }
            for (int i = 0; i < n; i++) {
                if (log.isDebug()) {
                    log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.LookingAtHop") + i);
                }
                Node hopnode = XMLHandler.getSubNodeByNr(ordernode, TransHopMeta.XML_HOP_TAG, i);
                TransHopMeta hopinf = new TransHopMeta(hopnode, steps);
                hopinf.setErrorHop(isErrorNode(errorHandlingNode, hopnode));
                addTransHop(hopinf);
            }
            // 
            // get transformation info:
            // 
            Node infonode = XMLHandler.getSubNode(transnode, XML_TAG_INFO);
            // Name
            // 
            setName(XMLHandler.getTagValue(infonode, "name"));
            // description
            // 
            description = XMLHandler.getTagValue(infonode, "description");
            // extended description
            // 
            extendedDescription = XMLHandler.getTagValue(infonode, "extended_description");
            // trans version
            // 
            trans_version = XMLHandler.getTagValue(infonode, "trans_version");
            // trans status
            // 
            trans_status = Const.toInt(XMLHandler.getTagValue(infonode, "trans_status"), -1);
            String transTypeCode = XMLHandler.getTagValue(infonode, "trans_type");
            transformationType = TransformationType.getTransformationTypeByCode(transTypeCode);
            // Determine the repository directory, if a repository is available.
            if (rep != null) {
                String directoryPath = XMLHandler.getTagValue(infonode, "directory");
                if (directoryPath != null) {
                    directory = rep.findDirectory(directoryPath);
                    if (directory == null) {
                        // Not found: use the root directory as default.
                        directory = new RepositoryDirectory();
                    }
                }
            }
            // Read logging table information
            // 
            Node logNode = XMLHandler.getSubNode(infonode, "log");
            if (logNode != null) {
                // Backward compatibility...
                // 
                Node transLogNode = XMLHandler.getSubNode(logNode, TransLogTable.XML_TAG);
                if (transLogNode == null) {
                    // No dedicated trans-log-table node: read the legacy logging settings straight from the info node
                    // 
                    transLogTable.findField(TransLogTable.ID.LINES_READ).setSubject(findStep(XMLHandler.getTagValue(infonode, "log", "read")));
                    transLogTable.findField(TransLogTable.ID.LINES_WRITTEN).setSubject(findStep(XMLHandler.getTagValue(infonode, "log", "write")));
                    transLogTable.findField(TransLogTable.ID.LINES_INPUT).setSubject(findStep(XMLHandler.getTagValue(infonode, "log", "input")));
                    transLogTable.findField(TransLogTable.ID.LINES_OUTPUT).setSubject(findStep(XMLHandler.getTagValue(infonode, "log", "output")));
                    transLogTable.findField(TransLogTable.ID.LINES_UPDATED).setSubject(findStep(XMLHandler.getTagValue(infonode, "log", "update")));
                    transLogTable.findField(TransLogTable.ID.LINES_REJECTED).setSubject(findStep(XMLHandler.getTagValue(infonode, "log", "rejected")));
                    transLogTable.setConnectionName(XMLHandler.getTagValue(infonode, "log", "connection"));
                    transLogTable.setSchemaName(XMLHandler.getTagValue(infonode, "log", "schema"));
                    transLogTable.setTableName(XMLHandler.getTagValue(infonode, "log", "table"));
                    transLogTable.findField(TransLogTable.ID.ID_BATCH).setEnabled("Y".equalsIgnoreCase(XMLHandler.getTagValue(infonode, "log", "use_batchid")));
                    transLogTable.findField(TransLogTable.ID.LOG_FIELD).setEnabled("Y".equalsIgnoreCase(XMLHandler.getTagValue(infonode, "log", "USE_LOGFIELD")));
                    transLogTable.setLogSizeLimit(XMLHandler.getTagValue(infonode, "log", "size_limit_lines"));
                    transLogTable.setLogInterval(XMLHandler.getTagValue(infonode, "log", "interval"));
                    transLogTable.findField(TransLogTable.ID.CHANNEL_ID).setEnabled(false);
                    transLogTable.findField(TransLogTable.ID.LINES_REJECTED).setEnabled(false);
                    performanceLogTable.setConnectionName(transLogTable.getConnectionName());
                    performanceLogTable.setTableName(XMLHandler.getTagValue(infonode, "log", "step_performance_table"));
                } else {
                    transLogTable.loadXML(transLogNode, databases, steps);
                }
                Node perfLogNode = XMLHandler.getSubNode(logNode, PerformanceLogTable.XML_TAG);
                if (perfLogNode != null) {
                    performanceLogTable.loadXML(perfLogNode, databases, steps);
                }
                Node channelLogNode = XMLHandler.getSubNode(logNode, ChannelLogTable.XML_TAG);
                if (channelLogNode != null) {
                    channelLogTable.loadXML(channelLogNode, databases, steps);
                }
                Node stepLogNode = XMLHandler.getSubNode(logNode, StepLogTable.XML_TAG);
                if (stepLogNode != null) {
                    stepLogTable.loadXML(stepLogNode, databases, steps);
                }
                Node metricsLogNode = XMLHandler.getSubNode(logNode, MetricsLogTable.XML_TAG);
                if (metricsLogNode != null) {
                    metricsLogTable.loadXML(metricsLogNode, databases, steps);
                }
            }
            // Maxdate range options...
            String maxdatcon = XMLHandler.getTagValue(infonode, "maxdate", "connection");
            maxDateConnection = findDatabase(maxdatcon);
            maxDateTable = XMLHandler.getTagValue(infonode, "maxdate", "table");
            maxDateField = XMLHandler.getTagValue(infonode, "maxdate", "field");
            String offset = XMLHandler.getTagValue(infonode, "maxdate", "offset");
            maxDateOffset = Const.toDouble(offset, 0.0);
            String mdiff = XMLHandler.getTagValue(infonode, "maxdate", "maxdiff");
            maxDateDifference = Const.toDouble(mdiff, 0.0);
            // Check the dependencies as far as dates are concerned...
            // We calculate BEFORE we run the MAX of these dates
            // If the date is larger than enddate, startdate is set to MIN_DATE
            // 
            Node depsNode = XMLHandler.getSubNode(infonode, XML_TAG_DEPENDENCIES);
            int nrDeps = XMLHandler.countNodes(depsNode, TransDependency.XML_TAG);
            for (int i = 0; i < nrDeps; i++) {
                Node depNode = XMLHandler.getSubNodeByNr(depsNode, TransDependency.XML_TAG, i);
                TransDependency transDependency = new TransDependency(depNode, databases);
                if (transDependency.getDatabase() != null && transDependency.getFieldname() != null) {
                    addDependency(transDependency);
                }
            }
            // Read the named parameters.
            Node paramsNode = XMLHandler.getSubNode(infonode, XML_TAG_PARAMETERS);
            int nrParams = XMLHandler.countNodes(paramsNode, "parameter");
            for (int i = 0; i < nrParams; i++) {
                Node paramNode = XMLHandler.getSubNodeByNr(paramsNode, "parameter", i);
                String paramName = XMLHandler.getTagValue(paramNode, "name");
                String defaultValue = XMLHandler.getTagValue(paramNode, "default_value");
                String descr = XMLHandler.getTagValue(paramNode, "description");
                addParameterDefinition(paramName, defaultValue, descr);
            }
            // Read the partitioning schemas
            // 
            Node partSchemasNode = XMLHandler.getSubNode(infonode, XML_TAG_PARTITIONSCHEMAS);
            int nrPartSchemas = XMLHandler.countNodes(partSchemasNode, PartitionSchema.XML_TAG);
            for (int i = 0; i < nrPartSchemas; i++) {
                Node partSchemaNode = XMLHandler.getSubNodeByNr(partSchemasNode, PartitionSchema.XML_TAG, i);
                PartitionSchema partitionSchema = new PartitionSchema(partSchemaNode);
                // Check if the partition schema exists and if it's a shared object.
                // If so, then we will keep the shared version, not this one.
                // The stored XML is only for backup purposes.
                // 
                PartitionSchema check = findPartitionSchema(partitionSchema.getName());
                if (check != null) {
                    if (!check.isShared()) {
                        // we don't overwrite shared objects.
                        if (shouldOverwrite(prompter, props, BaseMessages.getString(PKG, "TransMeta.Message.OverwritePartitionSchemaYN", partitionSchema.getName()), BaseMessages.getString(PKG, "TransMeta.Message.OverwriteConnection.DontShowAnyMoreMessage"))) {
                            addOrReplacePartitionSchema(partitionSchema);
                        }
                    }
                } else {
                    partitionSchemas.add(partitionSchema);
                }
            }
            // Plug the loaded partition schemas into the steps that reference them.
            for (int i = 0; i < nrSteps(); i++) {
                StepPartitioningMeta stepPartitioningMeta = getStep(i).getStepPartitioningMeta();
                if (stepPartitioningMeta != null) {
                    stepPartitioningMeta.setPartitionSchemaAfterLoading(partitionSchemas);
                }
                StepPartitioningMeta targetStepPartitioningMeta = getStep(i).getTargetStepPartitioningMeta();
                if (targetStepPartitioningMeta != null) {
                    targetStepPartitioningMeta.setPartitionSchemaAfterLoading(partitionSchemas);
                }
            }
            // Read the slave servers...
            // 
            Node slaveServersNode = XMLHandler.getSubNode(infonode, XML_TAG_SLAVESERVERS);
            int nrSlaveServers = XMLHandler.countNodes(slaveServersNode, SlaveServer.XML_TAG);
            for (int i = 0; i < nrSlaveServers; i++) {
                Node slaveServerNode = XMLHandler.getSubNodeByNr(slaveServersNode, SlaveServer.XML_TAG, i);
                SlaveServer slaveServer = new SlaveServer(slaveServerNode);
                if (slaveServer.getName() == null) {
                    log.logError(BaseMessages.getString(PKG, "TransMeta.Log.WarningWhileCreationSlaveServer", slaveServer.getName()));
                    continue;
                }
                slaveServer.shareVariablesWith(this);
                // Check if the object exists and if it's a shared object.
                // If so, then we will keep the shared version, not this one.
                // The stored XML is only for backup purposes.
                SlaveServer check = findSlaveServer(slaveServer.getName());
                if (check != null) {
                    if (!check.isShared()) {
                        // we don't overwrite shared objects.
                        if (shouldOverwrite(prompter, props, BaseMessages.getString(PKG, "TransMeta.Message.OverwriteSlaveServerYN", slaveServer.getName()), BaseMessages.getString(PKG, "TransMeta.Message.OverwriteConnection.DontShowAnyMoreMessage"))) {
                            addOrReplaceSlaveServer(slaveServer);
                        }
                    }
                } else {
                    slaveServers.add(slaveServer);
                }
            }
            // Read the cluster schemas
            // 
            Node clusterSchemasNode = XMLHandler.getSubNode(infonode, XML_TAG_CLUSTERSCHEMAS);
            int nrClusterSchemas = XMLHandler.countNodes(clusterSchemasNode, ClusterSchema.XML_TAG);
            for (int i = 0; i < nrClusterSchemas; i++) {
                Node clusterSchemaNode = XMLHandler.getSubNodeByNr(clusterSchemasNode, ClusterSchema.XML_TAG, i);
                ClusterSchema clusterSchema = new ClusterSchema(clusterSchemaNode, slaveServers);
                clusterSchema.shareVariablesWith(this);
                // Check if the object exists and if it's a shared object.
                // If so, then we will keep the shared version, not this one.
                // The stored XML is only for backup purposes.
                ClusterSchema check = findClusterSchema(clusterSchema.getName());
                if (check != null) {
                    if (!check.isShared()) {
                        // we don't overwrite shared objects.
                        if (shouldOverwrite(prompter, props, BaseMessages.getString(PKG, "TransMeta.Message.OverwriteClusterSchemaYN", clusterSchema.getName()), BaseMessages.getString(PKG, "TransMeta.Message.OverwriteConnection.DontShowAnyMoreMessage"))) {
                            addOrReplaceClusterSchema(clusterSchema);
                        }
                    }
                } else {
                    clusterSchemas.add(clusterSchema);
                }
            }
            // Plug the cluster schemas into the steps that reference them.
            for (int i = 0; i < nrSteps(); i++) {
                getStep(i).setClusterSchemaAfterLoading(clusterSchemas);
            }
            String srowset = XMLHandler.getTagValue(infonode, "size_rowset");
            sizeRowset = Const.toInt(srowset, Const.ROWS_IN_ROWSET);
            sleepTimeEmpty = Const.toInt(XMLHandler.getTagValue(infonode, "sleep_time_empty"), Const.TIMEOUT_GET_MILLIS);
            sleepTimeFull = Const.toInt(XMLHandler.getTagValue(infonode, "sleep_time_full"), Const.TIMEOUT_PUT_MILLIS);
            usingUniqueConnections = "Y".equalsIgnoreCase(XMLHandler.getTagValue(infonode, "unique_connections"));
            feedbackShown = !"N".equalsIgnoreCase(XMLHandler.getTagValue(infonode, "feedback_shown"));
            feedbackSize = Const.toInt(XMLHandler.getTagValue(infonode, "feedback_size"), Const.ROWS_UPDATE);
            usingThreadPriorityManagment = !"N".equalsIgnoreCase(XMLHandler.getTagValue(infonode, "using_thread_priorities"));
            // Performance monitoring for steps...
            // 
            capturingStepPerformanceSnapShots = "Y".equalsIgnoreCase(XMLHandler.getTagValue(infonode, "capture_step_performance"));
            stepPerformanceCapturingDelay = Const.toLong(XMLHandler.getTagValue(infonode, "step_performance_capturing_delay"), 1000);
            stepPerformanceCapturingSizeLimit = XMLHandler.getTagValue(infonode, "step_performance_capturing_size_limit");
            // Created user/date
            createdUser = XMLHandler.getTagValue(infonode, "created_user");
            String createDate = XMLHandler.getTagValue(infonode, "created_date");
            if (createDate != null) {
                createdDate = XMLHandler.stringToDate(createDate);
            }
            // Changed user/date
            modifiedUser = XMLHandler.getTagValue(infonode, "modified_user");
            String modDate = XMLHandler.getTagValue(infonode, "modified_date");
            if (modDate != null) {
                modifiedDate = XMLHandler.stringToDate(modDate);
            }
            Node partitionDistNode = XMLHandler.getSubNode(transnode, SlaveStepCopyPartitionDistribution.XML_TAG);
            if (partitionDistNode != null) {
                slaveStepCopyPartitionDistribution = new SlaveStepCopyPartitionDistribution(partitionDistNode);
            } else {
                // leave empty
                slaveStepCopyPartitionDistribution = new SlaveStepCopyPartitionDistribution();
            }
            // Is this a slave transformation?
            // 
            slaveTransformation = "Y".equalsIgnoreCase(XMLHandler.getTagValue(transnode, "slave_transformation"));
            if (log.isDebug()) {
                log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.NumberOfStepsReaded") + nrSteps());
                log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.NumberOfHopsReaded") + nrTransHops());
            }
            sortSteps();
            // Load the attribute groups map
            // 
            attributesMap = AttributesUtil.loadAttributes(XMLHandler.getSubNode(transnode, AttributesUtil.XML_TAG));
            keyForSessionKey = XMLHandler.stringToBinary(XMLHandler.getTagValue(infonode, "key_for_session_key"));
            isKeyPrivate = "Y".equals(XMLHandler.getTagValue(infonode, "is_key_private"));
        } catch (KettleXMLException xe) {
            throw new KettleXMLException(BaseMessages.getString(PKG, "TransMeta.Exception.ErrorReadingTransformation"), xe);
        } catch (KettleException e) {
            throw new KettleXMLException(e);
        } finally {
            initializeVariablesFrom(null);
            if (setInternalVariables) {
                setInternalKettleVariables();
            }
            ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.TransformationMetaLoaded.id, this);
        }
    } catch (Exception e) {
        // Missing-plugin details take precedence over the generic parse error.
        if (!missingPluginsException.getMissingPluginDetailsList().isEmpty()) {
            throw missingPluginsException;
        } else {
            throw new KettleXMLException(BaseMessages.getString(PKG, "TransMeta.Exception.ErrorReadingTransformation"), e);
        }
    } finally {
        if (!missingPluginsException.getMissingPluginDetailsList().isEmpty()) {
            throw missingPluginsException;
        }
    }
}
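
A sketch of how loadXML is typically driven; in the project this normally happens inside a TransMeta constructor. The file name below is a placeholder, and no repository or metastore is passed.

import org.pentaho.di.core.KettleEnvironment;
import org.pentaho.di.core.variables.Variables;
import org.pentaho.di.core.xml.XMLHandler;
import org.pentaho.di.trans.TransMeta;
import org.w3c.dom.Document;
import org.w3c.dom.Node;

public class LoadTransXml {
    public static void main(String[] args) throws Exception {
        KettleEnvironment.init();
        String fname = "/tmp/my_transformation.ktr"; // placeholder path
        // Parse the file and locate the <transformation> root node.
        Document doc = XMLHandler.loadXMLFile(fname);
        Node transNode = XMLHandler.getSubNode(doc, TransMeta.XML_TAG);
        TransMeta transMeta = new TransMeta();
        // null metastore, null repository; do set the internal variables.
        transMeta.loadXML(transNode, fname, null, null, true, new Variables(), null);
        System.out.println("Loaded '" + transMeta.getName() + "' with " + transMeta.nrSteps() + " steps");
    }
}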

Example 43 with DatabaseMeta

Use of org.pentaho.di.core.database.DatabaseMeta in project pentaho-kettle by pentaho, in the class TransMeta, method realClone.

/**
 * Perform a real clone of the transformation meta-data object, including cloning all lists and copying all values. If
 * the doClear parameter is true, the clone will be cleared of ALL values before the copy. If false, only the copied
 * fields will be cleared.
 *
 * @param doClear
 *          Whether to clear all of the clone's data before copying from the source object
 * @return a real clone of the calling object
 */
public Object realClone(boolean doClear) {
    try {
        TransMeta transMeta = (TransMeta) super.clone();
        if (doClear) {
            transMeta.clear();
        } else {
            // Clear out the things we're replacing below
            transMeta.databases = new ArrayList<>();
            transMeta.steps = new ArrayList<>();
            transMeta.hops = new ArrayList<>();
            transMeta.notes = new ArrayList<>();
            transMeta.dependencies = new ArrayList<>();
            transMeta.partitionSchemas = new ArrayList<>();
            transMeta.slaveServers = new ArrayList<>();
            transMeta.clusterSchemas = new ArrayList<>();
            transMeta.namedParams = new NamedParamsDefault();
            transMeta.stepChangeListeners = new ArrayList<>();
        }
        for (DatabaseMeta db : databases) {
            transMeta.addDatabase((DatabaseMeta) db.clone());
        }
        for (StepMeta step : steps) {
            transMeta.addStep((StepMeta) step.clone());
        }
        // PDI-15799: step references still point at the originals; repoint them to the clones.
        for (StepMeta step : transMeta.getSteps()) {
            final StepMetaInterface stepMetaInterface = step.getStepMetaInterface();
            if (stepMetaInterface != null) {
                final StepIOMetaInterface stepIOMeta = stepMetaInterface.getStepIOMeta();
                if (stepIOMeta != null) {
                    for (StreamInterface stream : stepIOMeta.getInfoStreams()) {
                        String streamStepName = stream.getStepname();
                        if (streamStepName != null) {
                            StepMeta streamStepMeta = transMeta.findStep(streamStepName);
                            stream.setStepMeta(streamStepMeta);
                        }
                    }
                }
            }
        }
        for (TransHopMeta hop : hops) {
            transMeta.addTransHop((TransHopMeta) hop.clone());
        }
        for (NotePadMeta note : notes) {
            transMeta.addNote((NotePadMeta) note.clone());
        }
        for (TransDependency dep : dependencies) {
            transMeta.addDependency((TransDependency) dep.clone());
        }
        for (SlaveServer slave : slaveServers) {
            transMeta.getSlaveServers().add((SlaveServer) slave.clone());
        }
        for (ClusterSchema schema : clusterSchemas) {
            transMeta.getClusterSchemas().add(schema.clone());
        }
        for (PartitionSchema schema : partitionSchemas) {
            transMeta.getPartitionSchemas().add((PartitionSchema) schema.clone());
        }
        for (String key : listParameters()) {
            transMeta.addParameterDefinition(key, getParameterDefault(key), getParameterDescription(key));
        }
        return transMeta;
    } catch (Exception e) {
        e.printStackTrace();
        return null;
    }
}
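
A short usage sketch, assuming an already-loaded original: realClone(false) deep-copies the lists, so the copy can be renamed and edited without touching the source object.

import org.pentaho.di.trans.TransMeta;

public class CloneDemo {
    // Returns an independent deep copy of a transformation.
    static TransMeta copyOf(TransMeta original) {
        TransMeta copy = (TransMeta) original.realClone(false);
        copy.setName(original.getName() + " (copy)");
        // The lists were rebuilt above, so the copy carries its own steps and hops.
        assert copy.nrSteps() == original.nrSteps();
        return copy;
    }
}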

Example 44 with DatabaseMeta

Use of org.pentaho.di.core.database.DatabaseMeta in project pentaho-kettle by pentaho, in the class Trans, method calculateBatchIdAndDateRange.

/**
 * Calculate the batch id and date range for the transformation.
 *
 * @throws KettleTransException if there are any errors during calculation
 */
public void calculateBatchIdAndDateRange() throws KettleTransException {
    TransLogTable transLogTable = transMeta.getTransLogTable();
    currentDate = new Date();
    logDate = new Date();
    startDate = Const.MIN_DATE;
    endDate = currentDate;
    DatabaseMeta logConnection = transLogTable.getDatabaseMeta();
    String logTable = environmentSubstitute(transLogTable.getActualTableName());
    String logSchema = environmentSubstitute(transLogTable.getActualSchemaName());
    try {
        if (logConnection != null) {
            String logSchemaAndTable = logConnection.getQuotedSchemaTableCombination(logSchema, logTable);
            if (Utils.isEmpty(logTable)) {
                // It doesn't make sense to start database logging without a table to log to.
                throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.NoLogTableDefined"));
            }
            if (Utils.isEmpty(transMeta.getName()) && logTable != null) {
                throw new KettleException(BaseMessages.getString(PKG, "Trans.Exception.NoTransnameAvailableForLogging"));
            }
            transLogTableDatabaseConnection = new Database(this, logConnection);
            transLogTableDatabaseConnection.shareVariablesWith(this);
            if (log.isDetailed()) {
                log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.OpeningLogConnection", "" + logConnection));
            }
            transLogTableDatabaseConnection.connect();
            transLogTableDatabaseConnection.setCommit(logCommitSize);
            // Get a new batch ID if the log table uses one.
            if (transLogTable.isBatchIdUsed()) {
                Long id_batch = logConnection.getNextBatchId(transLogTableDatabaseConnection, logSchema, logTable, transLogTable.getKeyField().getFieldName());
                setBatchId(id_batch.longValue());
            }
            // 
            // Get the date range from the logging table: from the last end_date to now. (currentDate)
            // 
            Object[] lastr = transLogTableDatabaseConnection.getLastLogDate(logSchemaAndTable, transMeta.getName(), false, LogStatus.END);
            if (lastr != null && lastr.length > 0) {
                startDate = (Date) lastr[0];
                if (log.isDetailed()) {
                    log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.StartDateFound") + startDate);
                }
            }
            // Optionally determine the end date by querying the max-date connection.
            if (transMeta.getMaxDateConnection() != null && transMeta.getMaxDateTable() != null && transMeta.getMaxDateTable().length() > 0 && transMeta.getMaxDateField() != null && transMeta.getMaxDateField().length() > 0) {
                if (log.isDetailed()) {
                    log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.LookingForMaxdateConnection", "" + transMeta.getMaxDateConnection()));
                }
                DatabaseMeta maxcon = transMeta.getMaxDateConnection();
                if (maxcon != null) {
                    Database maxdb = new Database(this, maxcon);
                    maxdb.shareVariablesWith(this);
                    try {
                        if (log.isDetailed()) {
                            log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.OpeningMaximumDateConnection"));
                        }
                        maxdb.connect();
                        maxdb.setCommit(logCommitSize);
                        // 
                        // Determine the endDate by looking at a field in a table...
                        // 
                        String sql = "SELECT MAX(" + transMeta.getMaxDateField() + ") FROM " + transMeta.getMaxDateTable();
                        RowMetaAndData r1 = maxdb.getOneRow(sql);
                        if (r1 != null) {
                            // OK, we have a value, what's the offset?
                            Date maxvalue = r1.getRowMeta().getDate(r1.getData(), 0);
                            if (maxvalue != null) {
                                if (log.isDetailed()) {
                                    log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.LastDateFoundOnTheMaxdateConnection") + r1);
                                }
                                endDate.setTime((long) (maxvalue.getTime() + (transMeta.getMaxDateOffset() * 1000)));
                            }
                        } else {
                            if (log.isDetailed()) {
                                log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.NoLastDateFoundOnTheMaxdateConnection"));
                            }
                        }
                    } catch (KettleException e) {
                        throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.ErrorConnectingToDatabase", "" + transMeta.getMaxDateConnection()), e);
                    } finally {
                        maxdb.disconnect();
                    }
                } else {
                    throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.MaximumDateConnectionCouldNotBeFound", "" + transMeta.getMaxDateConnection()));
                }
            }
            // Get the maximum in depdate...
            if (transMeta.nrDependencies() > 0) {
                if (log.isDetailed()) {
                    log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.CheckingForMaxDependencyDate"));
                }
                // 
                // Maybe one of the tables where this transformation is dependent on has changed?
                // If so we need to change the start-date!
                // 
                depDate = Const.MIN_DATE;
                Date maxdepdate = Const.MIN_DATE;
                if (lastr != null && lastr.length > 0) {
                    // #1: last depdate
                    Date dep = (Date) lastr[1];
                    if (dep != null) {
                        maxdepdate = dep;
                        depDate = dep;
                    }
                }
                for (int i = 0; i < transMeta.nrDependencies(); i++) {
                    TransDependency td = transMeta.getDependency(i);
                    DatabaseMeta depcon = td.getDatabase();
                    if (depcon != null) {
                        Database depdb = new Database(this, depcon);
                        try {
                            depdb.connect();
                            depdb.setCommit(logCommitSize);
                            String sql = "SELECT MAX(" + td.getFieldname() + ") FROM " + td.getTablename();
                            RowMetaAndData r1 = depdb.getOneRow(sql);
                            if (r1 != null) {
                                // OK, we have a row, get the result!
                                Date maxvalue = (Date) r1.getData()[0];
                                if (maxvalue != null) {
                                    if (log.isDetailed()) {
                                        log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.FoundDateFromTable", td.getTablename(), "." + td.getFieldname(), " = " + maxvalue.toString()));
                                    }
                                    if (maxvalue.getTime() > maxdepdate.getTime()) {
                                        maxdepdate = maxvalue;
                                    }
                                } else {
                                    throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.UnableToGetDependencyInfoFromDB", td.getDatabase().getName() + ".", td.getTablename() + ".", td.getFieldname()));
                                }
                            } else {
                                throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.UnableToGetDependencyInfoFromDB", td.getDatabase().getName() + ".", td.getTablename() + ".", td.getFieldname()));
                            }
                        } catch (KettleException e) {
                            throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.ErrorInDatabase", "" + td.getDatabase()), e);
                        } finally {
                            depdb.disconnect();
                        }
                    } else {
                        throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.ConnectionCouldNotBeFound", "" + td.getDatabase()));
                    }
                    if (log.isDetailed()) {
                        log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.Maxdepdate") + (XMLHandler.date2string(maxdepdate)));
                    }
                }
                // If a dependency changed since the last run, force a full refresh by resetting the start date.
                if (maxdepdate.getTime() > depDate.getTime()) {
                    depDate = maxdepdate;
                    startDate = Const.MIN_DATE;
                }
            } else {
                depDate = currentDate;
            }
        }
        // OK, now we have a date-range. See if we need to set a maximum!
        // Do we have a difference specified? Is the startdate past the minimum?
        if (transMeta.getMaxDateDifference() > 0.0 && startDate.getTime() > Const.MIN_DATE.getTime()) {
            // See if the end date is larger than start_date + DIFF.
            Date maxdesired = new Date(startDate.getTime() + ((long) transMeta.getMaxDateDifference() * 1000));
            // If so, clip the end date to the desired maximum.
            if (endDate.compareTo(maxdesired) > 0) {
                endDate = maxdesired;
            }
        }
    } catch (KettleException e) {
        throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.ErrorCalculatingDateRange", logTable), e);
    }
// Be careful: we DO NOT close the trans log table database connection!!!
// It's closed later in beginProcessing() to prevent excessive connect/disconnect repetitions.
}
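
Because calculateBatchIdAndDateRange() is public on Trans, a caller can inspect the computed window directly. A hedged sketch: the .ktr path is a placeholder and must define a transformation log table (with a batch-id field) for the batch-id branch to run.

import org.pentaho.di.core.KettleEnvironment;
import org.pentaho.di.trans.Trans;
import org.pentaho.di.trans.TransMeta;

public class DateRangeDemo {
    public static void main(String[] args) throws Exception {
        KettleEnvironment.init();
        // Placeholder file; its trans log table settings drive the calculation.
        TransMeta transMeta = new TransMeta("/tmp/logged_transformation.ktr");
        Trans trans = new Trans(transMeta);
        trans.calculateBatchIdAndDateRange();
        System.out.println("batch id = " + trans.getBatchId());
        System.out.println("window: " + trans.getStartDate() + " .. " + trans.getEndDate());
    }
}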

Example 45 with DatabaseMeta

Use of org.pentaho.di.core.database.DatabaseMeta in project pentaho-kettle by pentaho, in the class Trans, method endProcessing.

/**
 * End processing. Also handles any logging operations associated with the end of a transformation.
 *
 * @return true if all end processing is successful, false otherwise
 * @throws KettleException if any errors occur during processing
 */
private synchronized boolean endProcessing() throws KettleException {
    LogStatus status;
    if (isStopped()) {
        status = LogStatus.STOP;
    } else if (isFinished()) {
        status = LogStatus.END;
    } else if (isPaused()) {
        status = LogStatus.PAUSED;
    } else {
        status = LogStatus.RUNNING;
    }
    TransLogTable transLogTable = transMeta.getTransLogTable();
    int intervalInSeconds = Const.toInt(environmentSubstitute(transLogTable.getLogInterval()), -1);
    logDate = new Date();
    // OK, we have some logging to do...
    // 
    DatabaseMeta logcon = transMeta.getTransLogTable().getDatabaseMeta();
    String logTable = transMeta.getTransLogTable().getActualTableName();
    if (logcon != null) {
        Database ldb = null;
        try {
            // Reuse the log connection opened during calculateBatchIdAndDateRange(), or connect now.
            if (transLogTableDatabaseConnection == null) {
                ldb = new Database(this, logcon);
                ldb.shareVariablesWith(this);
                ldb.connect();
                ldb.setCommit(logCommitSize);
                transLogTableDatabaseConnection = ldb;
            } else {
                ldb = transLogTableDatabaseConnection;
            }
            // Write the status record to the log table, if one is defined.
            if (!Utils.isEmpty(logTable)) {
                ldb.writeLogRecord(transLogTable, status, this, null);
            }
            // Clean up old log records once the transformation has ended or been stopped.
            if (status.equals(LogStatus.END) || status.equals(LogStatus.STOP)) {
                ldb.cleanupLogRecords(transLogTable, getName());
            }
            // Commit the log records unless the connection is in auto-commit mode.
            if (!ldb.isAutoCommit()) {
                ldb.commitLog(true, transMeta.getTransLogTable());
            }
        } catch (KettleDatabaseException e) {
            // PDI-9790: an error writing to the log DB is treated as a transaction error
            log.logError(BaseMessages.getString(PKG, "Database.Error.WriteLogTable", logTable), e);
            errors.incrementAndGet();
        // end PDI-9790
        } catch (Exception e) {
            throw new KettleException(BaseMessages.getString(PKG, "Trans.Exception.ErrorWritingLogRecordToTable", transMeta.getTransLogTable().getActualTableName()), e);
        } finally {
            if (intervalInSeconds <= 0 || (status.equals(LogStatus.END) || status.equals(LogStatus.STOP))) {
                ldb.disconnect();
                transLogTableDatabaseConnection = null; // disconnected
            }
        }
    }
    return true;
}
