
Example 6 with SlaveServer

Use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.

In the class JobEntryTrans, the method execute:

/**
 * Execute this job entry and return the result. In this case that simply means setting the result boolean in the
 * Result class.
 *
 * @param result The result of the previous execution
 * @param nr     the job entry number
 * @return The Result of the execution.
 */
@Override
public Result execute(Result result, int nr) throws KettleException {
    result.setEntryNr(nr);
    LogChannelFileWriter logChannelFileWriter = null;
    LogLevel transLogLevel = parentJob.getLogLevel();
    // Set the embedded NamedCluster MetaStore provider key so that it can be passed to VFS
    if (parentJobMeta.getNamedClusterEmbedManager() != null) {
        parentJobMeta.getNamedClusterEmbedManager().passEmbeddedMetastoreKey(this, parentJobMeta.getEmbeddedMetastoreProviderKey());
    }
    String realLogFilename = "";
    if (setLogfile) {
        transLogLevel = logFileLevel;
        realLogFilename = environmentSubstitute(getLogFilename());
        // if we do not have one, we must fail
        if (Utils.isEmpty(realLogFilename)) {
            logError(BaseMessages.getString(PKG, "JobTrans.Exception.LogFilenameMissing"));
            result.setNrErrors(1);
            result.setResult(false);
            return result;
        }
        // create parent folder?
        if (!FileUtil.createParentFolder(PKG, realLogFilename, createParentFolder, this.getLogChannel(), this)) {
            result.setNrErrors(1);
            result.setResult(false);
            return result;
        }
        try {
            logChannelFileWriter = new LogChannelFileWriter(this.getLogChannelId(), KettleVFS.getFileObject(realLogFilename, this), setAppendLogfile);
            logChannelFileWriter.startLogging();
        } catch (KettleException e) {
            logError(BaseMessages.getString(PKG, "JobTrans.Error.UnableOpenAppender", realLogFilename, e.toString()));
            logError(Const.getStackTracker(e));
            result.setNrErrors(1);
            result.setResult(false);
            return result;
        }
    }
    // Log which transformation is about to be opened, depending on how it is specified.
    switch(specificationMethod) {
        case FILENAME:
            if (isDetailed()) {
                logDetailed(BaseMessages.getString(PKG, "JobTrans.Log.OpeningTrans", environmentSubstitute(getFilename())));
            }
            break;
        case REPOSITORY_BY_NAME:
            if (isDetailed()) {
                logDetailed(BaseMessages.getString(PKG, "JobTrans.Log.OpeningTransInDirec", environmentSubstitute(getFilename()), environmentSubstitute(directory)));
            }
            break;
        case REPOSITORY_BY_REFERENCE:
            if (isDetailed()) {
                logDetailed(BaseMessages.getString(PKG, "JobTrans.Log.OpeningTransByReference", transObjectId));
            }
            break;
        default:
            break;
    }
    // Load the transformation only once for the complete loop!
    // Throws an exception if it was not possible to load the transformation. For example, the XML file doesn't exist or
    // the repository is down.
    // If so, log the stack trace and return an error condition from this job entry.
    // 
    TransMeta transMeta = null;
    try {
        transMeta = getTransMeta(rep, metaStore, this);
    } catch (KettleException e) {
        logError(BaseMessages.getString(PKG, "JobTrans.Exception.UnableToRunJob", parentJobMeta.getName(), getName(), StringUtils.trim(e.getMessage())), e);
        result.setNrErrors(1);
        result.setResult(false);
        return result;
    }
    int iteration = 0;
    String[] args1 = arguments;
    if (args1 == null || args1.length == 0) {
        // No arguments set, look at the parent job.
        args1 = parentJob.getArguments();
    }
    // initializeVariablesFrom(parentJob);
    // 
    // For the moment only do variable translation at the start of a job, not
    // for every input row (if that would be switched on). This is for safety,
    // the real argument setting is later on.
    // 
    String[] args = null;
    if (args1 != null) {
        args = new String[args1.length];
        for (int idx = 0; idx < args1.length; idx++) {
            args[idx] = environmentSubstitute(args1[idx]);
        }
    }
    RowMetaAndData resultRow = null;
    boolean first = true;
    List<RowMetaAndData> rows = new ArrayList<RowMetaAndData>(result.getRows());
    while ((first && !execPerRow) || (execPerRow && rows != null && iteration < rows.size() && result.getNrErrors() == 0) && !parentJob.isStopped()) {
        // When executing once per input row, start each iteration with a clean set of result rows.
        if (execPerRow) {
            result.getRows().clear();
        }
        if (rows != null && execPerRow) {
            resultRow = rows.get(iteration);
        } else {
            resultRow = null;
        }
        NamedParams namedParam = new NamedParamsDefault();
        if (parameters != null) {
            for (int idx = 0; idx < parameters.length; idx++) {
                if (!Utils.isEmpty(parameters[idx])) {
                    // We have a parameter
                    // 
                    namedParam.addParameterDefinition(parameters[idx], "", "Job entry runtime");
                    if (Utils.isEmpty(Const.trim(parameterFieldNames[idx]))) {
                        // There is no field name specified.
                        // 
                        String value = Const.NVL(environmentSubstitute(parameterValues[idx]), "");
                        namedParam.setParameterValue(parameters[idx], value);
                    } else {
                        // A field name was filled in, in the field column...
                        // 
                        String value = "";
                        if (resultRow != null) {
                            value = resultRow.getString(parameterFieldNames[idx], "");
                        }
                        namedParam.setParameterValue(parameters[idx], value);
                    }
                }
            }
        }
        first = false;
        Result previousResult = result;
        try {
            if (isDetailed()) {
                logDetailed(BaseMessages.getString(PKG, "JobTrans.StartingTrans", getFilename(), getName(), getDescription()));
            }
            if (clearResultRows) {
                previousResult.setRows(new ArrayList<RowMetaAndData>());
            }
            if (clearResultFiles) {
                previousResult.getResultFiles().clear();
            }
            /*
         * Set one or more "result" rows on the transformation...
         */
            if (execPerRow) {
                if (argFromPrevious) {
                    // Copy the input row to the (command line) arguments
                    args = null;
                    if (resultRow != null) {
                        args = new String[resultRow.size()];
                        for (int i = 0; i < resultRow.size(); i++) {
                            args[i] = resultRow.getString(i, null);
                        }
                    }
                } else {
                    // Just pass a single row
                    List<RowMetaAndData> newList = new ArrayList<RowMetaAndData>();
                    newList.add(resultRow);
                    // This previous result rows list can be either empty or not.
                    // Depending on the checkbox "clear result rows"
                    // In this case, it would execute the transformation with one extra row each time
                    // Can't figure out a real use-case for it, but hey, who am I to decide that, right?
                    // :-)
                    // 
                    previousResult.getRows().addAll(newList);
                }
                if (paramsFromPrevious) {
                    if (parameters != null) {
                        for (int idx = 0; idx < parameters.length; idx++) {
                            if (!Utils.isEmpty(parameters[idx])) {
                                // We have a parameter
                                if (Utils.isEmpty(Const.trim(parameterFieldNames[idx]))) {
                                    namedParam.setParameterValue(parameters[idx], Const.NVL(environmentSubstitute(parameterValues[idx]), ""));
                                } else {
                                    String fieldValue = "";
                                    if (resultRow != null) {
                                        fieldValue = resultRow.getString(parameterFieldNames[idx], "");
                                    }
                                    // Get the value from the input stream
                                    namedParam.setParameterValue(parameters[idx], Const.NVL(fieldValue, ""));
                                }
                            }
                        }
                    }
                }
            } else {
                if (argFromPrevious) {
                    // Only put the first Row on the arguments
                    args = null;
                    if (resultRow != null) {
                        args = new String[resultRow.size()];
                        for (int i = 0; i < resultRow.size(); i++) {
                            args[i] = resultRow.getString(i, null);
                        }
                    }
                }
                if (paramsFromPrevious) {
                    // Copy the input values to the parameters
                    if (parameters != null) {
                        for (int idx = 0; idx < parameters.length; idx++) {
                            if (!Utils.isEmpty(parameters[idx])) {
                                // We have a parameter
                                if (Utils.isEmpty(Const.trim(parameterFieldNames[idx]))) {
                                    namedParam.setParameterValue(parameters[idx], Const.NVL(environmentSubstitute(parameterValues[idx]), ""));
                                } else {
                                    String fieldValue = "";
                                    if (resultRow != null) {
                                        fieldValue = resultRow.getString(parameterFieldNames[idx], "");
                                    }
                                    // Get the value from the input stream
                                    namedParam.setParameterValue(parameters[idx], Const.NVL(fieldValue, ""));
                                }
                            }
                        }
                    }
                }
            }
            // Handle the parameters...
            // 
            transMeta.clearParameters();
            String[] parameterNames = transMeta.listParameters();
            StepWithMappingMeta.activateParams(transMeta, transMeta, this, parameterNames, parameters, parameterValues);
            boolean doFallback = true;
            SlaveServer remoteSlaveServer = null;
            TransExecutionConfiguration executionConfiguration = new TransExecutionConfiguration();
            if (!Utils.isEmpty(runConfiguration)) {
                log.logBasic(BaseMessages.getString(PKG, "JobTrans.RunConfig.Message"), runConfiguration);
                runConfiguration = environmentSubstitute(runConfiguration);
                executionConfiguration.setRunConfiguration(runConfiguration);
                try {
                    ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.SpoonTransBeforeStart.id, new Object[] { executionConfiguration, parentJob.getJobMeta(), transMeta, rep });
                    if (!executionConfiguration.isExecutingLocally() && !executionConfiguration.isExecutingRemotely() && !executionConfiguration.isExecutingClustered()) {
                        result.setResult(true);
                        return result;
                    }
                    clustering = executionConfiguration.isExecutingClustered();
                    remoteSlaveServer = executionConfiguration.getRemoteServer();
                    doFallback = false;
                } catch (KettleException e) {
                    log.logError(e.getMessage(), getName());
                    result.setNrErrors(1);
                    result.setResult(false);
                    return result;
                }
            }
            if (doFallback) {
                // Fall back to the statically configured remote slave server, if any.
                if (!Utils.isEmpty(remoteSlaveServerName)) {
                    String realRemoteSlaveServerName = environmentSubstitute(remoteSlaveServerName);
                    remoteSlaveServer = parentJob.getJobMeta().findSlaveServer(realRemoteSlaveServerName);
                    if (remoteSlaveServer == null) {
                        throw new KettleException(BaseMessages.getString(PKG, "JobTrans.Exception.UnableToFindRemoteSlaveServer", realRemoteSlaveServerName));
                    }
                }
            }
            // Execute the transformation clustered, remotely, or locally.
            if (clustering) {
                executionConfiguration.setClusterPosting(true);
                executionConfiguration.setClusterPreparing(true);
                executionConfiguration.setClusterStarting(true);
                executionConfiguration.setClusterShowingTransformation(false);
                executionConfiguration.setSafeModeEnabled(false);
                executionConfiguration.setRepository(rep);
                executionConfiguration.setLogLevel(transLogLevel);
                executionConfiguration.setPreviousResult(previousResult);
                // Also pass the variables from the transformation into the execution configuration
                // That way it can go over the HTTP connection to the slave server.
                // 
                executionConfiguration.setVariables(transMeta);
                // Also set the arguments...
                // 
                executionConfiguration.setArgumentStrings(args);
                if (parentJob.getJobMeta().isBatchIdPassed()) {
                    executionConfiguration.setPassedBatchId(parentJob.getPassedBatchId());
                }
                TransSplitter transSplitter = null;
                long errors = 0;
                try {
                    transSplitter = Trans.executeClustered(transMeta, executionConfiguration);
                    // Monitor the running transformations, wait until they are done.
                    // Also kill them all if anything goes bad
                    // Also clean up afterwards...
                    // 
                    errors += Trans.monitorClusteredTransformation(log, transSplitter, parentJob);
                } catch (Exception e) {
                    logError("Error during clustered execution. Cleaning up clustered execution.", e);
                    // In case something goes wrong, make sure to clean up afterwards!
                    // 
                    errors++;
                    if (transSplitter != null) {
                        Trans.cleanupCluster(log, transSplitter);
                    } else {
                        // Try to clean anyway...
                        // 
                        SlaveServer master = null;
                        for (StepMeta stepMeta : transMeta.getSteps()) {
                            if (stepMeta.isClustered()) {
                                for (SlaveServer slaveServer : stepMeta.getClusterSchema().getSlaveServers()) {
                                    if (slaveServer.isMaster()) {
                                        master = slaveServer;
                                        break;
                                    }
                                }
                            }
                        }
                        if (master != null) {
                            master.deAllocateServerSockets(transMeta.getName(), null);
                        }
                    }
                }
                result.clear();
                if (transSplitter != null) {
                    Result clusterResult = Trans.getClusteredTransformationResult(log, transSplitter, parentJob, executionConfiguration.isLogRemoteExecutionLocally());
                    result.add(clusterResult);
                }
                result.setNrErrors(result.getNrErrors() + errors);
            } else if (remoteSlaveServer != null) {
                // Execute this transformation remotely
                // 
                // Make sure we can parameterize the slave server connection
                // 
                remoteSlaveServer.shareVariablesWith(this);
                // Remote execution...
                // 
                executionConfiguration.setPreviousResult(previousResult.clone());
                executionConfiguration.setArgumentStrings(args);
                executionConfiguration.setVariables(this);
                executionConfiguration.setRemoteServer(remoteSlaveServer);
                executionConfiguration.setLogLevel(transLogLevel);
                executionConfiguration.setRepository(rep);
                executionConfiguration.setLogFileName(realLogFilename);
                executionConfiguration.setSetAppendLogfile(setAppendLogfile);
                executionConfiguration.setSetLogfile(setLogfile);
                Map<String, String> params = executionConfiguration.getParams();
                for (String param : transMeta.listParameters()) {
                    String value = Const.NVL(transMeta.getParameterValue(param), Const.NVL(transMeta.getParameterDefault(param), transMeta.getVariable(param)));
                    params.put(param, value);
                }
                if (parentJob.getJobMeta().isBatchIdPassed()) {
                    executionConfiguration.setPassedBatchId(parentJob.getPassedBatchId());
                }
                // Send the XML over to the slave server
                // Also start the transformation over there...
                // 
                String carteObjectId = Trans.sendToSlaveServer(transMeta, executionConfiguration, rep, metaStore);
                // Now start the monitoring...
                // 
                SlaveServerTransStatus transStatus = null;
                while (!parentJob.isStopped() && waitingToFinish) {
                    try {
                        transStatus = remoteSlaveServer.getTransStatus(transMeta.getName(), carteObjectId, 0);
                        if (!transStatus.isRunning()) {
                            // The transformation is finished, get the result...
                            // 
                            // Get the status with the result attached (we don't do it above because of PDI-15781)
                            transStatus = remoteSlaveServer.getTransStatus(transMeta.getName(), carteObjectId, 0, true);
                            Result remoteResult = transStatus.getResult();
                            result.clear();
                            result.add(remoteResult);
                            // If the remote transformation was stopped, count it as an error.
                            if (remoteResult.isStopped()) {
                                // 
                                result.setNrErrors(result.getNrErrors() + 1);
                            }
                            // Make sure to clean up : write a log record etc, close any left-over sockets etc.
                            // 
                            remoteSlaveServer.cleanupTransformation(transMeta.getName(), carteObjectId);
                            break;
                        }
                    } catch (Exception e1) {
                        logError(BaseMessages.getString(PKG, "JobTrans.Error.UnableContactSlaveServer", "" + remoteSlaveServer, transMeta.getName()), e1);
                        result.setNrErrors(result.getNrErrors() + 1L);
                        // Stop looking too, chances are too low the server will come back on-line
                        break;
                    }
                    // sleep for 2 seconds
                    try {
                        Thread.sleep(2000);
                    } catch (InterruptedException e) {
                    // Ignore
                    }
                }
                if (parentJob.isStopped()) {
                    // The parent job was stopped: try to abort the remote transformation as well.
                    if (transStatus == null || transStatus.isRunning()) {
                        // Try a remote abort ...
                        // 
                        remoteSlaveServer.stopTransformation(transMeta.getName(), transStatus.getId());
                        // And a cleanup...
                        // 
                        remoteSlaveServer.cleanupTransformation(transMeta.getName(), transStatus.getId());
                        // Set an error state!
                        // 
                        result.setNrErrors(result.getNrErrors() + 1L);
                    }
                }
            } else {
                // Execute this transformation on the local machine
                // 
                // Create the transformation from meta-data
                // 
                // trans = new Trans( transMeta, this );
                final TransMeta meta = transMeta;
                trans = new TransSupplier(transMeta, log, () -> new Trans(meta)).get();
                trans.setParent(this);
                // Pass the socket repository as early as possible...
                // 
                trans.setSocketRepository(parentJob.getSocketRepository());
                if (parentJob.getJobMeta().isBatchIdPassed()) {
                    trans.setPassedBatchId(parentJob.getPassedBatchId());
                }
                // set the parent job on the transformation, variables are taken from here...
                // 
                trans.setParentJob(parentJob);
                trans.setParentVariableSpace(parentJob);
                trans.setLogLevel(transLogLevel);
                trans.setPreviousResult(previousResult);
                trans.setArguments(arguments);
                // Mappings need the repository to load from
                // 
                trans.setRepository(rep);
                // inject the metaStore
                trans.setMetaStore(metaStore);
                // First get the root job
                // 
                Job rootJob = parentJob;
                while (rootJob.getParentJob() != null) {
                    rootJob = rootJob.getParentJob();
                }
                // Get the start and end-date from the root job...
                // 
                trans.setJobStartDate(rootJob.getStartDate());
                trans.setJobEndDate(rootJob.getEndDate());
                // Tell the delegation listeners that a transformation is being started on our behalf.
                for (DelegationListener delegationListener : parentJob.getDelegationListeners()) {
                    // TODO: copy some settings in the job execution configuration, not strictly needed
                    // but the execution configuration information is useful in case of a job re-start
                    // 
                    delegationListener.transformationDelegationStarted(trans, new TransExecutionConfiguration());
                }
                try {
                    // Start execution...
                    // 
                    trans.execute(args);
                    // TODO is it possible to implement Observer pattern to avoid Thread.sleep here?
                    while (!trans.isFinished() && trans.getErrors() == 0) {
                        if (parentJob.isStopped()) {
                            trans.stopAll();
                            break;
                        } else {
                            try {
                                Thread.sleep(0, 500);
                            } catch (InterruptedException e) {
                            // Ignore errors
                            }
                        }
                    }
                    trans.waitUntilFinished();
                    if (parentJob.isStopped() || trans.getErrors() != 0) {
                        trans.stopAll();
                        result.setNrErrors(1);
                    }
                    Result newResult = trans.getResult();
                    // clear only the numbers, NOT the files or rows.
                    result.clear();
                    result.add(newResult);
                    // Set the result rows too, if any ...
                    if (!Utils.isEmpty(newResult.getRows())) {
                        result.setRows(newResult.getRows());
                    }
                    if (setLogfile) {
                        ResultFile resultFile = new ResultFile(ResultFile.FILE_TYPE_LOG, KettleVFS.getFileObject(realLogFilename, this), parentJob.getJobname(), toString());
                        result.getResultFiles().put(resultFile.getFile().toString(), resultFile);
                    }
                } catch (KettleException e) {
                    logError(BaseMessages.getString(PKG, "JobTrans.Error.UnablePrepareExec"), e);
                    result.setNrErrors(1);
                }
            }
        } catch (Exception e) {
            logError(BaseMessages.getString(PKG, "JobTrans.ErrorUnableOpenTrans", e.getMessage()));
            logError(Const.getStackTracker(e));
            result.setNrErrors(1);
        }
        iteration++;
    }
    if (setLogfile) {
        if (logChannelFileWriter != null) {
            logChannelFileWriter.stopLogging();
            ResultFile resultFile = new ResultFile(ResultFile.FILE_TYPE_LOG, logChannelFileWriter.getLogFile(), parentJob.getJobname(), getName());
            result.getResultFiles().put(resultFile.getFile().toString(), resultFile);
            // If an error occurred while writing the log file, flag it on the result.
            if (logChannelFileWriter.getException() != null) {
                logError("Unable to open log file [" + getLogFilename() + "] : ");
                logError(Const.getStackTracker(logChannelFileWriter.getException()));
                result.setNrErrors(1);
                result.setResult(false);
                return result;
            }
        }
    }
    if (result.getNrErrors() == 0) {
        result.setResult(true);
    } else {
        result.setResult(false);
    }
    return result;
}
Also used: KettleException (org.pentaho.di.core.exception.KettleException), NamedParams (org.pentaho.di.core.parameters.NamedParams), TransMeta (org.pentaho.di.trans.TransMeta), ArrayList (java.util.ArrayList), SlaveServer (org.pentaho.di.cluster.SlaveServer), LogLevel (org.pentaho.di.core.logging.LogLevel), Result (org.pentaho.di.core.Result), RowMetaAndData (org.pentaho.di.core.RowMetaAndData), Job (org.pentaho.di.job.Job), Trans (org.pentaho.di.trans.Trans), NamedParamsDefault (org.pentaho.di.core.parameters.NamedParamsDefault), LogChannelFileWriter (org.pentaho.di.core.logging.LogChannelFileWriter), ResultFile (org.pentaho.di.core.ResultFile), StepMeta (org.pentaho.di.trans.step.StepMeta), KettleExtensionPoint (org.pentaho.di.core.extension.KettleExtensionPoint), KettleXMLException (org.pentaho.di.core.exception.KettleXMLException), KettleDatabaseException (org.pentaho.di.core.exception.KettleDatabaseException), TransSupplier (org.pentaho.di.trans.TransSupplier), TransExecutionConfiguration (org.pentaho.di.trans.TransExecutionConfiguration), SlaveServerTransStatus (org.pentaho.di.www.SlaveServerTransStatus), TransSplitter (org.pentaho.di.trans.cluster.TransSplitter), Map (java.util.Map), DelegationListener (org.pentaho.di.job.DelegationListener)
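
A condensed sketch of the remote-execution branch above, for orientation only: post a transformation to a slave server, poll its status, fetch the result, and clean up. The server name, host, port, and credentials are placeholder values; transMeta, repository, and metaStore are assumed to be in scope; and checked exceptions are assumed to propagate to the caller.

SlaveServer slaveServer = new SlaveServer("carte1", "localhost", "8081", "cluster", "cluster");

TransExecutionConfiguration config = new TransExecutionConfiguration();
config.setRemoteServer(slaveServer);
config.setExecutingRemotely(true);
config.setLogLevel(LogLevel.BASIC);

// Ship the transformation XML to the Carte server and start it there.
String carteObjectId = Trans.sendToSlaveServer(transMeta, config, repository, metaStore);

// Poll every two seconds until the remote transformation stops running.
SlaveServerTransStatus status = slaveServer.getTransStatus(transMeta.getName(), carteObjectId, 0);
while (status.isRunning()) {
    try {
        Thread.sleep(2000);
    } catch (InterruptedException e) {
        // Ignore, as the job entry above does
    }
    status = slaveServer.getTransStatus(transMeta.getName(), carteObjectId, 0);
}

// Fetch the final status with the result attached, then release server-side resources.
status = slaveServer.getTransStatus(transMeta.getName(), carteObjectId, 0, true);
Result remoteResult = status.getResult();
slaveServer.cleanupTransformation(transMeta.getName(), carteObjectId);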

Example 7 with SlaveServer

Use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.

In the class RepositoryImporter, the method loadSharedObjects:

/**
 * Load the shared objects up front and replace them in the transformations/jobs loaded from XML. We do this for
 * performance reasons.
 *
 * @throws KettleException
 */
protected void loadSharedObjects() throws KettleException {
    sharedObjects = new SharedObjects();
    for (ObjectId id : rep.getDatabaseIDs(false)) {
        DatabaseMeta databaseMeta = rep.loadDatabaseMeta(id, null);
        validateImportedElement(importRules, databaseMeta);
        sharedObjects.storeObject(databaseMeta);
    }
    ObjectId[] slaveIDs = rep.getSlaveIDs(false);
    List<SlaveServer> slaveServers = new ArrayList<SlaveServer>(slaveIDs.length);
    for (ObjectId id : slaveIDs) {
        SlaveServer slaveServer = rep.loadSlaveServer(id, null);
        validateImportedElement(importRules, slaveServer);
        sharedObjects.storeObject(slaveServer);
        slaveServers.add(slaveServer);
    }
    for (ObjectId id : rep.getClusterIDs(false)) {
        ClusterSchema clusterSchema = rep.loadClusterSchema(id, slaveServers, null);
        validateImportedElement(importRules, clusterSchema);
        sharedObjects.storeObject(clusterSchema);
    }
    for (ObjectId id : rep.getPartitionSchemaIDs(false)) {
        PartitionSchema partitionSchema = rep.loadPartitionSchema(id, null);
        validateImportedElement(importRules, partitionSchema);
        sharedObjects.storeObject(partitionSchema);
    }
}
Also used: PartitionSchema (org.pentaho.di.partition.PartitionSchema), ArrayList (java.util.ArrayList), SharedObjects (org.pentaho.di.shared.SharedObjects), SlaveServer (org.pentaho.di.cluster.SlaveServer), DatabaseMeta (org.pentaho.di.core.database.DatabaseMeta), ClusterSchema (org.pentaho.di.cluster.ClusterSchema)
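
For comparison with the repository-driven loop above, a minimal sketch of building the same kind of SharedObjects collection by hand. The server details are placeholders, the setMaster/saveToFile calls are used as commonly seen in this codebase, and checked exceptions are assumed to propagate.

SharedObjects sharedObjects = new SharedObjects();

// Register a slave server so that transformations and jobs can reference it by name.
SlaveServer slaveServer = new SlaveServer("master1", "carte-host", "8080", "cluster", "cluster");
slaveServer.setMaster(true);
sharedObjects.storeObject(slaveServer);

// Write the collection to its backing shared objects file.
sharedObjects.saveToFile();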

Example 8 with SlaveServer

Use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.

In the class JobMeta, the method getXML:

/*
   * (non-Javadoc)
   *
   * @see org.pentaho.di.core.xml.XMLInterface#getXML()
   */
public String getXML() {
    // Clear the embedded named clusters. We will repopulate them from the steps that use named clusters.
    getNamedClusterEmbedManager().clear();
    Props props = null;
    if (Props.isInitialized()) {
        props = Props.getInstance();
    }
    StringBuilder retval = new StringBuilder(500);
    retval.append(XMLHandler.openTag(XML_TAG)).append(Const.CR);
    retval.append("  ").append(XMLHandler.addTagValue("name", getName()));
    retval.append("  ").append(XMLHandler.addTagValue("description", description));
    retval.append("  ").append(XMLHandler.addTagValue("extended_description", extendedDescription));
    retval.append("  ").append(XMLHandler.addTagValue("job_version", jobVersion));
    if (jobStatus >= 0) {
        retval.append("  ").append(XMLHandler.addTagValue("job_status", jobStatus));
    }
    retval.append("  ").append(XMLHandler.addTagValue("directory", (directory != null ? directory.getPath() : RepositoryDirectory.DIRECTORY_SEPARATOR)));
    retval.append("  ").append(XMLHandler.addTagValue("created_user", createdUser));
    retval.append("  ").append(XMLHandler.addTagValue("created_date", XMLHandler.date2string(createdDate)));
    retval.append("  ").append(XMLHandler.addTagValue("modified_user", modifiedUser));
    retval.append("  ").append(XMLHandler.addTagValue("modified_date", XMLHandler.date2string(modifiedDate)));
    retval.append("    ").append(XMLHandler.openTag(XML_TAG_PARAMETERS)).append(Const.CR);
    String[] parameters = listParameters();
    for (int idx = 0; idx < parameters.length; idx++) {
        retval.append("      ").append(XMLHandler.openTag("parameter")).append(Const.CR);
        retval.append("        ").append(XMLHandler.addTagValue("name", parameters[idx]));
        try {
            retval.append("        ").append(XMLHandler.addTagValue("default_value", getParameterDefault(parameters[idx])));
            retval.append("        ").append(XMLHandler.addTagValue("description", getParameterDescription(parameters[idx])));
        } catch (UnknownParamException e) {
        // skip the default value and/or description. This exception should never happen because we use listParameters()
        // above.
        }
        retval.append("      ").append(XMLHandler.closeTag("parameter")).append(Const.CR);
    }
    retval.append("    ").append(XMLHandler.closeTag(XML_TAG_PARAMETERS)).append(Const.CR);
    Set<DatabaseMeta> usedDatabaseMetas = getUsedDatabaseMetas();
    // Save the database connections...
    for (int i = 0; i < nrDatabases(); i++) {
        DatabaseMeta dbMeta = getDatabase(i);
        if (props != null && props.areOnlyUsedConnectionsSavedToXML()) {
            if (usedDatabaseMetas.contains(dbMeta)) {
                retval.append(dbMeta.getXML());
            }
        } else {
            retval.append(dbMeta.getXML());
        }
    }
    // The slave servers...
    // 
    retval.append("    ").append(XMLHandler.openTag(XML_TAG_SLAVESERVERS)).append(Const.CR);
    for (int i = 0; i < slaveServers.size(); i++) {
        SlaveServer slaveServer = slaveServers.get(i);
        retval.append(slaveServer.getXML());
    }
    retval.append("    ").append(XMLHandler.closeTag(XML_TAG_SLAVESERVERS)).append(Const.CR);
    // Append the XML of the log tables.
    for (LogTableInterface logTable : getLogTables()) {
        retval.append(logTable.getXML());
    }
    retval.append("   ").append(XMLHandler.addTagValue("pass_batchid", batchIdPassed));
    retval.append("   ").append(XMLHandler.addTagValue("shared_objects_file", sharedObjectsFile));
    retval.append("  ").append(XMLHandler.openTag("entries")).append(Const.CR);
    for (int i = 0; i < nrJobEntries(); i++) {
        JobEntryCopy jge = getJobEntry(i);
        jge.getEntry().setRepository(repository);
        retval.append(jge.getXML());
    }
    retval.append("  ").append(XMLHandler.closeTag("entries")).append(Const.CR);
    retval.append("  ").append(XMLHandler.openTag("hops")).append(Const.CR);
    for (JobHopMeta hi : jobhops) {
        // Look at all the hops
        retval.append(hi.getXML());
    }
    retval.append("  ").append(XMLHandler.closeTag("hops")).append(Const.CR);
    retval.append("  ").append(XMLHandler.openTag("notepads")).append(Const.CR);
    for (int i = 0; i < nrNotes(); i++) {
        NotePadMeta ni = getNote(i);
        retval.append(ni.getXML());
    }
    retval.append("  ").append(XMLHandler.closeTag("notepads")).append(Const.CR);
    // Also store the attribute groups
    // 
    retval.append(AttributesUtil.getAttributesXml(attributesMap));
    retval.append(XMLHandler.closeTag(XML_TAG)).append(Const.CR);
    return XMLFormatter.format(retval.toString());
}
Also used: Props (org.pentaho.di.core.Props), SlaveServer (org.pentaho.di.cluster.SlaveServer), DatabaseMeta (org.pentaho.di.core.database.DatabaseMeta), Point (org.pentaho.di.core.gui.Point), KettleExtensionPoint (org.pentaho.di.core.extension.KettleExtensionPoint), LogTableInterface (org.pentaho.di.core.logging.LogTableInterface), JobEntryCopy (org.pentaho.di.job.entry.JobEntryCopy), UnknownParamException (org.pentaho.di.core.parameters.UnknownParamException), NotePadMeta (org.pentaho.di.core.NotePadMeta)
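
The slave server serialization used by getXML() is symmetric: the fragment produced by SlaveServer.getXML() can be parsed back with the Node-based constructor. A minimal round-trip sketch, with placeholder connection values and Node referring to org.w3c.dom.Node:

SlaveServer original = new SlaveServer("slave1", "host1", "8081", "cluster", "cluster");

// Serialize to the same <slaveserver> XML fragment that JobMeta.getXML() embeds.
String xml = original.getXML();

// Parse the fragment back into a DOM node and reconstruct the slave server.
Node node = XMLHandler.loadXMLString(xml, SlaveServer.XML_TAG);
SlaveServer restored = new SlaveServer(node);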

Example 9 with SlaveServer

Use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.

In the class Trans, the method cleanupCluster:

/**
 * Clean up the cluster, including the master and all slaves, and return the number of errors that occurred.
 *
 * @param log
 *          the log channel interface
 * @param transSplitter
 *          the TransSplitter object
 * @return the number of errors that occurred in the clustered transformation
 */
public static int cleanupCluster(LogChannelInterface log, TransSplitter transSplitter) {
    SlaveServer[] slaveServers = transSplitter.getSlaveTargets();
    TransMeta[] slaves = transSplitter.getSlaves();
    SlaveServer masterServer;
    try {
        masterServer = transSplitter.getMasterServer();
    } catch (KettleException e) {
        log.logError("Unable to obtain the master server from the cluster", e);
        return 1;
    }
    TransMeta masterTransMeta = transSplitter.getMaster();
    int errors = 0;
    // Clean up the slave transformations on all slave servers.
    for (int s = 0; s < slaveServers.length; s++) {
        try {
            cleanupSlaveServer(transSplitter, slaveServers[s], slaves[s]);
        } catch (Exception e) {
            errors++;
            log.logError("Unable to contact slave server '" + slaveServers[s].getName() + "' to clean up slave transformation", e);
        }
    }
    // Clean up the master transformation as well, if it has any steps.
    if (masterTransMeta != null && masterTransMeta.nrSteps() > 0) {
        try {
            cleanupSlaveServer(transSplitter, masterServer, masterTransMeta);
        } catch (Exception e) {
            errors++;
            log.logError("Unable to contact master server '" + masterServer.getName() + "' to clean up master transformation", e);
        }
        // 
        try {
            // Deallocate all ports belonging to this clustered run, not anything else
            // 
            masterServer.deAllocateServerSockets(transSplitter.getOriginalTransformation().getName(), transSplitter.getClusteredRunId());
        } catch (Exception e) {
            errors++;
            log.logError("Unable to contact master server '" + masterServer.getName() + "' to clean up port sockets for transformation'" + transSplitter.getOriginalTransformation().getName() + "'", e);
        }
    }
    return errors;
}
Also used: KettleException (org.pentaho.di.core.exception.KettleException), SlaveServer (org.pentaho.di.cluster.SlaveServer), KettleExtensionPoint (org.pentaho.di.core.extension.KettleExtensionPoint), UnknownParamException (org.pentaho.di.core.parameters.UnknownParamException), KettleValueException (org.pentaho.di.core.exception.KettleValueException), KettleTransException (org.pentaho.di.core.exception.KettleTransException), DuplicateParamException (org.pentaho.di.core.parameters.DuplicateParamException), KettleFileException (org.pentaho.di.core.exception.KettleFileException), UnsupportedEncodingException (java.io.UnsupportedEncodingException), KettleDatabaseException (org.pentaho.di.core.exception.KettleDatabaseException)
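
cleanupCluster is the error-path counterpart of executeClustered and monitorClusteredTransformation. The sketch below mirrors the calling pattern from Example 6; transMeta, executionConfiguration, log, and parentJob are assumed to be in scope.

TransSplitter transSplitter = null;
long errors = 0;
try {
    // Split the transformation, post it to the cluster, and start it.
    transSplitter = Trans.executeClustered(transMeta, executionConfiguration);
    // Wait for the clustered transformations to finish; this also cleans up after itself.
    errors += Trans.monitorClusteredTransformation(log, transSplitter, parentJob);
} catch (Exception e) {
    errors++;
    log.logError("Error during clustered execution. Cleaning up clustered execution.", e);
    if (transSplitter != null) {
        // On failure, explicitly release the slave, master, and port allocations.
        errors += Trans.cleanupCluster(log, transSplitter);
    }
}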

Example 10 with SlaveServer

Use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.

In the class Trans, the method executeClustered:

/**
 * Executes an existing TransSplitter, with the transformation already split.
 *
 * @param transSplitter
 *          the trans splitter
 * @param executionConfiguration
 *          the execution configuration
 * @throws KettleException
 *           the kettle exception
 * @see org.pentaho.di.ui.spoon.delegates.SpoonTransformationDelegate
 */
public static void executeClustered(final TransSplitter transSplitter, final TransExecutionConfiguration executionConfiguration) throws KettleException {
    try {
        // Send the transformations to the servers...
        // 
        // First the master and the slaves...
        // 
        TransMeta master = transSplitter.getMaster();
        final SlaveServer[] slaves = transSplitter.getSlaveTargets();
        final Thread[] threads = new Thread[slaves.length];
        final Throwable[] errors = new Throwable[slaves.length];
        // Keep track of the various Carte object IDs
        // 
        final Map<TransMeta, String> carteObjectMap = transSplitter.getCarteObjectMap();
        // 
        // Send them all on their way...
        // 
        SlaveServer masterServer = null;
        List<StepMeta> masterSteps = master.getTransHopSteps(false);
        if (masterSteps.size() > 0) {
            // If there is something that needs to be done on the master...
            masterServer = transSplitter.getMasterServer();
            if (executionConfiguration.isClusterPosting()) {
                TransConfiguration transConfiguration = new TransConfiguration(master, executionConfiguration);
                Map<String, String> variables = transConfiguration.getTransExecutionConfiguration().getVariables();
                variables.put(Const.INTERNAL_VARIABLE_CLUSTER_SIZE, Integer.toString(slaves.length));
                variables.put(Const.INTERNAL_VARIABLE_CLUSTER_MASTER, "Y");
                // Parameters override the variables but they need to pass over the configuration too...
                // 
                Map<String, String> params = transConfiguration.getTransExecutionConfiguration().getParams();
                TransMeta ot = transSplitter.getOriginalTransformation();
                for (String param : ot.listParameters()) {
                    String value = Const.NVL(ot.getParameterValue(param), Const.NVL(ot.getParameterDefault(param), ot.getVariable(param)));
                    params.put(param, value);
                }
                String masterReply = masterServer.sendXML(transConfiguration.getXML(), RegisterTransServlet.CONTEXT_PATH + "/?xml=Y");
                WebResult webResult = WebResult.fromXMLString(masterReply);
                if (!webResult.getResult().equalsIgnoreCase(WebResult.STRING_OK)) {
                    throw new KettleException("An error occurred sending the master transformation: " + webResult.getMessage());
                }
                carteObjectMap.put(master, webResult.getId());
            }
        }
        // Post a copy of the transformation to each slave server, each in its own thread.
        for (int i = 0; i < slaves.length; i++) {
            final int index = i;
            final TransMeta slaveTrans = transSplitter.getSlaveTransMap().get(slaves[i]);
            if (executionConfiguration.isClusterPosting()) {
                Runnable runnable = new Runnable() {

                    @Override
                    public void run() {
                        try {
                            // Create a copy for local use... We get race-conditions otherwise...
                            // 
                            TransExecutionConfiguration slaveTransExecutionConfiguration = (TransExecutionConfiguration) executionConfiguration.clone();
                            TransConfiguration transConfiguration = new TransConfiguration(slaveTrans, slaveTransExecutionConfiguration);
                            Map<String, String> variables = slaveTransExecutionConfiguration.getVariables();
                            variables.put(Const.INTERNAL_VARIABLE_SLAVE_SERVER_NUMBER, Integer.toString(index));
                            variables.put(Const.INTERNAL_VARIABLE_SLAVE_SERVER_NAME, slaves[index].getName());
                            variables.put(Const.INTERNAL_VARIABLE_CLUSTER_SIZE, Integer.toString(slaves.length));
                            variables.put(Const.INTERNAL_VARIABLE_CLUSTER_MASTER, "N");
                            // Parameters override the variables but they need to pass over the configuration too...
                            // 
                            Map<String, String> params = slaveTransExecutionConfiguration.getParams();
                            TransMeta ot = transSplitter.getOriginalTransformation();
                            for (String param : ot.listParameters()) {
                                String value = Const.NVL(ot.getParameterValue(param), Const.NVL(ot.getParameterDefault(param), ot.getVariable(param)));
                                params.put(param, value);
                            }
                            String slaveReply = slaves[index].sendXML(transConfiguration.getXML(), RegisterTransServlet.CONTEXT_PATH + "/?xml=Y");
                            WebResult webResult = WebResult.fromXMLString(slaveReply);
                            if (!webResult.getResult().equalsIgnoreCase(WebResult.STRING_OK)) {
                                throw new KettleException("An error occurred sending a slave transformation: " + webResult.getMessage());
                            }
                            carteObjectMap.put(slaveTrans, webResult.getId());
                        } catch (Throwable t) {
                            errors[index] = t;
                        }
                    }
                };
                threads[i] = new Thread(runnable);
            }
        }
        // Start the slaves
        for (int i = 0; i < threads.length; i++) {
            if (threads[i] != null) {
                threads[i].start();
            }
        }
        // Wait for the posting threads to finish and surface any errors.
        for (int i = 0; i < threads.length; i++) {
            if (threads[i] != null) {
                threads[i].join();
                if (errors[i] != null) {
                    throw new KettleException(errors[i]);
                }
            }
        }
        if (executionConfiguration.isClusterPosting()) {
            if (executionConfiguration.isClusterPreparing()) {
                // Prepare the master...
                if (masterSteps.size() > 0) {
                    // If there is something that needs to be done on the master...
                    String carteObjectId = carteObjectMap.get(master);
                    String masterReply = masterServer.execService(PrepareExecutionTransServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode(master.getName(), "UTF-8") + "&id=" + URLEncoder.encode(carteObjectId, "UTF-8") + "&xml=Y");
                    WebResult webResult = WebResult.fromXMLString(masterReply);
                    if (!webResult.getResult().equalsIgnoreCase(WebResult.STRING_OK)) {
                        throw new KettleException("An error occurred while preparing the execution of the master transformation: " + webResult.getMessage());
                    }
                }
                // WG: Should these be threaded like the above initialization?
                for (int i = 0; i < slaves.length; i++) {
                    TransMeta slaveTrans = transSplitter.getSlaveTransMap().get(slaves[i]);
                    String carteObjectId = carteObjectMap.get(slaveTrans);
                    String slaveReply = slaves[i].execService(PrepareExecutionTransServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode(slaveTrans.getName(), "UTF-8") + "&id=" + URLEncoder.encode(carteObjectId, "UTF-8") + "&xml=Y");
                    WebResult webResult = WebResult.fromXMLString(slaveReply);
                    if (!webResult.getResult().equalsIgnoreCase(WebResult.STRING_OK)) {
                        throw new KettleException("An error occurred while preparing the execution of a slave transformation: " + webResult.getMessage());
                    }
                }
            }
            if (executionConfiguration.isClusterStarting()) {
                // Start the master...
                if (masterSteps.size() > 0) {
                    // If there is something that needs to be done on the master...
                    String carteObjectId = carteObjectMap.get(master);
                    String masterReply = masterServer.execService(StartExecutionTransServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode(master.getName(), "UTF-8") + "&id=" + URLEncoder.encode(carteObjectId, "UTF-8") + "&xml=Y");
                    WebResult webResult = WebResult.fromXMLString(masterReply);
                    if (!webResult.getResult().equalsIgnoreCase(WebResult.STRING_OK)) {
                        throw new KettleException("An error occurred while starting the execution of the master transformation: " + webResult.getMessage());
                    }
                }
                // WG: Should these be threaded like the above initialization?
                for (int i = 0; i < slaves.length; i++) {
                    TransMeta slaveTrans = transSplitter.getSlaveTransMap().get(slaves[i]);
                    String carteObjectId = carteObjectMap.get(slaveTrans);
                    String slaveReply = slaves[i].execService(StartExecutionTransServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode(slaveTrans.getName(), "UTF-8") + "&id=" + URLEncoder.encode(carteObjectId, "UTF-8") + "&xml=Y");
                    WebResult webResult = WebResult.fromXMLString(slaveReply);
                    if (!webResult.getResult().equalsIgnoreCase(WebResult.STRING_OK)) {
                        throw new KettleException("An error occurred while starting the execution of a slave transformation: " + webResult.getMessage());
                    }
                }
            }
        }
    } catch (KettleException ke) {
        throw ke;
    } catch (Exception e) {
        throw new KettleException("There was an error during transformation split", e);
    }
}
Also used: KettleException (org.pentaho.di.core.exception.KettleException), ValueMetaString (org.pentaho.di.core.row.value.ValueMetaString), SlaveServer (org.pentaho.di.cluster.SlaveServer), StepMeta (org.pentaho.di.trans.step.StepMeta), KettleExtensionPoint (org.pentaho.di.core.extension.KettleExtensionPoint), UnknownParamException (org.pentaho.di.core.parameters.UnknownParamException), KettleValueException (org.pentaho.di.core.exception.KettleValueException), KettleTransException (org.pentaho.di.core.exception.KettleTransException), DuplicateParamException (org.pentaho.di.core.parameters.DuplicateParamException), KettleFileException (org.pentaho.di.core.exception.KettleFileException), UnsupportedEncodingException (java.io.UnsupportedEncodingException), KettleDatabaseException (org.pentaho.di.core.exception.KettleDatabaseException), RunThread (org.pentaho.di.trans.step.RunThread), StepInitThread (org.pentaho.di.trans.step.StepInitThread), WebResult (org.pentaho.di.www.WebResult)
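
A minimal sketch of driving this overload directly, which is effectively what the TransMeta-based executeClustered used in Example 6 does before delegating here. transMeta is assumed to be in scope and to reference a cluster schema, and KettleException is assumed to propagate to the caller.

TransExecutionConfiguration config = new TransExecutionConfiguration();
config.setClusterPosting(true);
config.setClusterPreparing(true);
config.setClusterStarting(true);

// Compute the master and slave transformations from the clustered steps.
TransSplitter transSplitter = new TransSplitter(transMeta);
transSplitter.splitOriginalTransformation();

// Post, prepare, and start the split transformation across the cluster.
Trans.executeClustered(transSplitter, config);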

Aggregations

SlaveServer (org.pentaho.di.cluster.SlaveServer): 103
KettleException (org.pentaho.di.core.exception.KettleException): 35
DatabaseMeta (org.pentaho.di.core.database.DatabaseMeta): 28
ClusterSchema (org.pentaho.di.cluster.ClusterSchema): 22
KettleDatabaseException (org.pentaho.di.core.exception.KettleDatabaseException): 18
PartitionSchema (org.pentaho.di.partition.PartitionSchema): 18
Test (org.junit.Test): 17
KettleExtensionPoint (org.pentaho.di.core.extension.KettleExtensionPoint): 17
JobMeta (org.pentaho.di.job.JobMeta): 16
ObjectId (org.pentaho.di.repository.ObjectId): 15
StepMeta (org.pentaho.di.trans.step.StepMeta): 14
ArrayList (java.util.ArrayList): 13
TransMeta (org.pentaho.di.trans.TransMeta): 11
KettleFileException (org.pentaho.di.core.exception.KettleFileException): 10
NotePadMeta (org.pentaho.di.core.NotePadMeta): 9
UnknownParamException (org.pentaho.di.core.parameters.UnknownParamException): 9
Point (org.pentaho.di.core.gui.Point): 8
KettleXMLException (org.pentaho.di.core.exception.KettleXMLException): 7
DuplicateParamException (org.pentaho.di.core.parameters.DuplicateParamException): 7
JobEntryCopy (org.pentaho.di.job.entry.JobEntryCopy): 7