Search in sources :

Example 96 with SlaveServer

use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.

In class Trans, the method sendToSlaveServer.

/**
 * Send the transformation for execution to a Carte slave server.
 *
 * @param transMeta              the transformation meta-data
 * @param executionConfiguration the transformation execution configuration
 * @param repository             the repository
 * @param metaStore              the metastore in which shared resources are looked up
 * @return The Carte object ID on the server.
 * @throws KettleException if any errors occur during the dispatch to the slave server
 */
public static String sendToSlaveServer(TransMeta transMeta, TransExecutionConfiguration executionConfiguration, Repository repository, IMetaStore metaStore) throws KettleException {
    String carteObjectId;
    SlaveServer slaveServer = executionConfiguration.getRemoteServer();
    if (slaveServer == null) {
        throw new KettleException("No slave server specified");
    }
    // The name is the key under which the remote Carte server registers the transformation.
    if (Utils.isEmpty(transMeta.getName())) {
        throw new KettleException("The transformation needs a name to uniquely identify it by on the remote server.");
    }
    // Inject certain internal variables to make it more intuitive.
    // 
    Map<String, String> vars = new HashMap<>();
    for (String var : Const.INTERNAL_TRANS_VARIABLES) {
        vars.put(var, transMeta.getVariable(var));
    }
    for (String var : Const.INTERNAL_JOB_VARIABLES) {
        vars.put(var, transMeta.getVariable(var));
    }
    executionConfiguration.getVariables().putAll(vars);
    slaveServer.injectVariables(executionConfiguration.getVariables());
    slaveServer.getLogChannel().setLogLevel(executionConfiguration.getLogLevel());
    try {
        if (executionConfiguration.isPassingExport()) {
            // First export the job...
            // 
            FileObject tempFile = KettleVFS.createTempFile("transExport", KettleVFS.Suffix.ZIP, transMeta);
            // the executionConfiguration should not include a repository here because all the resources should be
            // retrieved from the exported zip file
            TransExecutionConfiguration clonedConfiguration = (TransExecutionConfiguration) executionConfiguration.clone();
            clonedConfiguration.setRepository(null);
            TopLevelResource topLevelResource = ResourceUtil.serializeResourceExportInterface(tempFile.getName().toString(), transMeta, transMeta, repository, metaStore, clonedConfiguration.getXML(), CONFIGURATION_IN_EXPORT_FILENAME);
            // Send the zip file over to the slave server...
            // 
            String result = slaveServer.sendExport(topLevelResource.getArchiveName(), RegisterPackageServlet.TYPE_TRANS, topLevelResource.getBaseResourceName());
            WebResult webResult = WebResult.fromXMLString(result);
            if (!webResult.getResult().equalsIgnoreCase(WebResult.STRING_OK)) {
                throw new KettleException("There was an error passing the exported transformation to the remote server: " + Const.CR + webResult.getMessage());
            }
            carteObjectId = webResult.getId();
        } else {
            // Now send it off to the remote server...
            // 
            String xml = new TransConfiguration(transMeta, executionConfiguration).getXML();
            String reply = slaveServer.sendXML(xml, RegisterTransServlet.CONTEXT_PATH + "/?xml=Y");
            WebResult webResult = WebResult.fromXMLString(reply);
            if (!webResult.getResult().equalsIgnoreCase(WebResult.STRING_OK)) {
                throw new KettleException("There was an error posting the transformation on the remote server: " + Const.CR + webResult.getMessage());
            }
            carteObjectId = webResult.getId();
        }
        // The name is sent twice (prepare + start); encode it once.
        String encodedName = URLEncoder.encode(transMeta.getName(), "UTF-8");
        // Prepare the transformation
        // 
        String reply = slaveServer.execService(PrepareExecutionTransServlet.CONTEXT_PATH + "/?name=" + encodedName + "&xml=Y&id=" + carteObjectId);
        WebResult webResult = WebResult.fromXMLString(reply);
        if (!webResult.getResult().equalsIgnoreCase(WebResult.STRING_OK)) {
            // NOTE: fixed typo "excution" -> "execution" in the error message.
            throw new KettleException("There was an error preparing the transformation for execution on the remote server: " + Const.CR + webResult.getMessage());
        }
        // Start the transformation
        // 
        reply = slaveServer.execService(StartExecutionTransServlet.CONTEXT_PATH + "/?name=" + encodedName + "&xml=Y&id=" + carteObjectId);
        webResult = WebResult.fromXMLString(reply);
        if (!webResult.getResult().equalsIgnoreCase(WebResult.STRING_OK)) {
            throw new KettleException("There was an error starting the transformation on the remote server: " + Const.CR + webResult.getMessage());
        }
        return carteObjectId;
    } catch (KettleException ke) {
        // Re-throw as-is so callers keep the original, specific exception.
        throw ke;
    } catch (Exception e) {
        throw new KettleException(e);
    }
}
Also used : TopLevelResource(org.pentaho.di.resource.TopLevelResource) KettleException(org.pentaho.di.core.exception.KettleException) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) ValueMetaString(org.pentaho.di.core.row.value.ValueMetaString) FileObject(org.apache.commons.vfs2.FileObject) SlaveServer(org.pentaho.di.cluster.SlaveServer) WebResult(org.pentaho.di.www.WebResult) UnknownParamException(org.pentaho.di.core.parameters.UnknownParamException) KettleValueException(org.pentaho.di.core.exception.KettleValueException) KettleTransException(org.pentaho.di.core.exception.KettleTransException) DuplicateParamException(org.pentaho.di.core.parameters.DuplicateParamException) KettleFileException(org.pentaho.di.core.exception.KettleFileException) UnsupportedEncodingException(java.io.UnsupportedEncodingException) KettleException(org.pentaho.di.core.exception.KettleException) KettleDatabaseException(org.pentaho.di.core.exception.KettleDatabaseException)

Example 97 with SlaveServer

use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.

In class TransSplitter, the method checkClusterConfiguration.

/**
 * Validate the cluster configuration used by the original transformation: every referenced
 * cluster schema must have a master and at least one non-master slave, and only a single
 * cluster schema may be in use.
 *
 * @throws KettleException if the cluster configuration is invalid
 */
private void checkClusterConfiguration() throws KettleException {
    // Collect each cluster schema referenced by a step, keyed by name.
    Map<String, ClusterSchema> usedSchemas = new Hashtable<String, ClusterSchema>();
    for (StepMeta stepMeta : originalTransformation.getSteps()) {
        ClusterSchema schema = stepMeta.getClusterSchema();
        if (schema == null) {
            continue;
        }
        usedSchemas.put(schema.getName(), schema);
        // A schema without a master cannot coordinate the clustered run.
        // 
        if (schema.findMaster() == null) {
            throw new KettleException("No master server was specified in cluster schema [" + schema + "]");
        }
        // Remember cluster details while we have the cluster handy
        // 
        socketsBufferSize = Const.toInt(originalTransformation.environmentSubstitute(schema.getSocketsBufferSize()), 50000);
        compressingSocketStreams = schema.isSocketsCompressed();
        // Validate the number of slaves. We need at least one to have a valid cluster
        // 
        int nonMasterCount = 0;
        for (SlaveServer slave : schema.getSlaveServersFromMasterOrLocal()) {
            if (!slave.isMaster()) {
                nonMasterCount++;
            }
        }
        if (nonMasterCount <= 0) {
            throw new KettleException("At least one slave server is required to be present in cluster schema [" + schema + "]");
        }
    }
    if (usedSchemas.size() == 0) {
        throw new KettleException("No cluster schemas are being used.  As such it is not possible to split and cluster this transformation.");
    }
    if (usedSchemas.size() > 1) {
        throw new KettleException("At this time we don't support the use of multiple cluster schemas in one and the same transformation.");
    }
}
Also used : KettleException(org.pentaho.di.core.exception.KettleException) Hashtable(java.util.Hashtable) SlaveServer(org.pentaho.di.cluster.SlaveServer) StepMeta(org.pentaho.di.trans.step.StepMeta) ClusterSchema(org.pentaho.di.cluster.ClusterSchema)

Example 98 with SlaveServer

use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.

In class TransSplitter, the method getPort.

/**
 * Get the port to use for the given cluster schema, source/target slave servers and step copies.
 *
 * If a port was already allocated for this exact combination it is returned from the cache;
 * otherwise the master allocates a new one. The allocation goes through the master to make
 * sure the port is not already in use on the same host, e.g. by another Carte instance.
 *
 * @param clusterSchema  the cluster schema to use
 * @param sourceSlave    the slave server that opens the server socket
 * @param sourceStepName the name of the source step
 * @param sourceStepCopy the copy number of the source step
 * @param targetSlave    the slave server that connects to the socket
 * @param targetStepName the name of the target step
 * @param targetStepCopy the copy number of the target step
 * @return the port to use for that step/slaveserver/cluster combination
 */
private int getPort(ClusterSchema clusterSchema, SlaveServer sourceSlave, String sourceStepName, int sourceStepCopy, SlaveServer targetSlave, String targetStepName, int targetStepCopy) throws Exception {
    SlaveServer master = clusterSchema.findMaster();
    String cacheKey = createPortCacheKey(sourceSlave, sourceStepName, sourceStepCopy, targetSlave, targetStepName, targetStepCopy);
    // Re-use a previously allocated port for this exact source/target combination.
    Integer cachedPort = portCache.get(cacheKey);
    if (cachedPort != null) {
        return cachedPort.intValue();
    }
    // No port yet: have the master allocate one for the (resolved) source host.
    String resolvedHostname = sourceSlave.environmentSubstitute(sourceSlave.getHostname());
    int allocatedPort = master.allocateServerSocket(clusteredRunId, Const.toInt(clusterSchema.getBasePort(), 40000), resolvedHostname, originalTransformation.getName(), sourceSlave.getName(), sourceStepName, Integer.toString(sourceStepCopy), targetSlave.getName(), targetStepName, Integer.toString(targetStepCopy));
    portCache.put(cacheKey, allocatedPort);
    return allocatedPort;
}
Also used : SlaveServer(org.pentaho.di.cluster.SlaveServer)

Example 99 with SlaveServer

use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.

In class TransSplitter, the method splitOriginalTransformation.

public void splitOriginalTransformation() throws KettleException {
    clear();
    // Mixing clusters is not supported at the moment
    // Perform some basic checks on the cluster configuration.
    // 
    findUsedOriginalSteps();
    checkClusterConfiguration();
    generateSlavePartitionSchemas();
    try {
        SlaveServer masterSlaveServer = getMasterServer();
        masterTransMeta = getOriginalCopy(false, null, null);
        ClusterSchema clusterSchema = originalTransformation.findFirstUsedClusterSchema();
        List<SlaveServer> slaveServers = clusterSchema.getSlaveServers();
        int nrSlavesNodes = clusterSchema.findNrSlaves();
        boolean encrypt = false;
        byte[] transformationKey = null;
        PublicKey pubK = null;
        if (encrypt) {
            KeyPair pair = CertificateGenEncryptUtil.generateKeyPair();
            pubK = pair.getPublic();
            PrivateKey privK = pair.getPrivate();
            Key key1 = CertificateGenEncryptUtil.generateSingleKey();
            try {
                transformationKey = CertificateGenEncryptUtil.encodeKeyForTransmission(privK, key1);
            } catch (InvalidKeyException ex) {
                masterTransMeta.getLogChannel().logError("Invalid key was used for encoding", ex);
            } catch (IllegalBlockSizeException ex) {
                masterTransMeta.getLogChannel().logError("Error happenned during key encoding", ex);
            } catch (Exception ex) {
                masterTransMeta.getLogChannel().logError("Error happenned during encryption initialization", ex);
            }
        }
        for (int r = 0; r < referenceSteps.length; r++) {
            StepMeta referenceStep = referenceSteps[r];
            List<StepMeta> prevSteps = originalTransformation.findPreviousSteps(referenceStep);
            int nrPreviousSteps = prevSteps.size();
            for (int p = 0; p < nrPreviousSteps; p++) {
                StepMeta previousStep = prevSteps.get(p);
                if (!referenceStep.isClustered()) {
                    if (!previousStep.isClustered()) {
                        // No clustering involved here: just add the reference step to the master
                        // 
                        StepMeta target = masterTransMeta.findStep(referenceStep.getName());
                        if (target == null) {
                            target = (StepMeta) referenceStep.clone();
                            masterTransMeta.addStep(target);
                        }
                        StepMeta source = masterTransMeta.findStep(previousStep.getName());
                        if (source == null) {
                            source = (StepMeta) previousStep.clone();
                            masterTransMeta.addStep(source);
                        }
                        // Add a hop too...
                        // 
                        TransHopMeta masterHop = new TransHopMeta(source, target);
                        masterTransMeta.addTransHop(masterHop);
                    } else {
                        // reference step is NOT clustered
                        // Previous step is clustered
                        // --> We read from the slave server using socket readers.
                        // We need a reader for each slave server in the cluster
                        // 
                        // Also add the reference step to the master. (cloned)
                        // 
                        StepMeta masterStep = masterTransMeta.findStep(referenceStep.getName());
                        if (masterStep == null) {
                            masterStep = (StepMeta) referenceStep.clone();
                            masterStep.setLocation(masterStep.getLocation().x, masterStep.getLocation().y);
                            masterTransMeta.addStep(masterStep);
                        }
                        Queue<Integer> masterStepCopyNumbers = new LinkedList<Integer>();
                        for (int i = 0; i < masterStep.getCopies(); i++) {
                            masterStepCopyNumbers.add(i);
                        }
                        // 
                        for (int slaveNr = 0; slaveNr < slaveServers.size(); slaveNr++) {
                            SlaveServer sourceSlaveServer = slaveServers.get(slaveNr);
                            if (!sourceSlaveServer.isMaster()) {
                                // MASTER: add remote input steps to the master step. That way it can receive data over sockets.
                                // 
                                // SLAVE : add remote output steps to the previous step
                                // 
                                TransMeta slave = getSlaveTransformation(clusterSchema, sourceSlaveServer);
                                // See if we can add a link to the previous using the Remote Steps concept.
                                // 
                                StepMeta slaveStep = slave.findStep(previousStep.getName());
                                if (slaveStep == null) {
                                    slaveStep = addSlaveCopy(slave, previousStep, sourceSlaveServer);
                                }
                                // Make sure the data finds its way back to the master.
                                // 
                                // Verify the partitioning for this slave step.
                                // It's running in 1 or more copies depending on the number of partitions
                                // Get the number of target partitions...
                                // 
                                StepPartitioningMeta previousStepPartitioningMeta = previousStep.getStepPartitioningMeta();
                                PartitionSchema previousPartitionSchema = previousStepPartitioningMeta.getPartitionSchema();
                                int nrOfSourceCopies = determineNrOfStepCopies(sourceSlaveServer, previousStep);
                                // 
                                if (masterStep.getCopies() != 1 && masterStep.getCopies() != nrOfSourceCopies) {
                                    // this case might be handled correctly later
                                    String message = BaseMessages.getString(PKG, "TransSplitter.Clustering.CopyNumberStep", nrSlavesNodes, previousStep.getName(), masterStep.getName());
                                    throw new KettleException(message);
                                }
                                // 
                                for (int sourceCopyNr = 0; sourceCopyNr < nrOfSourceCopies; sourceCopyNr++) {
                                    // The masterStepCopy number is increasing for each remote copy on each slave.
                                    // This makes the master distribute to each copy of the slave properly.
                                    // There is a check above to make sure that the master has either 1 copy or the same as slave*copies
                                    Integer masterStepCopyNr = masterStepCopyNumbers.poll();
                                    if (masterStepCopyNr == null) {
                                        masterStepCopyNr = 0;
                                    }
                                    // We open a port on the various slave servers...
                                    // So the source is the slave server, the target the master.
                                    // 
                                    int port = getPort(clusterSchema, sourceSlaveServer, slaveStep.getName(), sourceCopyNr, masterSlaveServer, masterStep.getName(), masterStepCopyNr);
                                    RemoteStep remoteMasterStep = new RemoteStep(sourceSlaveServer.getHostname(), masterSlaveServer.getHostname(), Integer.toString(port), slaveStep.getName(), sourceCopyNr, masterStep.getName(), masterStepCopyNr, sourceSlaveServer.getName(), masterSlaveServer.getName(), socketsBufferSize, compressingSocketStreams, originalTransformation.getStepFields(previousStep));
                                    remoteMasterStep.setEncryptingStreams(encrypt);
                                    remoteMasterStep.setKey(transformationKey);
                                    masterStep.getRemoteInputSteps().add(remoteMasterStep);
                                    RemoteStep remoteSlaveStep = new RemoteStep(sourceSlaveServer.getHostname(), masterSlaveServer.getHostname(), Integer.toString(port), slaveStep.getName(), sourceCopyNr, masterStep.getName(), masterStepCopyNr, sourceSlaveServer.getName(), masterSlaveServer.getName(), socketsBufferSize, compressingSocketStreams, originalTransformation.getStepFields(previousStep));
                                    remoteSlaveStep.setEncryptingStreams(encrypt);
                                    remoteSlaveStep.setKey(transformationKey);
                                    slaveStep.getRemoteOutputSteps().add(remoteSlaveStep);
                                    // 
                                    if (slaveStep.isPartitioned()) {
                                        slaveStepCopyPartitionDistribution.addPartition(sourceSlaveServer.getName(), previousPartitionSchema.getName(), sourceCopyNr);
                                    }
                                }
                                // 
                                if (referenceStep.isPartitioned()) {
                                    // Set the target partitioning schema for the source step (master)
                                    // 
                                    StepPartitioningMeta stepPartitioningMeta = previousStepPartitioningMeta.clone();
                                    PartitionSchema partitionSchema = stepPartitioningMeta.getPartitionSchema();
                                    partitionSchema.setName(createTargetPartitionSchemaName(partitionSchema.getName()));
                                    if (partitionSchema.isDynamicallyDefined()) {
                                        // Expand the cluster definition to: nrOfSlaves*nrOfPartitionsPerSlave...
                                        // 
                                        partitionSchema.expandPartitionsDynamically(clusterSchema.findNrSlaves(), originalTransformation);
                                    }
                                    masterStep.setTargetStepPartitioningMeta(stepPartitioningMeta);
                                    masterTransMeta.addOrReplacePartitionSchema(partitionSchema);
                                    // Now set the partitioning schema for the slave step...
                                    // For the slave step, we only should those partition IDs that are interesting for the current
                                    // slave...
                                    // 
                                    stepPartitioningMeta = previousStepPartitioningMeta.clone();
                                    partitionSchema = stepPartitioningMeta.getPartitionSchema();
                                    partitionSchema.setName(createSlavePartitionSchemaName(partitionSchema.getName()));
                                    if (partitionSchema.isDynamicallyDefined()) {
                                        // Expand the cluster definition to: nrOfSlaves*nrOfPartitionsPerSlave...
                                        // 
                                        partitionSchema.expandPartitionsDynamically(clusterSchema.findNrSlaves(), originalTransformation);
                                    }
                                    partitionSchema.retainPartitionsForSlaveServer(clusterSchema.findNrSlaves(), getSlaveServerNumber(clusterSchema, sourceSlaveServer));
                                    slave.addOrReplacePartitionSchema(partitionSchema);
                                }
                            }
                        }
                    }
                } else {
                    if (!previousStep.isClustered()) {
                        // reference step is clustered
                        // previous step is not clustered
                        // --> Add a socket writer for each slave server
                        // 
                        // MASTER : add remote output step to the previous step
                        // 
                        StepMeta sourceStep = masterTransMeta.findStep(previousStep.getName());
                        if (sourceStep == null) {
                            sourceStep = (StepMeta) previousStep.clone();
                            sourceStep.setLocation(previousStep.getLocation().x, previousStep.getLocation().y);
                            masterTransMeta.addStep(sourceStep);
                        }
                        Queue<Integer> masterStepCopyNumbers = new LinkedList<Integer>();
                        for (int i = 0; i < sourceStep.getCopies(); i++) {
                            masterStepCopyNumbers.add(i);
                        }
                        for (int s = 0; s < slaveServers.size(); s++) {
                            SlaveServer targetSlaveServer = slaveServers.get(s);
                            if (!targetSlaveServer.isMaster()) {
                                // SLAVE : add remote input step to the reference slave step...
                                // 
                                TransMeta slaveTransMeta = getSlaveTransformation(clusterSchema, targetSlaveServer);
                                // also add the step itself.
                                StepMeta targetStep = slaveTransMeta.findStep(referenceStep.getName());
                                if (targetStep == null) {
                                    targetStep = addSlaveCopy(slaveTransMeta, referenceStep, targetSlaveServer);
                                }
                                // Verify the partitioning for this slave step.
                                // It's running in 1 or more copies depending on the number of partitions
                                // Get the number of target partitions...
                                // 
                                StepPartitioningMeta targetStepPartitioningMeta = referenceStep.getStepPartitioningMeta();
                                PartitionSchema targetPartitionSchema = targetStepPartitioningMeta.getPartitionSchema();
                                int nrOfTargetCopies = determineNrOfStepCopies(targetSlaveServer, referenceStep);
                                // 
                                for (int targetCopyNr = 0; targetCopyNr < nrOfTargetCopies; targetCopyNr++) {
                                    // The masterStepCopy number is increasing for each remote copy on each slave.
                                    // This makes the master distribute to each copy of the slave properly.
                                    // There is a check above to make sure that the master has either 1 copy or the same as slave*copies
                                    Integer masterStepCopyNr = masterStepCopyNumbers.poll();
                                    if (masterStepCopyNr == null) {
                                        masterStepCopyNr = 0;
                                    }
                                    // The master step opens server socket ports
                                    // So the IP address should be the same, in this case, the master...
                                    // 
                                    int port = getPort(clusterSchema, masterSlaveServer, sourceStep.getName(), masterStepCopyNr, targetSlaveServer, referenceStep.getName(), targetCopyNr);
                                    RemoteStep remoteMasterStep = new RemoteStep(masterSlaveServer.getHostname(), targetSlaveServer.getHostname(), Integer.toString(port), sourceStep.getName(), masterStepCopyNr, referenceStep.getName(), targetCopyNr, masterSlaveServer.getName(), targetSlaveServer.getName(), socketsBufferSize, compressingSocketStreams, originalTransformation.getStepFields(previousStep));
                                    remoteMasterStep.setEncryptingStreams(encrypt);
                                    remoteMasterStep.setKey(transformationKey);
                                    sourceStep.getRemoteOutputSteps().add(remoteMasterStep);
                                    RemoteStep remoteSlaveStep = new RemoteStep(masterSlaveServer.getHostname(), targetSlaveServer.getHostname(), Integer.toString(port), sourceStep.getName(), masterStepCopyNr, referenceStep.getName(), targetCopyNr, masterSlaveServer.getName(), targetSlaveServer.getName(), socketsBufferSize, compressingSocketStreams, originalTransformation.getStepFields(previousStep));
                                    remoteSlaveStep.setEncryptingStreams(encrypt);
                                    remoteSlaveStep.setKey(transformationKey);
                                    targetStep.getRemoteInputSteps().add(remoteSlaveStep);
                                    // 
                                    if (targetStep.isPartitioned()) {
                                        slaveStepCopyPartitionDistribution.addPartition(targetSlaveServer.getName(), targetPartitionSchema.getName(), targetCopyNr);
                                    }
                                }
                                // 
                                if (targetStepPartitioningMeta.isPartitioned()) {
                                    // Set the target partitioning schema for the source step (master)
                                    // 
                                    StepPartitioningMeta stepPartitioningMeta = targetStepPartitioningMeta.clone();
                                    PartitionSchema partitionSchema = stepPartitioningMeta.getPartitionSchema();
                                    partitionSchema.setName(createTargetPartitionSchemaName(partitionSchema.getName()));
                                    if (partitionSchema.isDynamicallyDefined()) {
                                        // Expand the cluster definition to: nrOfSlaves*nrOfPartitionsPerSlave...
                                        // 
                                        partitionSchema.expandPartitionsDynamically(clusterSchema.findNrSlaves(), originalTransformation);
                                    }
                                    sourceStep.setTargetStepPartitioningMeta(stepPartitioningMeta);
                                    masterTransMeta.addOrReplacePartitionSchema(partitionSchema);
                                    // Now set the partitioning schema for the slave step...
                                    // For the slave step, we only should those partition IDs that are interesting for the current
                                    // slave...
                                    // 
                                    stepPartitioningMeta = targetStepPartitioningMeta.clone();
                                    partitionSchema = stepPartitioningMeta.getPartitionSchema();
                                    partitionSchema.setName(createSlavePartitionSchemaName(partitionSchema.getName()));
                                    if (partitionSchema.isDynamicallyDefined()) {
                                        // Expand the cluster definition to: nrOfSlaves*nrOfPartitionsPerSlave...
                                        // 
                                        partitionSchema.expandPartitionsDynamically(clusterSchema.findNrSlaves(), originalTransformation);
                                    }
                                    partitionSchema.retainPartitionsForSlaveServer(clusterSchema.findNrSlaves(), getSlaveServerNumber(clusterSchema, targetSlaveServer));
                                    slaveTransMeta.addOrReplacePartitionSchema(partitionSchema);
                                }
                            }
                        }
                    } else {
                        // 
                        for (int slaveNr = 0; slaveNr < slaveServers.size(); slaveNr++) {
                            SlaveServer targetSlaveServer = slaveServers.get(slaveNr);
                            if (!targetSlaveServer.isMaster()) {
                                // SLAVE
                                TransMeta slaveTransMeta = getSlaveTransformation(clusterSchema, targetSlaveServer);
                                // This is the target step
                                // 
                                StepMeta targetStep = slaveTransMeta.findStep(referenceStep.getName());
                                if (targetStep == null) {
                                    targetStep = addSlaveCopy(slaveTransMeta, referenceStep, targetSlaveServer);
                                }
                                // This is the source step
                                // 
                                StepMeta sourceStep = slaveTransMeta.findStep(previousStep.getName());
                                if (sourceStep == null) {
                                    sourceStep = addSlaveCopy(slaveTransMeta, previousStep, targetSlaveServer);
                                }
                                // Add a hop between source and target
                                // 
                                TransHopMeta slaveHop = new TransHopMeta(sourceStep, targetStep);
                                slaveTransMeta.addTransHop(slaveHop);
                                // Verify the partitioning
                                // That means is this case that it is possible that
                                // 
                                // 1) the number of partitions is larger than the number of slaves
                                // 2) the partitioning method might change requiring the source step to do re-partitioning.
                                // 
                                // We need to provide the source step with the information to re-partition correctly.
                                // 
                                // Case 1: both source and target are partitioned on the same partition schema.
                                // 
                                StepPartitioningMeta sourceStepPartitioningMeta = previousStep.getStepPartitioningMeta();
                                StepPartitioningMeta targetStepPartitioningMeta = referenceStep.getStepPartitioningMeta();
                                if (previousStep.isPartitioned() && referenceStep.isPartitioned() && sourceStepPartitioningMeta.equals(targetStepPartitioningMeta)) {
                                    // Just divide the partitions over the available slaves...
                                    // set the appropriate partition schema for both step...
                                    // 
                                    StepPartitioningMeta stepPartitioningMeta = sourceStepPartitioningMeta.clone();
                                    PartitionSchema partitionSchema = stepPartitioningMeta.getPartitionSchema();
                                    partitionSchema.setName(createSlavePartitionSchemaName(partitionSchema.getName()));
                                    if (partitionSchema.isDynamicallyDefined()) {
                                        partitionSchema.expandPartitionsDynamically(clusterSchema.findNrSlaves(), originalTransformation);
                                    }
                                    partitionSchema.retainPartitionsForSlaveServer(clusterSchema.findNrSlaves(), getSlaveServerNumber(clusterSchema, targetSlaveServer));
                                    sourceStep.setStepPartitioningMeta(stepPartitioningMeta);
                                    targetStep.setStepPartitioningMeta(stepPartitioningMeta);
                                    slaveTransMeta.addOrReplacePartitionSchema(partitionSchema);
                                } else if ((!previousStep.isPartitioned() && referenceStep.isPartitioned()) || (previousStep.isPartitioned() && referenceStep.isPartitioned() && !sourceStepPartitioningMeta.equals(targetStep.getStepPartitioningMeta()))) {
                                    // Case 2: both source and target are partitioned on a different partition schema.
                                    // Case 3: source is not partitioned, target is partitioned.
                                    // 
                                    // --> This means that we're re-partitioning!!
                                    // 
                                    PartitionSchema targetPartitionSchema = targetStepPartitioningMeta.getPartitionSchema();
                                    PartitionSchema sourcePartitionSchema = sourceStepPartitioningMeta.getPartitionSchema();
                                    // 
                                    for (int partSlaveNr = 0; partSlaveNr < slaveServers.size(); partSlaveNr++) {
                                        SlaveServer sourceSlaveServer = slaveServers.get(partSlaveNr);
                                        if (!sourceSlaveServer.isMaster()) {
                                            // It's running in 1 or more copies depending on the number of partitions
                                            // Get the number of target partitions...
                                            // 
                                            Map<PartitionSchema, List<String>> partitionsMap = slaveServerPartitionsMap.get(sourceSlaveServer);
                                            int nrOfTargetPartitions = 1;
                                            if (targetStep.isPartitioned() && targetPartitionSchema != null) {
                                                List<String> targetPartitionsList = partitionsMap.get(targetPartitionSchema);
                                                nrOfTargetPartitions = targetPartitionsList.size();
                                            } else if (targetStep.getCopies() > 1) {
                                                nrOfTargetPartitions = targetStep.getCopies();
                                            }
                                            // Get the number of source partitions...
                                            // 
                                            int nrOfSourcePartitions = 1;
                                            if (sourceStep.isPartitioned() && sourcePartitionSchema != null) {
                                                List<String> sourcePartitionsList = partitionsMap.get(sourcePartitionSchema);
                                                nrOfSourcePartitions = sourcePartitionsList.size();
                                            } else if (sourceStep.getCopies() > 1) {
                                                nrOfSourcePartitions = sourceStep.getCopies();
                                            }
                                            // 
                                            for (int sourceCopyNr = 0; sourceCopyNr < nrOfSourcePartitions; sourceCopyNr++) {
                                                for (int targetCopyNr = 0; targetCopyNr < nrOfTargetPartitions; targetCopyNr++) {
                                                    if (!targetSlaveServer.equals(sourceSlaveServer)) {
                                                        // Here we only get the remote steps, NOT the local ones.
                                                        // That's why it's OK to generate all combinations.
                                                        // 
                                                        int outPort = getPort(clusterSchema, targetSlaveServer, sourceStep.getName(), sourceCopyNr, sourceSlaveServer, targetStep.getName(), targetCopyNr);
                                                        RemoteStep remoteOutputStep = new RemoteStep(targetSlaveServer.getHostname(), sourceSlaveServer.getHostname(), Integer.toString(outPort), sourceStep.getName(), sourceCopyNr, targetStep.getName(), targetCopyNr, targetSlaveServer.getName(), sourceSlaveServer.getName(), socketsBufferSize, compressingSocketStreams, originalTransformation.getStepFields(previousStep));
                                                        remoteOutputStep.setEncryptingStreams(encrypt);
                                                        remoteOutputStep.setKey(transformationKey);
                                                        sourceStep.getRemoteOutputSteps().add(remoteOutputStep);
                                                        // OK, so the source step is sending rows out on the reserved ports
                                                        // What we need to do now is link all the OTHER slaves up to them.
                                                        // 
                                                        int inPort = getPort(clusterSchema, sourceSlaveServer, sourceStep.getName(), sourceCopyNr, targetSlaveServer, targetStep.getName(), targetCopyNr);
                                                        RemoteStep remoteInputStep = new RemoteStep(sourceSlaveServer.getHostname(), targetSlaveServer.getHostname(), Integer.toString(inPort), sourceStep.getName(), sourceCopyNr, targetStep.getName(), targetCopyNr, sourceSlaveServer.getName(), targetSlaveServer.getName(), socketsBufferSize, compressingSocketStreams, originalTransformation.getStepFields(previousStep));
                                                        remoteInputStep.setEncryptingStreams(encrypt);
                                                        remoteInputStep.setKey(transformationKey);
                                                        targetStep.getRemoteInputSteps().add(remoteInputStep);
                                                    }
                                                    // OK, save the partition number for the target step in the partition distribution...
                                                    // 
                                                    slaveStepCopyPartitionDistribution.addPartition(sourceSlaveServer.getName(), targetPartitionSchema.getName(), targetCopyNr);
                                                }
                                            }
                                            if (sourceStepPartitioningMeta.isPartitioned()) {
                                                // Set the correct partitioning schema for the source step.
                                                // 
                                                // Set the target partitioning schema for the target step (slave)
                                                // 
                                                StepPartitioningMeta stepPartitioningMeta = sourceStepPartitioningMeta.clone();
                                                PartitionSchema partitionSchema = stepPartitioningMeta.getPartitionSchema();
                                                partitionSchema.setName(createSlavePartitionSchemaName(partitionSchema.getName()));
                                                if (partitionSchema.isDynamicallyDefined()) {
                                                    // Expand the cluster definition to: nrOfSlaves*nrOfPartitionsPerSlave...
                                                    // 
                                                    partitionSchema.expandPartitionsDynamically(clusterSchema.findNrSlaves(), originalTransformation);
                                                }
                                                partitionSchema.retainPartitionsForSlaveServer(clusterSchema.findNrSlaves(), getSlaveServerNumber(clusterSchema, targetSlaveServer));
                                                sourceStep.setStepPartitioningMeta(stepPartitioningMeta);
                                                slaveTransMeta.addOrReplacePartitionSchema(partitionSchema);
                                            }
                                            if (targetStepPartitioningMeta.isPartitioned()) {
                                                // Set the target partitioning schema for the target step (slave)
                                                // 
                                                StepPartitioningMeta stepPartitioningMeta = targetStepPartitioningMeta.clone();
                                                PartitionSchema partitionSchema = stepPartitioningMeta.getPartitionSchema();
                                                partitionSchema.setName(createSlavePartitionSchemaName(partitionSchema.getName()));
                                                if (partitionSchema.isDynamicallyDefined()) {
                                                    partitionSchema.expandPartitionsDynamically(clusterSchema.findNrSlaves(), originalTransformation);
                                                }
                                                partitionSchema.retainPartitionsForSlaveServer(clusterSchema.findNrSlaves(), getSlaveServerNumber(clusterSchema, targetSlaveServer));
                                                targetStep.setStepPartitioningMeta(stepPartitioningMeta);
                                                slaveTransMeta.addOrReplacePartitionSchema(partitionSchema);
                                            }
                                            // 
                                            if (!sourceStepPartitioningMeta.isPartitioned() || !sourceStepPartitioningMeta.equals(targetStepPartitioningMeta)) {
                                                // The source is not partitioned (or uses a different partitioning) while the target is partitioned:
                                                // set the target partitioning on the source so it can re-partition correctly.
                                                // Set the correct partitioning schema for the source step.
                                                // 
                                                // Set the target partitioning schema for the target step (slave)
                                                // 
                                                StepPartitioningMeta stepPartitioningMeta = targetStepPartitioningMeta.clone();
                                                PartitionSchema partitionSchema = stepPartitioningMeta.getPartitionSchema();
                                                partitionSchema.setName(createTargetPartitionSchemaName(partitionSchema.getName()));
                                                if (partitionSchema.isDynamicallyDefined()) {
                                                    // Expand the cluster definition to: nrOfSlaves*nrOfPartitionsPerSlave...
                                                    // 
                                                    partitionSchema.expandPartitionsDynamically(clusterSchema.findNrSlaves(), originalTransformation);
                                                }
                                                sourceStep.setTargetStepPartitioningMeta(stepPartitioningMeta);
                                                slaveTransMeta.addOrReplacePartitionSchema(partitionSchema);
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
            if (nrPreviousSteps == 0) {
                if (!referenceStep.isClustered()) {
                    // Not clustered, simply add the step.
                    if (masterTransMeta.findStep(referenceStep.getName()) == null) {
                        masterTransMeta.addStep((StepMeta) referenceStep.clone());
                    }
                } else {
                    for (int s = 0; s < slaveServers.size(); s++) {
                        SlaveServer slaveServer = slaveServers.get(s);
                        if (!slaveServer.isMaster()) {
                            // SLAVE
                            TransMeta slave = getSlaveTransformation(clusterSchema, slaveServer);
                            if (slave.findStep(referenceStep.getName()) == null) {
                                addSlaveCopy(slave, referenceStep, slaveServer);
                            }
                        }
                    }
                }
            }
        }
        // 
        for (int i = 0; i < referenceSteps.length; i++) {
            StepMeta originalStep = referenceSteps[i];
            // Also take care of the info steps...
            // For example: StreamLookup, Table Input, etc.
            // 
            StepMeta[] infoSteps = originalTransformation.getInfoStep(originalStep);
            for (int p = 0; infoSteps != null && p < infoSteps.length; p++) {
                StepMeta infoStep = infoSteps[p];
                if (infoStep != null) {
                    if (!originalStep.isClustered()) {
                        if (!infoStep.isClustered()) {
                            // No clustering involved here: just add a link between the reference step and the infostep
                            // 
                            StepMeta target = masterTransMeta.findStep(originalStep.getName());
                            StepMeta source = masterTransMeta.findStep(infoStep.getName());
                            // Add a hop too...
                            TransHopMeta masterHop = new TransHopMeta(source, target);
                            masterTransMeta.addTransHop(masterHop);
                        } else {
                            // reference step is NOT clustered
                            // Previous step is clustered
                            // --> We read from the slave server using socket readers.
                            // We need a reader for each slave server in the cluster
                            // On top of that we need to merge the data from all these steps using a dummy. (to make sure)
                            // That dummy needs to feed into Merge Join
                            // 
                            int nrSlaves = clusterSchema.getSlaveServers().size();
                            for (int s = 0; s < nrSlaves; s++) {
                                SlaveServer sourceSlaveServer = clusterSchema.getSlaveServers().get(s);
                                if (!sourceSlaveServer.isMaster()) {
                                    // //////////////////////////////////////////////////////////////////////////////////////////
                                    // On the SLAVES: add a socket writer...
                                    // 
                                    TransMeta slave = getSlaveTransformation(clusterSchema, sourceSlaveServer);
                                    SocketWriterMeta socketWriterMeta = new SocketWriterMeta();
                                    int port = getPort(clusterSchema, sourceSlaveServer, infoStep.getName(), 0, masterSlaveServer, originalStep.getName(), 0);
                                    socketWriterMeta.setPort("" + port);
                                    socketWriterMeta.setBufferSize(clusterSchema.getSocketsBufferSize());
                                    socketWriterMeta.setFlushInterval(clusterSchema.getSocketsFlushInterval());
                                    socketWriterMeta.setCompressed(clusterSchema.isSocketsCompressed());
                                    StepMeta writerStep = new StepMeta(getWriterName(clusterSchema, sourceSlaveServer, infoStep.getName(), 0, masterSlaveServer, originalStep.getName(), 0), socketWriterMeta);
                                    writerStep.setLocation(infoStep.getLocation().x + 50, infoStep.getLocation().y + 50);
                                    writerStep.setDraw(true);
                                    slave.addStep(writerStep);
                                    // We also need to add a hop between infoStep and the new writer step
                                    // 
                                    TransHopMeta slaveHop = new TransHopMeta(infoStep, writerStep);
                                    if (slave.findTransHop(slaveHop) == null) {
                                        slave.addTransHop(slaveHop);
                                    }
                                    // //////////////////////////////////////////////////////////////////////////////////////////
                                    // On the MASTER : add a socket reader and a dummy step to merge the data...
                                    // 
                                    SocketReaderMeta socketReaderMeta = new SocketReaderMeta();
                                    socketReaderMeta.setPort("" + port);
                                    socketReaderMeta.setBufferSize(clusterSchema.getSocketsBufferSize());
                                    socketReaderMeta.setCompressed(clusterSchema.isSocketsCompressed());
                                    StepMeta readerStep = new StepMeta(getReaderName(clusterSchema, sourceSlaveServer, infoStep.getName(), 0, masterSlaveServer, originalStep.getName(), 0), socketReaderMeta);
                                    readerStep.setLocation(infoStep.getLocation().x, infoStep.getLocation().y + (s * FANOUT * 2) - (nrSlaves * FANOUT / 2));
                                    readerStep.setDraw(true);
                                    masterTransMeta.addStep(readerStep);
                                    // Also add a single dummy step in the master that will merge the data from the slave
                                    // transformations.
                                    // 
                                    String dummyName = infoStep.getName();
                                    StepMeta dummyStep = masterTransMeta.findStep(dummyName);
                                    if (dummyStep == null) {
                                        DummyTransMeta dummy = new DummyTransMeta();
                                        dummyStep = new StepMeta(dummyName, dummy);
                                        dummyStep.setLocation(infoStep.getLocation().x + (SPLIT / 2), infoStep.getLocation().y);
                                        dummyStep.setDraw(true);
                                        dummyStep.setDescription("This step merges the data from the various data streams coming " + "from the slave transformations.\nIt does that right before it hits the step that " + "reads from a specific (info) step.");
                                        masterTransMeta.addStep(dummyStep);
                                        // Now we need a hop from the dummy merge step to the actual target step (original step)
                                        // 
                                        StepMeta masterTargetStep = masterTransMeta.findStep(originalStep.getName());
                                        TransHopMeta targetHop = new TransHopMeta(dummyStep, masterTargetStep);
                                        masterTransMeta.addTransHop(targetHop);
                                        // Set the master target step as an info step... (use the cloned copy)
                                        // 
                                        String[] infoStepNames = masterTargetStep.getStepMetaInterface().getStepIOMeta().getInfoStepnames();
                                        if (infoStepNames != null) {
                                            StepMeta[] is = new StepMeta[infoStepNames.length];
                                            for (int n = 0; n < infoStepNames.length; n++) {
                                                // OK, info steps moved to the slave steps
                                                is[n] = slave.findStep(infoStepNames[n]);
                                                if (infoStepNames[n].equals(infoStep.getName())) {
                                                    // We want to replace this one with the reader step: that's where we source from now
                                                    infoSteps[n] = readerStep;
                                                }
                                            }
                                            masterTargetStep.getStepMetaInterface().getStepIOMeta().setInfoSteps(infoSteps);
                                        }
                                    }
                                    // Add a hop between the reader step and the dummy
                                    // 
                                    TransHopMeta mergeHop = new TransHopMeta(readerStep, dummyStep);
                                    if (masterTransMeta.findTransHop(mergeHop) == null) {
                                        masterTransMeta.addTransHop(mergeHop);
                                    }
                                }
                            }
                        }
                    } else {
                        if (!infoStep.isClustered()) {
                            // 
                            for (int s = 0; s < slaveServers.size(); s++) {
                                SlaveServer targetSlaveServer = slaveServers.get(s);
                                if (!targetSlaveServer.isMaster()) {
                                    // MASTER
                                    SocketWriterMeta socketWriterMeta = new SocketWriterMeta();
                                    socketWriterMeta.setPort("" + getPort(clusterSchema, masterSlaveServer, infoStep.getName(), 0, targetSlaveServer, originalStep.getName(), 0));
                                    socketWriterMeta.setBufferSize(clusterSchema.getSocketsBufferSize());
                                    socketWriterMeta.setFlushInterval(clusterSchema.getSocketsFlushInterval());
                                    socketWriterMeta.setCompressed(clusterSchema.isSocketsCompressed());
                                    StepMeta writerStep = new StepMeta(getWriterName(clusterSchema, masterSlaveServer, infoStep.getName(), 0, targetSlaveServer, originalStep.getName(), 0), socketWriterMeta);
                                    writerStep.setLocation(originalStep.getLocation().x, originalStep.getLocation().y + (s * FANOUT * 2) - (nrSlavesNodes * FANOUT / 2));
                                    writerStep.setDraw(originalStep.isDrawn());
                                    masterTransMeta.addStep(writerStep);
                                    // The previous step: add a hop to it.
                                    // It still has the original name as it is not clustered.
                                    // 
                                    StepMeta previous = masterTransMeta.findStep(infoStep.getName());
                                    if (previous == null) {
                                        previous = (StepMeta) infoStep.clone();
                                        masterTransMeta.addStep(previous);
                                    }
                                    TransHopMeta masterHop = new TransHopMeta(previous, writerStep);
                                    masterTransMeta.addTransHop(masterHop);
                                    // SLAVE
                                    TransMeta slave = getSlaveTransformation(clusterSchema, targetSlaveServer);
                                    SocketReaderMeta socketReaderMeta = new SocketReaderMeta();
                                    socketReaderMeta.setHostname(masterSlaveServer.getHostname());
                                    socketReaderMeta.setPort("" + getPort(clusterSchema, masterSlaveServer, infoStep.getName(), 0, targetSlaveServer, originalStep.getName(), 0));
                                    socketReaderMeta.setBufferSize(clusterSchema.getSocketsBufferSize());
                                    socketReaderMeta.setCompressed(clusterSchema.isSocketsCompressed());
                                    StepMeta readerStep = new StepMeta(getReaderName(clusterSchema, masterSlaveServer, infoStep.getName(), 0, targetSlaveServer, originalStep.getName(), 0), socketReaderMeta);
                                    readerStep.setLocation(originalStep.getLocation().x - (SPLIT / 2), originalStep.getLocation().y);
                                    readerStep.setDraw(originalStep.isDrawn());
                                    slave.addStep(readerStep);
                                    // also add the step itself.
                                    StepMeta slaveStep = slave.findStep(originalStep.getName());
                                    if (slaveStep == null) {
                                        slaveStep = addSlaveCopy(slave, originalStep, targetSlaveServer);
                                    }
                                    // And a hop from the
                                    TransHopMeta slaveHop = new TransHopMeta(readerStep, slaveStep);
                                    slave.addTransHop(slaveHop);
                                    // 
                                    // Now we have to explain to the slaveStep that it has to source from previous
                                    // 
                                    String[] infoStepNames = slaveStep.getStepMetaInterface().getStepIOMeta().getInfoStepnames();
                                    if (infoStepNames != null) {
                                        StepMeta[] is = new StepMeta[infoStepNames.length];
                                        for (int n = 0; n < infoStepNames.length; n++) {
                                            // OK, info steps moved to the slave steps
                                            is[n] = slave.findStep(infoStepNames[n]);
                                            if (infoStepNames[n].equals(infoStep.getName())) {
                                                // We want to replace this one with the reader step: that's where we source from now
                                                infoSteps[n] = readerStep;
                                            }
                                        }
                                        slaveStep.getStepMetaInterface().getStepIOMeta().setInfoSteps(infoSteps);
                                    }
                                }
                            }
                        } else {
                            // 
                            for (int s = 0; s < slaveServers.size(); s++) {
                                SlaveServer slaveServer = slaveServers.get(s);
                                if (!slaveServer.isMaster()) {
                                    TransMeta slave = getSlaveTransformation(clusterSchema, slaveServer);
                                    StepMeta slaveStep = slave.findStep(originalStep.getName());
                                    String[] infoStepNames = slaveStep.getStepMetaInterface().getStepIOMeta().getInfoStepnames();
                                    if (infoStepNames != null) {
                                        StepMeta[] is = new StepMeta[infoStepNames.length];
                                        for (int n = 0; n < infoStepNames.length; n++) {
                                            // OK, info steps moved to the slave steps
                                            is[n] = slave.findStep(infoStepNames[n]);
                                            // Hang on... is there a hop to the previous step?
                                            if (slave.findTransHop(is[n], slaveStep) == null) {
                                                TransHopMeta infoHop = new TransHopMeta(is[n], slaveStep);
                                                slave.addTransHop(infoHop);
                                            }
                                        }
                                        slaveStep.getStepMetaInterface().getStepIOMeta().setInfoSteps(infoSteps);
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
        // Also add the original list of partition schemas to the slave step copy partition distribution...
        // 
        slaveStepCopyPartitionDistribution.setOriginalPartitionSchemas(originalTransformation.getPartitionSchemas());
        // 
        for (TransMeta transMeta : slaveTransMap.values()) {
            transMeta.setSlaveStepCopyPartitionDistribution(slaveStepCopyPartitionDistribution);
            if (encrypt) {
                transMeta.setKey(pubK.getEncoded());
                transMeta.setPrivateKey(false);
            }
            transMeta.clearChanged();
        }
        // do not erase partitioning schema for master transformation
        // if some of steps is expected to run on master partitioned, that is the case
        // when partition schema should exists as 'local' partition schema instead of slave's remote one
        // see PDI-12766
        // NOTE: PDI-18333 keep newly created partitionSchemas and add back in original per PDI-12766
        masterTransMeta.addOrReplacePartitionSchema(originalTransformation.getPartitionSchemas());
        masterTransMeta.setSlaveStepCopyPartitionDistribution(slaveStepCopyPartitionDistribution);
        if (encrypt) {
            masterTransMeta.setKey(pubK.getEncoded());
            masterTransMeta.setPrivateKey(!false);
        }
        masterTransMeta.clearChanged();
    // We're absolutely done here...
    } catch (Exception e) {
        throw new KettleException("Unexpected problem while generating master transformation", e);
    }
}
Also used : KettleException(org.pentaho.di.core.exception.KettleException) PrivateKey(java.security.PrivateKey) SocketWriterMeta(org.pentaho.di.trans.steps.socketwriter.SocketWriterMeta) TransMeta(org.pentaho.di.trans.TransMeta) DummyTransMeta(org.pentaho.di.trans.steps.dummytrans.DummyTransMeta) IllegalBlockSizeException(javax.crypto.IllegalBlockSizeException) SlaveServer(org.pentaho.di.cluster.SlaveServer) StepPartitioningMeta(org.pentaho.di.trans.step.StepPartitioningMeta) DummyTransMeta(org.pentaho.di.trans.steps.dummytrans.DummyTransMeta) ArrayList(java.util.ArrayList) LinkedList(java.util.LinkedList) List(java.util.List) KeyPair(java.security.KeyPair) SocketReaderMeta(org.pentaho.di.trans.steps.socketreader.SocketReaderMeta) PartitionSchema(org.pentaho.di.partition.PartitionSchema) PublicKey(java.security.PublicKey) InvalidKeyException(java.security.InvalidKeyException) StepMeta(org.pentaho.di.trans.step.StepMeta) KettleException(org.pentaho.di.core.exception.KettleException) IllegalBlockSizeException(javax.crypto.IllegalBlockSizeException) InvalidKeyException(java.security.InvalidKeyException) LinkedList(java.util.LinkedList) RemoteStep(org.pentaho.di.trans.step.RemoteStep) TransHopMeta(org.pentaho.di.trans.TransHopMeta) HashMap(java.util.HashMap) Map(java.util.Map) ClusterSchema(org.pentaho.di.cluster.ClusterSchema) PublicKey(java.security.PublicKey) Key(java.security.Key) PrivateKey(java.security.PrivateKey)

Example 100 with SlaveServer

use of org.pentaho.di.cluster.SlaveServer in project pentaho-kettle by pentaho.

the class Trans method getClusteredTransformationResult.

/**
 * Gets the clustered transformation result by aggregating the results of every slave
 * transformation and, when available, the master transformation.
 *
 * @param log               the log channel interface used to report per-server basic/error messages
 * @param transSplitter     the TransSplitter holding the slave servers, slave transformations,
 *                          master server and master transformation of the cluster run
 * @param parentJob         the parent job (currently unused by this method; kept for API compatibility)
 * @param loggingRemoteWork when {@code true}, the remote execution logs are echoed to the local log
 * @return the aggregated clustered transformation result; every server that could not be
 *         contacted increments the error count instead of aborting the aggregation
 */
public static final Result getClusteredTransformationResult(LogChannelInterface log, TransSplitter transSplitter, Job parentJob, boolean loggingRemoteWork) {
    Result result = new Result();
    // 
    // See if the remote transformations have finished.
    // We could just look at the master, but I doubt that that is enough in all situations.
    // 
    // <-- ask these guys
    SlaveServer[] slaveServers = transSplitter.getSlaveTargets();
    TransMeta[] slaves = transSplitter.getSlaves();
    SlaveServer masterServer;
    try {
        masterServer = transSplitter.getMasterServer();
    } catch (KettleException e) {
        log.logError("Error getting the master server", e);
        // Leave masterServer null: the master section below is skipped, but the
        // slave results are still collected.
        masterServer = null;
        result.setNrErrors(result.getNrErrors() + 1);
    }
    TransMeta master = transSplitter.getMaster();
    // 
    for (int s = 0; s < slaveServers.length; s++) {
        try {
            // Get the detailed status of the slave transformation...
            // 
            SlaveServerTransStatus transStatus = slaveServers[s].getTransStatus(slaves[s].getName(), "", 0);
            Result transResult = transStatus.getResult(slaves[s]);
            result.add(transResult);
            if (loggingRemoteWork) {
                log.logBasic("-- Slave : " + slaveServers[s].getName());
                log.logBasic(transStatus.getLoggingString());
            }
        } catch (Exception e) {
            // A single unreachable slave must not abort aggregation: count the error and continue.
            result.setNrErrors(result.getNrErrors() + 1);
            log.logError("Unable to contact slave server '" + slaveServers[s].getName() + "' to get result of slave transformation : " + e.toString());
        }
    }
    // 
    // Also guard against masterServer == null: the lookup above may have failed, and
    // dereferencing it here (or in the catch block below) would throw an uncaught NPE.
    if (master != null && masterServer != null && master.nrSteps() > 0) {
        try {
            // Get the detailed status of the master transformation...
            // 
            SlaveServerTransStatus transStatus = masterServer.getTransStatus(master.getName(), "", 0);
            Result transResult = transStatus.getResult(master);
            result.add(transResult);
            if (loggingRemoteWork) {
                log.logBasic("-- Master : " + masterServer.getName());
                log.logBasic(transStatus.getLoggingString());
            }
        } catch (Exception e) {
            result.setNrErrors(result.getNrErrors() + 1);
            log.logError("Unable to contact master server '" + masterServer.getName() + "' to get result of master transformation : " + e.toString());
        }
    }
    return result;
}
Also used : KettleException(org.pentaho.di.core.exception.KettleException) SlaveServerTransStatus(org.pentaho.di.www.SlaveServerTransStatus) SlaveServer(org.pentaho.di.cluster.SlaveServer) KettleExtensionPoint(org.pentaho.di.core.extension.KettleExtensionPoint) UnknownParamException(org.pentaho.di.core.parameters.UnknownParamException) KettleValueException(org.pentaho.di.core.exception.KettleValueException) KettleTransException(org.pentaho.di.core.exception.KettleTransException) DuplicateParamException(org.pentaho.di.core.parameters.DuplicateParamException) KettleFileException(org.pentaho.di.core.exception.KettleFileException) UnsupportedEncodingException(java.io.UnsupportedEncodingException) KettleException(org.pentaho.di.core.exception.KettleException) KettleDatabaseException(org.pentaho.di.core.exception.KettleDatabaseException) WebResult(org.pentaho.di.www.WebResult) Result(org.pentaho.di.core.Result)

Aggregations

SlaveServer (org.pentaho.di.cluster.SlaveServer)110 KettleException (org.pentaho.di.core.exception.KettleException)35 DatabaseMeta (org.pentaho.di.core.database.DatabaseMeta)32 Test (org.junit.Test)22 ClusterSchema (org.pentaho.di.cluster.ClusterSchema)22 KettleDatabaseException (org.pentaho.di.core.exception.KettleDatabaseException)18 PartitionSchema (org.pentaho.di.partition.PartitionSchema)18 KettleExtensionPoint (org.pentaho.di.core.extension.KettleExtensionPoint)17 JobMeta (org.pentaho.di.job.JobMeta)16 ObjectId (org.pentaho.di.repository.ObjectId)16 StepMeta (org.pentaho.di.trans.step.StepMeta)14 ArrayList (java.util.ArrayList)13 TransMeta (org.pentaho.di.trans.TransMeta)11 Result (org.pentaho.di.core.Result)10 KettleFileException (org.pentaho.di.core.exception.KettleFileException)10 UnknownParamException (org.pentaho.di.core.parameters.UnknownParamException)10 NotePadMeta (org.pentaho.di.core.NotePadMeta)9 Point (org.pentaho.di.core.gui.Point)8 List (java.util.List)7 KettleXMLException (org.pentaho.di.core.exception.KettleXMLException)7